Mirror of https://github.com/home-assistant/core.git (synced 2025-04-23 08:47:57 +00:00)
Generate Stream snapshots using next keyframe (#96991)
* Add wait_for_next_keyframe option to stream images
  Add STREAM_SNAPSHOT to CameraEntityFeature
  Use wait_for_next_keyframe option for snapshots using stream

* Update stream test comments

* Add generic camera snapshot test

* Get stream still images directly in camera
  Remove getting stream images from generic, nest, and ONVIF
  Refactor camera preferences
  Add use_stream_for_stills setting to camera
  Update tests

* Only attempt to get stream image if integration supports stream

* Use property instead of entity registry setting

* Split out getting stream prerequisites from stream_source in nest

* Use cached_property for rtsp live stream trait

* Make rtsp live stream trait NestCamera attribute

* Update homeassistant/components/nest/camera.py

Co-authored-by: Allen Porter <allen.porter@gmail.com>

* Change usage of async_timeout

* Change import formatting in generic/test_camera

* Simplify Nest camera property initialization

---------

Co-authored-by: Allen Porter <allen.porter@gmail.com>
parent e003903bc5 · commit 407aa31adc
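The heart of the change: every still-image path now branches on a new Camera.use_stream_for_stills property. When it is true, the image is decoded from a stream keyframe instead of the integration's own snapshot endpoint, and snapshots additionally wait for the next keyframe so they are fresh. A minimal sketch of that decision, using the names introduced in the diff below (the get_still wrapper itself is illustrative, not part of the commit):

    async def get_still(camera, for_snapshot: bool) -> bytes | None:
        # Illustrative only: use_stream_for_stills and _async_get_stream_image
        # are the real names added below; this wrapper is not.
        if camera.use_stream_for_stills:
            # Snapshots wait for a fresh keyframe; proxy views take the latest one.
            return await _async_get_stream_image(
                camera, wait_for_next_keyframe=for_snapshot
            )
        return await camera.async_camera_image()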
@@ -168,9 +168,14 @@ async def _async_get_image(
     """
     with suppress(asyncio.CancelledError, asyncio.TimeoutError):
         async with asyncio.timeout(timeout):
-            if image_bytes := await camera.async_camera_image(
-                width=width, height=height
-            ):
+            image_bytes = (
+                await _async_get_stream_image(
+                    camera, width=width, height=height, wait_for_next_keyframe=False
+                )
+                if camera.use_stream_for_stills
+                else await camera.async_camera_image(width=width, height=height)
+            )
+            if image_bytes:
                 content_type = camera.content_type
                 image = Image(content_type, image_bytes)
                 if (
@@ -205,6 +210,21 @@ async def async_get_image(
     return await _async_get_image(camera, timeout, width, height)
 
 
+async def _async_get_stream_image(
+    camera: Camera,
+    width: int | None = None,
+    height: int | None = None,
+    wait_for_next_keyframe: bool = False,
+) -> bytes | None:
+    if not camera.stream and camera.supported_features & SUPPORT_STREAM:
+        camera.stream = await camera.async_create_stream()
+    if camera.stream:
+        return await camera.stream.async_get_image(
+            width=width, height=height, wait_for_next_keyframe=wait_for_next_keyframe
+        )
+    return None
+
+
 @bind_hass
 async def async_get_stream_source(hass: HomeAssistant, entity_id: str) -> str | None:
     """Fetch the stream source for a camera entity."""
@@ -360,6 +380,7 @@ async def async_setup(hass: HomeAssistant, config: ConfigType) -> bool:
     await component.async_setup(config)
 
     async def preload_stream(_event: Event) -> None:
+        """Load stream prefs and start stream if preload_stream is True."""
        for camera in list(component.entities):
            stream_prefs = await prefs.get_dynamic_stream_settings(camera.entity_id)
            if not stream_prefs.preload_stream:
@@ -459,6 +480,11 @@ class Camera(Entity):
             return self._attr_entity_picture
         return ENTITY_IMAGE_URL.format(self.entity_id, self.access_tokens[-1])
 
+    @property
+    def use_stream_for_stills(self) -> bool:
+        """Whether or not to use stream to generate stills."""
+        return False
+
     @property
     def supported_features(self) -> CameraEntityFeature:
         """Flag supported features."""
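The base class defaults to False, so existing integrations keep their current snapshot behavior; generic, Nest, and ONVIF override the property later in this diff. A hedged sketch of a third-party integration opting in (the class and URL are hypothetical, only the two overridden hooks are real Camera API):

    from homeassistant.components.camera import Camera, CameraEntityFeature


    class ExampleStreamCamera(Camera):
        """Hypothetical camera with no native snapshot endpoint."""

        _attr_supported_features = CameraEntityFeature.STREAM

        @property
        def use_stream_for_stills(self) -> bool:
            """Whether or not to use stream to generate stills."""
            return True  # always decode stills from the stream

        async def stream_source(self) -> str | None:
            """Return the source of the stream."""
            return "rtsp://camera.example.local/live"  # illustrative URL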
@@ -926,7 +952,12 @@ async def async_handle_snapshot_service(
             f"Cannot write `{snapshot_file}`, no access to path; `allowlist_external_dirs` may need to be adjusted in `configuration.yaml`"
         )
 
-    image = await camera.async_camera_image()
+    async with asyncio.timeout(CAMERA_IMAGE_TIMEOUT):
+        image = (
+            await _async_get_stream_image(camera, wait_for_next_keyframe=True)
+            if camera.use_stream_for_stills
+            else await camera.async_camera_image()
+        )
 
     if image is None:
         return
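Waiting for the next keyframe can take up to a whole GOP, so the snapshot service now bounds the fetch with the standard-library asyncio.timeout (the "Change usage of async_timeout" bullet). The pattern in isolation, as a sketch: the 10-second deadline is illustrative (the real code uses CAMERA_IMAGE_TIMEOUT), and _async_get_stream_image is the helper added above.

    import asyncio


    async def snapshot_with_deadline(camera) -> bytes | None:
        # If no keyframe arrives before the deadline, TimeoutError
        # propagates to the service call instead of hanging forever.
        async with asyncio.timeout(10):
            return await _async_get_stream_image(camera, wait_for_next_keyframe=True)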
@@ -172,15 +172,16 @@ class GenericCamera(Camera):
         self._last_url = None
         self._last_image = None
 
+    @property
+    def use_stream_for_stills(self) -> bool:
+        """Whether or not to use stream to generate stills."""
+        return not self._still_image_url
+
     async def async_camera_image(
         self, width: int | None = None, height: int | None = None
     ) -> bytes | None:
         """Return a still image response from the camera."""
         if not self._still_image_url:
-            if not self.stream:
-                await self.async_create_stream()
-            if self.stream:
-                return await self.stream.async_get_image(width, height)
             return None
         try:
             url = self._still_image_url.async_render(parse_result=False)
@@ -7,6 +7,7 @@ import datetime
 import functools
 import logging
 from pathlib import Path
+from typing import cast
 
 from google_nest_sdm.camera_traits import (
     CameraImageTrait,
@@ -71,9 +72,24 @@ class NestCamera(Camera):
         self._stream: RtspStream | None = None
         self._create_stream_url_lock = asyncio.Lock()
         self._stream_refresh_unsub: Callable[[], None] | None = None
-        self._attr_is_streaming = CameraLiveStreamTrait.NAME in self._device.traits
+        self._attr_is_streaming = False
+        self._attr_supported_features = CameraEntityFeature(0)
+        self._rtsp_live_stream_trait: CameraLiveStreamTrait | None = None
+        if CameraLiveStreamTrait.NAME in self._device.traits:
+            self._attr_is_streaming = True
+            self._attr_supported_features |= CameraEntityFeature.STREAM
+            trait = cast(
+                CameraLiveStreamTrait, self._device.traits[CameraLiveStreamTrait.NAME]
+            )
+            if StreamingProtocol.RTSP in trait.supported_protocols:
+                self._rtsp_live_stream_trait = trait
         self.stream_options[CONF_EXTRA_PART_WAIT_TIME] = 3
 
+    @property
+    def use_stream_for_stills(self) -> bool:
+        """Whether or not to use stream to generate stills."""
+        return self._rtsp_live_stream_trait is not None
+
     @property
     def unique_id(self) -> str:
         """Return a unique ID."""
@@ -95,14 +111,6 @@
         """Return the camera model."""
         return self._device_info.device_model
 
-    @property
-    def supported_features(self) -> CameraEntityFeature:
-        """Flag supported features."""
-        supported_features = CameraEntityFeature(0)
-        if CameraLiveStreamTrait.NAME in self._device.traits:
-            supported_features |= CameraEntityFeature.STREAM
-        return supported_features
-
     @property
     def frontend_stream_type(self) -> StreamType | None:
         """Return the type of stream supported by this camera."""
@@ -125,18 +133,15 @@
 
     async def stream_source(self) -> str | None:
         """Return the source of the stream."""
-        if not self.supported_features & CameraEntityFeature.STREAM:
-            return None
-        if CameraLiveStreamTrait.NAME not in self._device.traits:
-            return None
-        trait = self._device.traits[CameraLiveStreamTrait.NAME]
-        if StreamingProtocol.RTSP not in trait.supported_protocols:
+        if not self._rtsp_live_stream_trait:
             return None
         async with self._create_stream_url_lock:
             if not self._stream:
                 _LOGGER.debug("Fetching stream url")
                 try:
-                    self._stream = await trait.generate_rtsp_stream()
+                    self._stream = (
+                        await self._rtsp_live_stream_trait.generate_rtsp_stream()
+                    )
                 except ApiException as err:
                     raise HomeAssistantError(f"Nest API error: {err}") from err
                 self._schedule_stream_refresh()
@@ -204,10 +209,7 @@
     ) -> bytes | None:
         """Return bytes of camera image."""
         # Use the thumbnail from RTSP stream, or a placeholder if stream is
-        # not supported (e.g. WebRTC)
-        stream = await self.async_create_stream()
-        if stream:
-            return await stream.async_get_image(width, height)
+        # not supported (e.g. WebRTC) as a fallback when 'use_stream_for_stills' if False
         return await self.hass.async_add_executor_job(self.placeholder_image)
 
     @classmethod
@@ -114,6 +114,11 @@ class ONVIFCameraEntity(ONVIFBaseEntity, Camera):
         self._stream_uri: str | None = None
         self._stream_uri_future: asyncio.Future[str] | None = None
 
+    @property
+    def use_stream_for_stills(self) -> bool:
+        """Whether or not to use stream to generate stills."""
+        return bool(self.stream and self.stream.dynamic_stream_settings.preload_stream)
+
     @property
     def name(self) -> str:
         """Return the name of this camera."""
@@ -140,9 +145,6 @@
     ) -> bytes | None:
         """Return a still image response from the camera."""
 
-        if self.stream and self.stream.dynamic_stream_settings.preload_stream:
-            return await self.stream.async_get_image(width, height)
-
         if self.device.capabilities.snapshot:
             try:
                 if image := await self.device.device.get_snapshot(
@@ -537,6 +537,7 @@ class Stream:
         self,
         width: int | None = None,
         height: int | None = None,
+        wait_for_next_keyframe: bool = False,
     ) -> bytes | None:
         """Fetch an image from the Stream and return it as a jpeg in bytes.
 
@@ -548,7 +549,9 @@
         self.add_provider(HLS_PROVIDER)
         await self.start()
         return await self._keyframe_converter.async_get_image(
-            width=width, height=height
+            width=width,
+            height=height,
+            wait_for_next_keyframe=wait_for_next_keyframe,
         )
 
     def get_diagnostics(self) -> dict[str, Any]:
@@ -441,7 +441,8 @@ class KeyFrameConverter:
         # pylint: disable-next=import-outside-toplevel
         from homeassistant.components.camera.img_util import TurboJPEGSingleton
 
-        self.packet: Packet = None
+        self._packet: Packet = None
+        self._event: asyncio.Event = asyncio.Event()
         self._hass = hass
         self._image: bytes | None = None
         self._turbojpeg = TurboJPEGSingleton.instance()
@@ -450,6 +451,14 @@
         self._stream_settings = stream_settings
         self._dynamic_stream_settings = dynamic_stream_settings
 
+    def stash_keyframe_packet(self, packet: Packet) -> None:
+        """Store the keyframe and set the asyncio.Event from the event loop.
+
+        This is called from the worker thread.
+        """
+        self._packet = packet
+        self._hass.loop.call_soon_threadsafe(self._event.set)
+
     def create_codec_context(self, codec_context: CodecContext) -> None:
         """Create a codec context to be used for decoding the keyframes.
 
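As the docstring says, stash_keyframe_packet runs on the stream worker thread, and asyncio.Event is not thread-safe, so the wake-up is marshalled onto the event loop with call_soon_threadsafe. The same handoff outside Home Assistant, as a self-contained sketch:

    import asyncio
    import threading


    def worker(loop: asyncio.AbstractEventLoop, keyframe_ready: asyncio.Event) -> None:
        # Worker thread: after stashing a packet, signal the loop.
        # Calling keyframe_ready.set() directly here would not be thread-safe.
        loop.call_soon_threadsafe(keyframe_ready.set)


    async def main() -> None:
        keyframe_ready = asyncio.Event()
        threading.Thread(
            target=worker, args=(asyncio.get_running_loop(), keyframe_ready)
        ).start()
        await keyframe_ready.wait()  # returns once the worker signals


    asyncio.run(main())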
@@ -482,10 +491,10 @@
         at a time per instance.
         """
 
-        if not (self._turbojpeg and self.packet and self._codec_context):
+        if not (self._turbojpeg and self._packet and self._codec_context):
             return
-        packet = self.packet
-        self.packet = None
+        packet = self._packet
+        self._packet = None
         for _ in range(2):  # Retry once if codec context needs to be flushed
             try:
                 # decode packet (flush afterwards)
@@ -519,10 +528,14 @@
         self,
         width: int | None = None,
         height: int | None = None,
+        wait_for_next_keyframe: bool = False,
     ) -> bytes | None:
         """Fetch an image from the Stream and return it as a jpeg in bytes."""
 
         # Use a lock to ensure only one thread is working on the keyframe at a time
+        if wait_for_next_keyframe:
+            self._event.clear()
+            await self._event.wait()
         async with self._lock:
             await self._hass.async_add_executor_job(self._generate_image, width, height)
         return self._image
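On the consumer side, wait_for_next_keyframe=True clears the event before waiting, so the returned image is decoded from a keyframe stashed after the call, never a stale one. The clear-then-wait ordering in isolation, as a sketch with a plain asyncio.Event:

    import asyncio


    async def next_signal(event: asyncio.Event) -> None:
        # If the event was already set by an earlier keyframe, wait() would
        # return immediately; clearing first ensures we only see a new signal.
        event.clear()
        await event.wait()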
@@ -624,4 +624,4 @@ def stream_worker(
         muxer.mux_packet(packet)
 
         if packet.is_keyframe and is_video(packet):
-            keyframe_converter.packet = packet
+            keyframe_converter.stash_keyframe_packet(packet)
@@ -909,3 +909,61 @@ async def test_rtsp_to_web_rtc_offer_not_accepted(
     assert mock_provider.called
 
     unsub()
+
+
+async def test_use_stream_for_stills(
+    hass: HomeAssistant,
+    hass_client: ClientSessionGenerator,
+    mock_camera,
+) -> None:
+    """Test that the component can grab images from stream."""
+
+    client = await hass_client()
+
+    with patch(
+        "homeassistant.components.demo.camera.DemoCamera.stream_source",
+        return_value=None,
+    ) as mock_stream_source, patch(
+        "homeassistant.components.demo.camera.DemoCamera.use_stream_for_stills",
+        return_value=True,
+    ):
+        # First test when the integration does not support stream should fail
+        resp = await client.get("/api/camera_proxy/camera.demo_camera")
+        await hass.async_block_till_done()
+        mock_stream_source.assert_not_called()
+        assert resp.status == HTTPStatus.INTERNAL_SERVER_ERROR
+        # Test when the integration does not provide a stream_source should fail
+        with patch(
+            "homeassistant.components.demo.camera.DemoCamera.supported_features",
+            return_value=camera.SUPPORT_STREAM,
+        ):
+            resp = await client.get("/api/camera_proxy/camera.demo_camera")
+            await hass.async_block_till_done()
+            mock_stream_source.assert_called_once()
+            assert resp.status == HTTPStatus.INTERNAL_SERVER_ERROR
+
+    with patch(
+        "homeassistant.components.demo.camera.DemoCamera.stream_source",
+        return_value="rtsp://some_source",
+    ) as mock_stream_source, patch(
+        "homeassistant.components.camera.create_stream"
+    ) as mock_create_stream, patch(
+        "homeassistant.components.demo.camera.DemoCamera.supported_features",
+        return_value=camera.SUPPORT_STREAM,
+    ), patch(
+        "homeassistant.components.demo.camera.DemoCamera.use_stream_for_stills",
+        return_value=True,
+    ):
+        # Now test when creating the stream succeeds
+        mock_stream = Mock()
+        mock_stream.async_get_image = AsyncMock()
+        mock_stream.async_get_image.return_value = b"stream_keyframe_image"
+        mock_create_stream.return_value = mock_stream
+
+        # should start the stream and get the image
+        resp = await client.get("/api/camera_proxy/camera.demo_camera")
+        await hass.async_block_till_done()
+        mock_create_stream.assert_called_once()
+        mock_stream.async_get_image.assert_called_once()
+        assert resp.status == HTTPStatus.OK
+        assert await resp.read() == b"stream_keyframe_image"
@@ -27,7 +27,7 @@ from homeassistant.const import CONF_PASSWORD, CONF_USERNAME, CONF_VERIFY_SSL
 from homeassistant.core import HomeAssistant
 from homeassistant.setup import async_setup_component
 
-from tests.common import AsyncMock, Mock, MockConfigEntry
+from tests.common import Mock, MockConfigEntry
 from tests.typing import ClientSessionGenerator, WebSocketGenerator
 
 
@@ -503,51 +503,6 @@ async def test_timeout_cancelled(
     assert await resp.read() == fakeimgbytes_png
 
 
-async def test_no_still_image_url(
-    hass: HomeAssistant, hass_client: ClientSessionGenerator
-) -> None:
-    """Test that the component can grab images from stream with no still_image_url."""
-    assert await async_setup_component(
-        hass,
-        "camera",
-        {
-            "camera": {
-                "name": "config_test",
-                "platform": "generic",
-                "stream_source": "rtsp://example.com:554/rtsp/",
-            },
-        },
-    )
-    await hass.async_block_till_done()
-
-    client = await hass_client()
-
-    with patch(
-        "homeassistant.components.generic.camera.GenericCamera.stream_source",
-        return_value=None,
-    ) as mock_stream_source:
-        # First test when there is no stream_source should fail
-        resp = await client.get("/api/camera_proxy/camera.config_test")
-        await hass.async_block_till_done()
-        mock_stream_source.assert_called_once()
-        assert resp.status == HTTPStatus.INTERNAL_SERVER_ERROR
-
-    with patch("homeassistant.components.camera.create_stream") as mock_create_stream:
-        # Now test when creating the stream succeeds
-        mock_stream = Mock()
-        mock_stream.async_get_image = AsyncMock()
-        mock_stream.async_get_image.return_value = b"stream_keyframe_image"
-        mock_create_stream.return_value = mock_stream
-
-        # should start the stream and get the image
-        resp = await client.get("/api/camera_proxy/camera.config_test")
-        await hass.async_block_till_done()
-        mock_create_stream.assert_called_once()
-        mock_stream.async_get_image.assert_called_once()
-        assert resp.status == HTTPStatus.OK
-        assert await resp.read() == b"stream_keyframe_image"
-
-
 async def test_frame_interval_property(hass: HomeAssistant) -> None:
     """Test that the frame interval is calculated and returned correctly."""
 
@@ -423,7 +423,7 @@ async def test_form_only_stream(
     await hass.async_block_till_done()
 
     with patch(
-        "homeassistant.components.generic.camera.GenericCamera.async_camera_image",
+        "homeassistant.components.camera._async_get_stream_image",
         return_value=fakeimgbytes_jpg,
     ):
         image_obj = await async_get_image(hass, "camera.127_0_0_1")
@@ -244,8 +244,6 @@ async def test_camera_stream(
     stream_source = await camera.async_get_stream_source(hass, "camera.my_camera")
     assert stream_source == "rtsp://some/url?auth=g.0.streamingToken"
 
-    assert await async_get_image(hass) == IMAGE_BYTES_FROM_STREAM
-
 
 async def test_camera_ws_stream(
     hass: HomeAssistant,
@@ -280,8 +278,6 @@
     assert msg["success"]
     assert msg["result"]["url"] == "http://home.assistant/playlist.m3u8"
 
-    assert await async_get_image(hass) == IMAGE_BYTES_FROM_STREAM
-
 
 async def test_camera_ws_stream_failure(
     hass: HomeAssistant,
@@ -746,8 +742,6 @@ async def test_camera_multiple_streams(
     stream_source = await camera.async_get_stream_source(hass, "camera.my_camera")
     assert stream_source == "rtsp://some/url?auth=g.0.streamingToken"
 
-    assert await async_get_image(hass) == IMAGE_BYTES_FROM_STREAM
-
     # WebRTC stream
     client = await hass_ws_client(hass)
     await client.send_json(
@@ -643,7 +643,7 @@ async def test_pts_out_of_order(hass: HomeAssistant) -> None:
 
 
 async def test_stream_stopped_while_decoding(hass: HomeAssistant) -> None:
-    """Tests that worker quits when stop() is called while decodign."""
+    """Tests that worker quits when stop() is called while decoding."""
     # Add some synchronization so that the test can pause the background
     # worker. When the worker is stopped, the test invokes stop() which
     # will cause the worker thread to exit once it enters the decode
@@ -966,7 +966,7 @@ async def test_h265_video_is_hvc1(hass: HomeAssistant, worker_finished_stream) -> None:
 
 
 async def test_get_image(hass: HomeAssistant, h264_video, filename) -> None:
-    """Test that the has_keyframe metadata matches the media."""
+    """Test getting an image from the stream."""
     await async_setup_component(hass, "stream", {"stream": {}})
 
     # Since libjpeg-turbo is not installed on the CI runner, we use a mock
@@ -976,10 +976,30 @@ async def test_get_image(hass: HomeAssistant, h264_video, filename) -> None:
         mock_turbo_jpeg_singleton.instance.return_value = mock_turbo_jpeg()
         stream = create_stream(hass, h264_video, {}, dynamic_stream_settings())
 
-        with patch.object(hass.config, "is_allowed_path", return_value=True):
+        worker_wake = threading.Event()
+
+        temp_av_open = av.open
+
+        def blocking_open(stream_source, *args, **kwargs):
+            # Block worker thread until test wakes up
+            worker_wake.wait()
+            return temp_av_open(stream_source, *args, **kwargs)
+
+        with patch.object(hass.config, "is_allowed_path", return_value=True), patch(
+            "av.open", new=blocking_open
+        ):
             make_recording = hass.async_create_task(stream.async_record(filename))
+            assert stream._keyframe_converter._image is None
+            # async_get_image should not work because there is no keyframe yet
+            assert not await stream.async_get_image()
+            # async_get_image should work if called with wait_for_next_keyframe=True
+            next_keyframe_request = hass.async_create_task(
+                stream.async_get_image(wait_for_next_keyframe=True)
+            )
+            worker_wake.set()
             await make_recording
         assert stream._keyframe_converter._image is None
 
+        assert await next_keyframe_request == EMPTY_8_6_JPEG
+
         assert await stream.async_get_image() == EMPTY_8_6_JPEG
 
@@ -1008,7 +1028,7 @@ async def test_worker_disable_ll_hls(hass: HomeAssistant) -> None:
 
 
 async def test_get_image_rotated(hass: HomeAssistant, h264_video, filename) -> None:
-    """Test that the has_keyframe metadata matches the media."""
+    """Test getting a rotated image."""
     await async_setup_component(hass, "stream", {"stream": {}})
 
     # Since libjpeg-turbo is not installed on the CI runner, we use a mock