Compare commits

..

13 Commits

Author SHA1 Message Date
copilot-swe-agent[bot]
ef63083c08 Support Docker containerd snapshotter for image extraction progress
Co-authored-by: agners <34061+agners@users.noreply.github.com>
2025-11-11 09:42:10 +00:00
copilot-swe-agent[bot]
8ac60b7c34 Initial plan 2025-11-11 09:30:49 +00:00
Stefan Agner
91a9cb98c3 Avoid adding Content-Type to non-body responses (#6266)
* Avoid adding Content-Type to non-body responses

The current code sets the content-type header for all responses
to the result's content_type property if upstream does not set a
content_type. The default value for content_type is
"application/octet-stream".

For responses that do not have a body (like 204 No Content or
304 Not Modified), setting a content-type header is unnecessary and
potentially misleading. Follow HTTP standards by only adding the
content-type header to responses that actually contain a body.

* Add pytest for ingress proxy

* Preserve Content-Type header for HEAD requests in ingress API
2025-11-10 17:39:10 +01:00
Stefan Agner
8f2b0763b7 Add zstd compression support (#6302)
Add zstd compression support to allow zstd compressed proxying for
ingress. Zstd is automatically supported by aiohttp if the package
is present.
2025-11-10 17:04:06 +01:00
Stefan Agner
5018d5d04e Bump pytest-asyncio to 1.2.0 (#6301) 2025-11-10 12:00:25 +01:00
Stefan Agner
1ba1ad9fc7 Remove Docker version from unhealthy reasons (#6292)
Any unhealthy reason blocks Home Assistant OS updates. If the Docker
version on a system running Home Assistant OS is outdated, the user
needs to be able to update Home Assistant OS to get a supported Docker
version. Therefore, we should not mark the system as unhealthy due to
an outdated Docker version.
2025-11-10 10:23:12 +01:00
dependabot[bot]
f0ef40eb3e Bump astroid from 4.0.1 to 4.0.2 (#6297)
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-11-10 09:55:16 +01:00
dependabot[bot]
6eed5b02b4 Bump coverage from 7.11.0 to 7.11.3 (#6298) 2025-11-09 23:24:55 -08:00
dependabot[bot]
e59dcf7089 Bump dbus-fast from 2.44.5 to 2.45.1 (#6299) 2025-11-09 23:15:39 -08:00
dependabot[bot]
48da3d8a8d Bump pre-commit from 4.3.0 to 4.4.0 (#6300) 2025-11-09 23:07:49 -08:00
dependabot[bot]
7b82ebe3aa Bump ruff from 0.14.3 to 0.14.4 (#6291)
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-11-07 09:06:06 +01:00
Stefan Agner
d96ea9aef9 Fix docker image pull progress blocked by small layers (#6287)
* Fix docker image pull progress blocked by small layers

Small Docker layers (typically <100 bytes) can skip the downloading phase
entirely, going directly from "Pulling fs layer" to "Download complete"
without emitting any progress events with byte counts. This caused the
aggregate progress calculation to block indefinitely, as it required all
layer jobs to have their `extra` field populated with byte counts before
proceeding.

The issue manifested as parent job progress jumping from 0% to 97.9% after
long delays, as seen when a 96-byte layer held up progress reporting for
~50 seconds until it finally reached the "Extracting" phase.

Set a minimal `extra` field (current=1, total=1) when layers reach
"Download complete" without having gone through the downloading phase.
This allows the aggregate progress calculation to proceed immediately
while still correctly representing the layer as 100% downloaded.

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude <noreply@anthropic.com>

* Update test to capture issue correctly

* Improve pytest

* Fix pytest comment

* Fix pylint warning

---------

Co-authored-by: Claude <noreply@anthropic.com>
2025-11-06 09:04:55 +01:00
dependabot[bot]
4e5ec2d6be Bump brotli from 1.1.0 to 1.2.0 (#6288)
Bumps [brotli](https://github.com/google/brotli) from 1.1.0 to 1.2.0.
- [Release notes](https://github.com/google/brotli/releases)
- [Changelog](https://github.com/google/brotli/blob/master/CHANGELOG.md)
- [Commits](https://github.com/google/brotli/compare/go/cbrotli/v1.1.0...v1.2.0)

---
updated-dependencies:
- dependency-name: brotli
  dependency-version: 1.2.0
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-11-06 09:04:00 +01:00
8 changed files with 389 additions and 131 deletions

View File

@@ -3,8 +3,9 @@ aiohttp==3.13.2
atomicwrites-homeassistant==1.4.1
attrs==25.4.0
awesomeversion==25.8.0
backports.zstd==1.0.0
blockbuster==1.5.25
brotli==1.1.0
brotli==1.2.0
ciso8601==2.3.3
colorlog==6.10.1
cpe==1.3.1
@@ -26,5 +27,5 @@ securetar==2025.2.1
sentry-sdk==2.43.0
setuptools==80.9.0
voluptuous==0.15.2
dbus-fast==2.44.5
dbus-fast==2.45.1
zlib-fast==0.2.1

View File

@@ -1,14 +1,14 @@
astroid==4.0.1
coverage==7.11.0
astroid==4.0.2
coverage==7.11.3
mypy==1.18.2
pre-commit==4.3.0
pre-commit==4.4.0
pylint==4.0.2
pytest-aiohttp==1.1.0
pytest-asyncio==0.25.2
pytest-asyncio==1.2.0
pytest-cov==7.0.0
pytest-timeout==2.4.0
pytest==8.4.2
ruff==0.14.3
ruff==0.14.4
time-machine==2.19.0
types-docker==7.1.0.20251009
types-pyyaml==6.0.12.20250915

View File

@@ -253,18 +253,28 @@ class APIIngress(CoreSysAttributes):
skip_auto_headers={hdrs.CONTENT_TYPE},
) as result:
headers = _response_header(result)
# Avoid parsing content_type in simple cases for better performance
if maybe_content_type := result.headers.get(hdrs.CONTENT_TYPE):
content_type = (maybe_content_type.partition(";"))[0].strip()
else:
content_type = result.content_type
# Empty body responses (304, 204, HEAD, etc.) should not be streamed,
# otherwise aiohttp < 3.9.0 may generate an invalid "0\r\n\r\n" chunk
# This also avoids setting content_type for empty responses.
if must_be_empty_body(request.method, result.status):
# If upstream contains content-type, preserve it (e.g. for HEAD requests)
if maybe_content_type:
headers[hdrs.CONTENT_TYPE] = content_type
return web.Response(
headers=headers,
status=result.status,
)
# Simple request
if (
# empty body responses should not be streamed,
# otherwise aiohttp < 3.9.0 may generate
# an invalid "0\r\n\r\n" chunk instead of an empty response.
must_be_empty_body(request.method, result.status)
or hdrs.CONTENT_LENGTH in result.headers
hdrs.CONTENT_LENGTH in result.headers
and int(result.headers.get(hdrs.CONTENT_LENGTH, 0)) < 4_194_000
):
# Return Response

View File

@@ -309,18 +309,30 @@ class DockerInterface(JobGroup, ABC):
stage in {PullImageLayerStage.DOWNLOADING, PullImageLayerStage.EXTRACTING}
and reference.progress_detail
):
# For containerd snapshotter, extracting phase has total=None
# In that case, use the download_total from the downloading phase
current_extra: dict[str, Any] = job.extra if job.extra else {}
if (
stage == PullImageLayerStage.DOWNLOADING
and reference.progress_detail.total
):
# Store download total for use in extraction phase with containerd snapshotter
current_extra["download_total"] = reference.progress_detail.total
job.update(
progress=progress,
stage=stage.status,
extra={
"current": reference.progress_detail.current,
"total": reference.progress_detail.total,
"total": reference.progress_detail.total
or current_extra.get("download_total"),
"download_total": current_extra.get("download_total"),
},
)
else:
# If we reach DOWNLOAD_COMPLETE without ever having set extra (small layers that skip
# the downloading phase), set a minimal extra so aggregate progress calculation can proceed
extra = job.extra
extra: dict[str, Any] | None = job.extra
if stage == PullImageLayerStage.DOWNLOAD_COMPLETE and not job.extra:
extra = {"current": 1, "total": 1}
@@ -341,40 +353,31 @@ class DockerInterface(JobGroup, ABC):
]
# First set the total bytes to be downloaded/extracted on the main job
# Note: With containerd snapshotter, total may be None (time-based progress instead of byte-based)
if not install_job.extra:
total = 0
has_byte_progress = True
for job in layer_jobs:
if not job.extra:
return
# If any layer has None for total, we can't do byte-weighted aggregation
if job.extra["total"] is None:
has_byte_progress = False
break
total += job.extra["total"]
# Store whether we have byte-based progress for later use
install_job.extra = {"total": total if has_byte_progress else None}
# Use download_total if available (for containerd snapshotter), otherwise use total
layer_total = job.extra.get("download_total") or job.extra.get("total")
if layer_total is None:
return
total += layer_total
install_job.extra = {"total": total}
else:
total = install_job.extra["total"]
has_byte_progress = total is not None
# Then determine total progress based on progress of each sub-job
# If we have byte counts, weight by size. Otherwise, simple average.
# Then determine total progress based on progress of each sub-job, factoring in size of each compared to total
progress = 0.0
stage = PullImageLayerStage.PULL_COMPLETE
for job in layer_jobs:
if not job.extra:
return
if has_byte_progress:
# Byte-weighted progress (classic Docker behavior)
progress += job.progress * (job.extra["total"] / total)
else:
# Simple average progress (containerd snapshotter with time-based progress)
progress += job.progress / len(layer_jobs)
# Use download_total if available (for containerd snapshotter), otherwise use total
layer_total = job.extra.get("download_total") or job.extra.get("total")
if layer_total is None:
return
progress += job.progress * (layer_total / total)
job_stage = PullImageLayerStage.from_status(cast(str, job.stage))
if job_stage < PullImageLayerStage.EXTRACTING:

View File

@@ -13,7 +13,6 @@ from .validate import get_valid_modules
_LOGGER: logging.Logger = logging.getLogger(__name__)
UNHEALTHY = [
UnsupportedReason.DOCKER_VERSION,
UnsupportedReason.LXC,
UnsupportedReason.PRIVILEGED,
]

View File

@@ -1,12 +1,28 @@
"""Test ingress API."""
from unittest.mock import AsyncMock, patch
from collections.abc import AsyncGenerator
from unittest.mock import AsyncMock, MagicMock, patch
from aiohttp.test_utils import TestClient
import aiohttp
from aiohttp import hdrs, web
from aiohttp.test_utils import TestClient, TestServer
import pytest
from supervisor.addons.addon import Addon
from supervisor.coresys import CoreSys
@pytest.fixture(name="real_websession")
async def fixture_real_websession(
coresys: CoreSys,
) -> AsyncGenerator[aiohttp.ClientSession]:
"""Fixture for real aiohttp ClientSession for ingress proxy tests."""
session = aiohttp.ClientSession()
coresys._websession = session # pylint: disable=W0212
yield session
await session.close()
async def test_validate_session(api_client: TestClient, coresys: CoreSys):
"""Test validating ingress session."""
with patch("aiohttp.web_request.BaseRequest.__getitem__", return_value=None):
@@ -86,3 +102,126 @@ async def test_validate_session_with_user_id(
assert (
coresys.ingress.get_session_data(session).user.display_name == "Some Name"
)
async def test_ingress_proxy_no_content_type_for_empty_body_responses(
api_client: TestClient, coresys: CoreSys, real_websession: aiohttp.ClientSession
):
"""Test that empty body responses don't get Content-Type header."""
# Create a mock add-on backend server that returns various status codes
async def mock_addon_handler(request: web.Request) -> web.Response:
"""Mock add-on handler that returns different status codes based on path."""
path = request.path
if path == "/204":
# 204 No Content - should not have Content-Type
return web.Response(status=204)
elif path == "/304":
# 304 Not Modified - should not have Content-Type
return web.Response(status=304)
elif path == "/100":
# 100 Continue - should not have Content-Type
return web.Response(status=100)
elif path == "/head":
# HEAD request - should have Content-Type (same as GET would)
return web.Response(body=b"test", content_type="text/html")
elif path == "/200":
# 200 OK with body - should have Content-Type
return web.Response(body=b"test content", content_type="text/plain")
elif path == "/200-no-content-type":
# 200 OK without explicit Content-Type - should get default
return web.Response(body=b"test content")
elif path == "/200-json":
# 200 OK with JSON - should preserve Content-Type
return web.Response(
body=b'{"key": "value"}', content_type="application/json"
)
else:
return web.Response(body=b"default", content_type="text/html")
# Create test server for mock add-on
app = web.Application()
app.router.add_route("*", "/{tail:.*}", mock_addon_handler)
addon_server = TestServer(app)
await addon_server.start_server()
try:
# Create ingress session
resp = await api_client.post("/ingress/session")
result = await resp.json()
session = result["data"]["session"]
# Create a mock add-on
mock_addon = MagicMock(spec=Addon)
mock_addon.slug = "test_addon"
mock_addon.ip_address = addon_server.host
mock_addon.ingress_port = addon_server.port
mock_addon.ingress_stream = False
# Generate an ingress token and register the add-on
ingress_token = coresys.ingress.create_session()
with patch.object(coresys.ingress, "get", return_value=mock_addon):
# Test 204 No Content - should NOT have Content-Type
resp = await api_client.get(
f"/ingress/{ingress_token}/204",
cookies={"ingress_session": session},
)
assert resp.status == 204
assert hdrs.CONTENT_TYPE not in resp.headers
# Test 304 Not Modified - should NOT have Content-Type
resp = await api_client.get(
f"/ingress/{ingress_token}/304",
cookies={"ingress_session": session},
)
assert resp.status == 304
assert hdrs.CONTENT_TYPE not in resp.headers
# Test HEAD request - SHOULD have Content-Type (same as GET)
# per RFC 9110: HEAD should return same headers as GET
resp = await api_client.head(
f"/ingress/{ingress_token}/head",
cookies={"ingress_session": session},
)
assert resp.status == 200
assert hdrs.CONTENT_TYPE in resp.headers
assert "text/html" in resp.headers[hdrs.CONTENT_TYPE]
# Body should be empty for HEAD
body = await resp.read()
assert body == b""
# Test 200 OK with body - SHOULD have Content-Type
resp = await api_client.get(
f"/ingress/{ingress_token}/200",
cookies={"ingress_session": session},
)
assert resp.status == 200
assert hdrs.CONTENT_TYPE in resp.headers
assert resp.headers[hdrs.CONTENT_TYPE] == "text/plain"
body = await resp.read()
assert body == b"test content"
# Test 200 OK without explicit Content-Type - SHOULD get default
resp = await api_client.get(
f"/ingress/{ingress_token}/200-no-content-type",
cookies={"ingress_session": session},
)
assert resp.status == 200
assert hdrs.CONTENT_TYPE in resp.headers
# Should get application/octet-stream as default from aiohttp ClientResponse
assert "application/octet-stream" in resp.headers[hdrs.CONTENT_TYPE]
# Test 200 OK with JSON - SHOULD preserve Content-Type
resp = await api_client.get(
f"/ingress/{ingress_token}/200-json",
cookies={"ingress_session": session},
)
assert resp.status == 200
assert hdrs.CONTENT_TYPE in resp.headers
assert "application/json" in resp.headers[hdrs.CONTENT_TYPE]
body = await resp.read()
assert body == b'{"key": "value"}'
finally:
await addon_server.close()

View File

@@ -576,54 +576,78 @@ async def test_install_progress_handles_layers_skipping_download(
test_docker_interface: DockerInterface,
capture_exception: Mock,
):
"""Test install handles small layers that skip downloading phase and go directly to download complete."""
"""Test install handles small layers that skip downloading phase and go directly to download complete.
Reproduces the real-world scenario from Supervisor issue #6286:
- Small layer (02a6e69d8d00) completes Download complete at 10:14:08 without ever Downloading
- Normal layer (3f4a84073184) starts Downloading at 10:14:09 with progress updates
"""
coresys.core.set_state(CoreState.RUNNING)
# Simulate multiple layers where one small layer (96 bytes) skips the downloading phase
# This layer should not block progress reporting for the parent job
# Reproduce EXACT sequence from SupervisorNoUpdateProgressLogs.txt:
# Small layer (02a6e69d8d00) completes BEFORE normal layer (3f4a84073184) starts downloading
coresys.docker.docker.api.pull.return_value = [
{"status": "Pulling from test/image", "id": "latest"},
# Layer 1: Normal layer with downloading phase
{"status": "Pulling fs layer", "progressDetail": {}, "id": "layer1"},
# Small layer that skips downloading (02a6e69d8d00 in logs, 96 bytes)
{"status": "Pulling fs layer", "progressDetail": {}, "id": "02a6e69d8d00"},
{"status": "Pulling fs layer", "progressDetail": {}, "id": "3f4a84073184"},
{"status": "Waiting", "progressDetail": {}, "id": "02a6e69d8d00"},
{"status": "Waiting", "progressDetail": {}, "id": "3f4a84073184"},
# Goes straight to Download complete (10:14:08 in logs) - THIS IS THE KEY MOMENT
{"status": "Download complete", "progressDetail": {}, "id": "02a6e69d8d00"},
# Normal layer that downloads (3f4a84073184 in logs, 25MB)
# Downloading starts (10:14:09 in logs) - progress updates should happen NOW!
{
"status": "Downloading",
"progressDetail": {"current": 100, "total": 1000},
"progress": "[=====> ] 100B/1000B",
"id": "layer1",
"progressDetail": {"current": 260937, "total": 25371463},
"progress": "[> ] 260.9kB/25.37MB",
"id": "3f4a84073184",
},
{
"status": "Downloading",
"progressDetail": {"current": 1000, "total": 1000},
"progress": "[==================================================>] 1000B/1000B",
"id": "layer1",
"progressDetail": {"current": 5505024, "total": 25371463},
"progress": "[==========> ] 5.505MB/25.37MB",
"id": "3f4a84073184",
},
{"status": "Download complete", "progressDetail": {}, "id": "layer1"},
{
"status": "Downloading",
"progressDetail": {"current": 11272192, "total": 25371463},
"progress": "[======================> ] 11.27MB/25.37MB",
"id": "3f4a84073184",
},
{"status": "Download complete", "progressDetail": {}, "id": "3f4a84073184"},
{
"status": "Extracting",
"progressDetail": {"current": 1000, "total": 1000},
"progress": "[==================================================>] 1000B/1000B",
"id": "layer1",
"progressDetail": {"current": 25371463, "total": 25371463},
"progress": "[==================================================>] 25.37MB/25.37MB",
"id": "3f4a84073184",
},
{"status": "Pull complete", "progressDetail": {}, "id": "layer1"},
# Layer 2: Small layer that skips downloading (like 02a6e69d8d00 from the logs)
{"status": "Pulling fs layer", "progressDetail": {}, "id": "layer2"},
{"status": "Waiting", "progressDetail": {}, "id": "layer2"},
# Goes straight to Download complete without Downloading phase
{"status": "Download complete", "progressDetail": {}, "id": "layer2"},
{"status": "Pull complete", "progressDetail": {}, "id": "3f4a84073184"},
# Small layer finally extracts (10:14:58 in logs)
{
"status": "Extracting",
"progressDetail": {"current": 96, "total": 96},
"progress": "[==================================================>] 96B/96B",
"id": "layer2",
"id": "02a6e69d8d00",
},
{"status": "Pull complete", "progressDetail": {}, "id": "layer2"},
{"status": "Pull complete", "progressDetail": {}, "id": "02a6e69d8d00"},
{"status": "Digest: sha256:test"},
{"status": "Status: Downloaded newer image for test/image:latest"},
]
with patch.object(
type(coresys.supervisor), "arch", PropertyMock(return_value="amd64")
):
# Schedule job so we can observe that it completes successfully
# Capture immutable snapshots of install job progress using job.as_dict()
# This solves the mutable object problem - we snapshot state at call time
install_job_snapshots = []
original_on_job_change = coresys.jobs._on_job_change # pylint: disable=W0212
def capture_and_forward(job_obj, attribute, value):
# Capture immutable snapshot if this is the install job with progress
if job_obj.name == "docker_interface_install" and job_obj.progress > 0:
install_job_snapshots.append(job_obj.as_dict())
# Forward to original to maintain functionality
return original_on_job_change(job_obj, attribute, value)
with patch.object(coresys.jobs, "_on_job_change", side_effect=capture_and_forward):
event = asyncio.Event()
job, install_task = coresys.jobs.schedule_job(
test_docker_interface.install,
@@ -641,12 +665,16 @@ async def test_install_progress_handles_layers_skipping_download(
await install_task
await event.wait()
# The key assertion: Job should complete successfully without errors
# Without the fix, layer2 would block all progress reporting until it reached Extracting,
# preventing the aggregate progress calculation from running
assert job.done is True
assert job.progress == 100
capture_exception.assert_not_called()
# First update from layer download should have rather low progress ((260937/25445459) / 2 ~ 0.5%)
assert install_job_snapshots[0]["progress"] < 1
# Total 8 events should lead to a progress update on the install job
assert len(install_job_snapshots) == 8
# Job should complete successfully
assert job.done is True
assert job.progress == 100
capture_exception.assert_not_called()
async def test_install_progress_handles_containerd_snapshotter(
@@ -654,68 +682,24 @@ async def test_install_progress_handles_containerd_snapshotter(
test_docker_interface: DockerInterface,
capture_exception: Mock,
):
"""Test install handles containerd snapshotter time-based progress (total=None)."""
"""Test install handles containerd snapshotter format where extraction has no total bytes.
With containerd snapshotter, the extraction phase reports time elapsed in seconds
rather than bytes extracted. The progress_detail has format:
{"current": <seconds>, "units": "s"} with total=None
This test ensures we handle this gracefully by using the download size for
aggregate progress calculation.
"""
coresys.core.set_state(CoreState.RUNNING)
# Containerd snapshotter reports extraction progress as time elapsed (e.g., "7 s")
# with current=7, total=None instead of byte-based progress
coresys.docker.docker.api.pull.return_value = [
{"status": "Pulling from test/image", "id": "latest"},
{"status": "Pulling fs layer", "progressDetail": {}, "id": "layer1"},
{
"status": "Downloading",
"progressDetail": {"current": 100, "total": 1000},
"progress": "[=====> ] 100B/1000B",
"id": "layer1",
},
{
"status": "Downloading",
"progressDetail": {"current": 1000, "total": 1000},
"progress": "[==================================================>] 1000B/1000B",
"id": "layer1",
},
{"status": "Download complete", "progressDetail": {}, "id": "layer1"},
{
"status": "Extracting",
"progressDetail": {"current": 1000, "total": 1000},
"progress": "[==================================================>] 1000B/1000B",
"id": "layer1",
},
{"status": "Pull complete", "progressDetail": {}, "id": "layer1"},
# Layer 2: Containerd snapshotter with time-based extraction
{"status": "Pulling fs layer", "progressDetail": {}, "id": "layer2"},
{
"status": "Downloading",
"progressDetail": {"current": 50, "total": 500},
"progress": "[=====> ] 50B/500B",
"id": "layer2",
},
{
"status": "Downloading",
"progressDetail": {"current": 500, "total": 500},
"progress": "[==================================================>] 500B/500B",
"id": "layer2",
},
{"status": "Download complete", "progressDetail": {}, "id": "layer2"},
# Time-based extraction progress (containerd snapshotter)
{
"status": "Extracting",
"progressDetail": {"current": 3, "total": None},
"progress": "3 s",
"id": "layer2",
},
{
"status": "Extracting",
"progressDetail": {"current": 7, "total": None},
"progress": "7 s",
"id": "layer2",
},
{"status": "Pull complete", "progressDetail": {}, "id": "layer2"},
{"status": "Digest: sha256:test"},
{"status": "Status: Downloaded newer image for test/image:latest"},
]
# Fixture emulates containerd snapshotter pull log format
coresys.docker.docker.api.pull.return_value = load_json_fixture(
"docker_pull_image_log_containerd.json"
)
with patch.object(
type(coresys.supervisor), "arch", PropertyMock(return_value="amd64")
type(coresys.supervisor), "arch", PropertyMock(return_value="i386")
):
event = asyncio.Event()
job, install_task = coresys.jobs.schedule_job(
@@ -734,7 +718,7 @@ async def test_install_progress_handles_containerd_snapshotter(
await install_task
await event.wait()
# The key assertion: Job should complete without crashing on None total
# Job should complete successfully without exceptions
assert job.done is True
assert job.progress == 100
capture_exception.assert_not_called()

View File

@@ -0,0 +1,122 @@
[
{
"status": "Pulling from home-assistant/test-image",
"id": "2025.7.1"
},
{
"status": "Pulling fs layer",
"progressDetail": {},
"id": "layer1"
},
{
"status": "Pulling fs layer",
"progressDetail": {},
"id": "layer2"
},
{
"status": "Downloading",
"progressDetail": {
"current": 1048576,
"total": 5178461
},
"progress": "[===========> ] 1.049MB/5.178MB",
"id": "layer1"
},
{
"status": "Downloading",
"progressDetail": {
"current": 5178461,
"total": 5178461
},
"progress": "[==================================================>] 5.178MB/5.178MB",
"id": "layer1"
},
{
"status": "Download complete",
"progressDetail": {
"hidecounts": true
},
"id": "layer1"
},
{
"status": "Downloading",
"progressDetail": {
"current": 1048576,
"total": 10485760
},
"progress": "[=====> ] 1.049MB/10.49MB",
"id": "layer2"
},
{
"status": "Downloading",
"progressDetail": {
"current": 10485760,
"total": 10485760
},
"progress": "[==================================================>] 10.49MB/10.49MB",
"id": "layer2"
},
{
"status": "Download complete",
"progressDetail": {
"hidecounts": true
},
"id": "layer2"
},
{
"status": "Extracting",
"progressDetail": {
"current": 1,
"units": "s"
},
"progress": "1 s",
"id": "layer1"
},
{
"status": "Extracting",
"progressDetail": {
"current": 5,
"units": "s"
},
"progress": "5 s",
"id": "layer1"
},
{
"status": "Pull complete",
"progressDetail": {
"hidecounts": true
},
"id": "layer1"
},
{
"status": "Extracting",
"progressDetail": {
"current": 1,
"units": "s"
},
"progress": "1 s",
"id": "layer2"
},
{
"status": "Extracting",
"progressDetail": {
"current": 3,
"units": "s"
},
"progress": "3 s",
"id": "layer2"
},
{
"status": "Pull complete",
"progressDetail": {
"hidecounts": true
},
"id": "layer2"
},
{
"status": "Digest: sha256:abc123"
},
{
"status": "Status: Downloaded newer image for test/image:2025.7.1"
}
]