Compare commits

..

1 Commits

Author SHA1 Message Date
Stefan Agner
94e923f9f6 Use Docker's official registry domain detection logic
Replace the custom IMAGE_WITH_HOST regex with a proper implementation
based on Docker's reference parser (vendor/github.com/distribution/
reference/normalize.go).

Changes:
- Change DOCKER_HUB from "hub.docker.com" to "docker.io" (official default)
- Add DOCKER_HUB_LEGACY for backward compatibility with "hub.docker.com"
- Add IMAGE_DOMAIN_REGEX and get_domain() function that properly detects:
  - localhost (with optional port)
  - Domains with "." (e.g., ghcr.io, 127.0.0.1)
  - Domains with ":" port (e.g., myregistry:5000)
  - IPv6 addresses (e.g., [::1]:5000)
- Update credential handling to support both docker.io and hub.docker.com
- Add comprehensive tests for domain detection

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude <noreply@anthropic.com>
2025-11-27 11:27:14 +01:00
17 changed files with 223 additions and 6292 deletions

View File

@@ -22,7 +22,7 @@ from ..const import (
SOCKET_DOCKER,
)
from ..coresys import CoreSys, CoreSysAttributes
from ..docker.const import DOCKER_HUB
from ..docker.const import DOCKER_HUB, DOCKER_HUB_LEGACY
from ..docker.interface import MAP_ARCH
from ..exceptions import ConfigurationFileError, HassioArchNotFound
from ..utils.common import FileConfiguration, find_one_filetype
@@ -154,8 +154,11 @@ class AddonBuild(FileConfiguration, CoreSysAttributes):
# Use the actual registry URL for the key
# Docker Hub uses "https://index.docker.io/v1/" as the key
# Support both docker.io (official) and hub.docker.com (legacy)
registry_key = (
"https://index.docker.io/v1/" if registry == DOCKER_HUB else registry
"https://index.docker.io/v1/"
if registry in (DOCKER_HUB, DOCKER_HUB_LEGACY)
else registry
)
config = {"auths": {registry_key: {"auth": auth_string}}}

View File

@@ -813,10 +813,6 @@ class RestAPI(CoreSysAttributes):
self.webapp.add_routes(
[
web.get("/docker/info", api_docker.info),
web.post(
"/docker/migrate-storage-driver",
api_docker.migrate_docker_storage_driver,
),
web.post("/docker/options", api_docker.options),
web.get("/docker/registries", api_docker.registries),
web.post("/docker/registries", api_docker.create_registry),

View File

@@ -4,7 +4,6 @@ import logging
from typing import Any
from aiohttp import web
from awesomeversion import AwesomeVersion
import voluptuous as vol
from supervisor.resolution.const import ContextType, IssueType, SuggestionType
@@ -17,7 +16,6 @@ from ..const import (
ATTR_PASSWORD,
ATTR_REGISTRIES,
ATTR_STORAGE,
ATTR_STORAGE_DRIVER,
ATTR_USERNAME,
ATTR_VERSION,
)
@@ -44,12 +42,6 @@ SCHEMA_OPTIONS = vol.Schema(
}
)
SCHEMA_MIGRATE_DOCKER_STORAGE_DRIVER = vol.Schema(
{
vol.Required(ATTR_STORAGE_DRIVER): vol.In(["overlayfs", "overlay2"]),
}
)
class APIDocker(CoreSysAttributes):
"""Handle RESTful API for Docker configuration."""
@@ -131,27 +123,3 @@ class APIDocker(CoreSysAttributes):
del self.sys_docker.config.registries[hostname]
await self.sys_docker.config.save_data()
@api_process
async def migrate_docker_storage_driver(self, request: web.Request) -> None:
"""Migrate Docker storage driver."""
if (
not self.coresys.os.available
or not self.coresys.os.version
or self.coresys.os.version < AwesomeVersion("17.0.dev0")
):
raise APINotFound(
"Home Assistant OS 17.0 or newer required for Docker storage driver migration"
)
body = await api_validate(SCHEMA_MIGRATE_DOCKER_STORAGE_DRIVER, request)
await self.sys_dbus.agent.system.migrate_docker_storage_driver(
body[ATTR_STORAGE_DRIVER]
)
_LOGGER.info("Host system reboot required to apply Docker storage migration")
self.sys_resolution.create_issue(
IssueType.REBOOT_REQUIRED,
ContextType.SYSTEM,
suggestions=[SuggestionType.EXECUTE_REBOOT],
)

View File

@@ -328,7 +328,6 @@ ATTR_STATE = "state"
ATTR_STATIC = "static"
ATTR_STDIN = "stdin"
ATTR_STORAGE = "storage"
ATTR_STORAGE_DRIVER = "storage_driver"
ATTR_SUGGESTIONS = "suggestions"
ATTR_SUPERVISOR = "supervisor"
ATTR_SUPERVISOR_INTERNET = "supervisor_internet"

View File

@@ -15,8 +15,3 @@ class System(DBusInterface):
async def schedule_wipe_device(self) -> bool:
"""Schedule a factory reset on next system boot."""
return await self.connected_dbus.System.call("schedule_wipe_device")
@dbus_connected
async def migrate_docker_storage_driver(self, backend: str) -> None:
"""Migrate Docker storage driver."""
await self.connected_dbus.System.call("migrate_docker_storage_driver", backend)

View File

@@ -15,11 +15,64 @@ from ..const import MACHINE_ID
RE_RETRYING_DOWNLOAD_STATUS = re.compile(r"Retrying in \d+ seconds?")
# Docker Hub registry identifier
DOCKER_HUB = "hub.docker.com"
# Docker Hub registry identifier (official default)
# Docker's default registry is docker.io
DOCKER_HUB = "docker.io"
# Regex to match images with a registry host (e.g., ghcr.io/org/image)
IMAGE_WITH_HOST = re.compile(r"^((?:[a-z0-9]+(?:-[a-z0-9]+)*\.)+[a-z]{2,})\/.+")
# Legacy Docker Hub identifier for backward compatibility
DOCKER_HUB_LEGACY = "hub.docker.com"
# Docker image reference domain regex
# Based on Docker's reference implementation:
# vendor/github.com/distribution/reference/normalize.go
#
# A domain is detected if the part before the first / contains:
# - "localhost" (with optional port)
# - Contains "." (like registry.example.com or 127.0.0.1)
# - Contains ":" (like myregistry:5000)
# - IPv6 addresses in brackets (like [::1]:5000)
#
# Note: Docker also treats uppercase letters as domain indicators since
# namespaces must be lowercase, but this regex handles lowercase matching
# and the get_domain() function validates the domain rules.
IMAGE_DOMAIN_REGEX = re.compile(
r"^("
r"localhost(?::[0-9]+)?|" # localhost with optional port
r"(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])" # domain component
r"(?:\.(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9]))*" # more components
r"(?::[0-9]+)?|" # optional port
r"\[[a-fA-F0-9:]+\](?::[0-9]+)?" # IPv6 with optional port
r")/" # must be followed by /
)
def get_domain(image_ref: str) -> str | None:
"""Extract domain from Docker image reference.
Returns the registry domain if the image reference contains one,
or None if the image uses Docker Hub (docker.io).
Based on Docker's reference implementation:
vendor/github.com/distribution/reference/normalize.go
Examples:
get_domain("nginx") -> None (docker.io)
get_domain("library/nginx") -> None (docker.io)
get_domain("myregistry.com/nginx") -> "myregistry.com"
get_domain("localhost/myimage") -> "localhost"
get_domain("localhost:5000/myimage") -> "localhost:5000"
get_domain("registry.io:5000/org/app:v1") -> "registry.io:5000"
get_domain("[::1]:5000/myimage") -> "[::1]:5000"
"""
match = IMAGE_DOMAIN_REGEX.match(image_ref)
if match:
domain = match.group(1)
# Must contain '.' or ':' or be 'localhost' to be a real domain
# This prevents treating "myuser/myimage" as having domain "myuser"
if "." in domain or ":" in domain or domain == "localhost":
return domain
return None # No domain = Docker Hub (docker.io)
class Capabilities(StrEnum):

View File

@@ -45,7 +45,13 @@ from ..jobs.decorator import Job
from ..jobs.job_group import JobGroup
from ..resolution.const import ContextType, IssueType, SuggestionType
from ..utils.sentry import async_capture_exception
from .const import DOCKER_HUB, ContainerState, PullImageLayerStage, RestartPolicy
from .const import (
DOCKER_HUB,
DOCKER_HUB_LEGACY,
ContainerState,
PullImageLayerStage,
RestartPolicy,
)
from .manager import CommandReturn, PullLogEntry
from .monitor import DockerContainerStateEvent
from .stats import DockerStats
@@ -184,7 +190,8 @@ class DockerInterface(JobGroup, ABC):
stored = self.sys_docker.config.registries[registry]
credentials[ATTR_USERNAME] = stored[ATTR_USERNAME]
credentials[ATTR_PASSWORD] = stored[ATTR_PASSWORD]
if registry != DOCKER_HUB:
# Don't include registry for Docker Hub (both official and legacy)
if registry not in (DOCKER_HUB, DOCKER_HUB_LEGACY):
credentials[ATTR_REGISTRY] = registry
_LOGGER.debug(
@@ -226,16 +233,28 @@ class DockerInterface(JobGroup, ABC):
job = j
break
# There should no longer be any real risk of logs out of order.
# However tests with very small images have shown that sometimes Docker
# skips stages in log. So keeping this one as a safety check on null job
# This likely only occurs if the logs came in out of sync and we got progress before the Pulling FS Layer one
if not job:
raise DockerLogOutOfOrder(
f"Received pull image log with status {reference.status} for image id {reference.id} and parent job {install_job_id} but could not find a matching job, skipping",
_LOGGER.debug,
)
# For progress calculation we assume downloading is 70% of time, extracting is 30% and others stages negligible
# Hopefully these come in order but if they sometimes get out of sync, avoid accidentally going backwards
# If it happens a lot though we may need to reconsider the value of this feature
if job.done:
raise DockerLogOutOfOrder(
f"Received pull image log with status {reference.status} for job {job.uuid} but job was done, skipping",
_LOGGER.debug,
)
if job.stage and stage < PullImageLayerStage.from_status(job.stage):
raise DockerLogOutOfOrder(
f"Received pull image log with status {reference.status} for job {job.uuid} but job was already on stage {job.stage}, skipping",
_LOGGER.debug,
)
# For progress calculation we assume downloading and extracting are each 50% of the time and others stages negligible
progress = job.progress
match stage:
case PullImageLayerStage.DOWNLOADING | PullImageLayerStage.EXTRACTING:
@@ -244,26 +263,22 @@ class DockerInterface(JobGroup, ABC):
and reference.progress_detail.current
and reference.progress_detail.total
):
progress = (
progress = 50 * (
reference.progress_detail.current
/ reference.progress_detail.total
)
if stage == PullImageLayerStage.DOWNLOADING:
progress = 70 * progress
else:
progress = 70 + 30 * progress
if stage == PullImageLayerStage.EXTRACTING:
progress += 50
case (
PullImageLayerStage.VERIFYING_CHECKSUM
| PullImageLayerStage.DOWNLOAD_COMPLETE
):
progress = 70
progress = 50
case PullImageLayerStage.PULL_COMPLETE:
progress = 100
case PullImageLayerStage.RETRYING_DOWNLOAD:
progress = 0
# No real risk of getting things out of order in current implementation
# but keeping this one in case another change to these trips us up.
if stage != PullImageLayerStage.RETRYING_DOWNLOAD and progress < job.progress:
raise DockerLogOutOfOrder(
f"Received pull image log with status {reference.status} for job {job.uuid} that implied progress was {progress} but current progress is {job.progress}, skipping",
@@ -312,44 +327,24 @@ class DockerInterface(JobGroup, ABC):
and job.name == "Pulling container image layer"
]
# Calculate total from layers that have reported size info
# With containerd snapshotter, some layers skip "Downloading" and go directly to
# "Download complete", so we can't wait for all layers to have extra before reporting progress
layers_with_extra = [
job for job in layer_jobs if job.extra and job.extra.get("total")
]
if not layers_with_extra:
return
# First set the total bytes to be downloaded/extracted on the main job
if not install_job.extra:
total = 0
for job in layer_jobs:
if not job.extra:
return
total += job.extra["total"]
install_job.extra = {"total": total}
else:
total = install_job.extra["total"]
# Sum up total bytes. Layers that skip downloading get placeholder extra={1,1}
# which doesn't represent actual size. Separate "real" layers from placeholders.
# Filter guarantees job.extra is not None and has "total" key
real_layers = [
job for job in layers_with_extra if cast(dict, job.extra)["total"] > 1
]
placeholder_layers = [
job for job in layers_with_extra if cast(dict, job.extra)["total"] == 1
]
# If we only have placeholder layers (no real size info yet), don't report progress
# This prevents tiny cached layers from showing inflated progress before
# the actual download sizes are known
if not real_layers:
return
total = sum(cast(dict, job.extra)["total"] for job in real_layers)
if total == 0:
return
# Update install_job.extra with current total (may increase as more layers report)
install_job.extra = {"total": total}
# Calculate progress based on layers that have real size info
# Placeholder layers (skipped downloads) count as complete but don't affect weighted progress
# Then determine total progress based on progress of each sub-job, factoring in size of each compared to total
progress = 0.0
stage = PullImageLayerStage.PULL_COMPLETE
for job in real_layers:
progress += job.progress * (cast(dict, job.extra)["total"] / total)
for job in layer_jobs:
if not job.extra:
return
progress += job.progress * (job.extra["total"] / total)
job_stage = PullImageLayerStage.from_status(cast(str, job.stage))
if job_stage < PullImageLayerStage.EXTRACTING:
@@ -360,28 +355,6 @@ class DockerInterface(JobGroup, ABC):
):
stage = PullImageLayerStage.EXTRACTING
# Check if any layers are still pending (no extra yet)
# If so, we're still in downloading phase even if all layers_with_extra are done
layers_pending = len(layer_jobs) - len(layers_with_extra)
if layers_pending > 0:
# Scale progress to account for unreported layers
# This prevents tiny layers that complete first from showing inflated progress
# e.g., if 2/25 layers reported at 70%, actual progress is ~70 * 2/25 = 5.6%
layers_fraction = len(layers_with_extra) / len(layer_jobs)
progress = progress * layers_fraction
if stage == PullImageLayerStage.PULL_COMPLETE:
stage = PullImageLayerStage.DOWNLOADING
# Also check if all placeholders are done but we're waiting for real layers
if placeholder_layers and stage == PullImageLayerStage.PULL_COMPLETE:
# All real layers are done, but check if placeholders are still extracting
for job in placeholder_layers:
job_stage = PullImageLayerStage.from_status(cast(str, job.stage))
if job_stage < PullImageLayerStage.PULL_COMPLETE:
stage = PullImageLayerStage.EXTRACTING
break
# Ensure progress is 100 at this point to prevent float drift
if stage == PullImageLayerStage.PULL_COMPLETE:
progress = 100

View File

@@ -49,7 +49,7 @@ from ..exceptions import (
)
from ..utils.common import FileConfiguration
from ..validate import SCHEMA_DOCKER_CONFIG
from .const import DOCKER_HUB, IMAGE_WITH_HOST, LABEL_MANAGED
from .const import DOCKER_HUB, DOCKER_HUB_LEGACY, LABEL_MANAGED, get_domain
from .monitor import DockerMonitor
from .network import DockerNetwork
@@ -111,15 +111,10 @@ class PullProgressDetail:
"""Progress detail information for pull.
Documentation lacking but both of these seem to be in bytes when populated.
Containerd-snapshot update - When leveraging this new feature, this information
becomes useless to us while extracting. It simply tells elapsed time using
current and units.
"""
current: int | None = None
total: int | None = None
units: str | None = None
@classmethod
def from_pull_log_dict(cls, value: dict[str, int]) -> PullProgressDetail:
@@ -212,19 +207,25 @@ class DockerConfig(FileConfiguration):
Matches the image against configured registries and returns the registry
name if found, or None if no matching credentials are configured.
Uses Docker's domain detection logic from:
vendor/github.com/distribution/reference/normalize.go
"""
if not self.registries:
return None
# Check if image uses a custom registry (e.g., ghcr.io/org/image)
matcher = IMAGE_WITH_HOST.match(image)
if matcher:
registry = matcher.group(1)
if registry in self.registries:
return registry
# If no registry prefix, check for Docker Hub credentials
elif DOCKER_HUB in self.registries:
return DOCKER_HUB
domain = get_domain(image)
if domain:
if domain in self.registries:
return domain
else:
# No domain prefix means Docker Hub
# Support both docker.io (official) and hub.docker.com (legacy)
if DOCKER_HUB in self.registries:
return DOCKER_HUB
if DOCKER_HUB_LEGACY in self.registries:
return DOCKER_HUB_LEGACY
return None

View File

@@ -4,11 +4,6 @@ from aiohttp.test_utils import TestClient
import pytest
from supervisor.coresys import CoreSys
from supervisor.resolution.const import ContextType, IssueType, SuggestionType
from supervisor.resolution.data import Issue, Suggestion
from tests.dbus_service_mocks.agent_system import System as SystemService
from tests.dbus_service_mocks.base import DBusServiceMock
@pytest.mark.asyncio
@@ -89,79 +84,3 @@ async def test_registry_not_found(api_client: TestClient):
assert resp.status == 404
body = await resp.json()
assert body["message"] == "Hostname bad does not exist in registries"
@pytest.mark.parametrize("os_available", ["17.0.rc1"], indirect=True)
async def test_api_migrate_docker_storage_driver(
api_client: TestClient,
coresys: CoreSys,
os_agent_services: dict[str, DBusServiceMock],
os_available,
):
"""Test Docker storage driver migration."""
system_service: SystemService = os_agent_services["agent_system"]
system_service.MigrateDockerStorageDriver.calls.clear()
resp = await api_client.post(
"/docker/migrate-storage-driver",
json={"storage_driver": "overlayfs"},
)
assert resp.status == 200
assert system_service.MigrateDockerStorageDriver.calls == [("overlayfs",)]
assert (
Issue(IssueType.REBOOT_REQUIRED, ContextType.SYSTEM)
in coresys.resolution.issues
)
assert (
Suggestion(SuggestionType.EXECUTE_REBOOT, ContextType.SYSTEM)
in coresys.resolution.suggestions
)
# Test migration back to overlay2 (graph driver)
system_service.MigrateDockerStorageDriver.calls.clear()
resp = await api_client.post(
"/docker/migrate-storage-driver",
json={"storage_driver": "overlay2"},
)
assert resp.status == 200
assert system_service.MigrateDockerStorageDriver.calls == [("overlay2",)]
@pytest.mark.parametrize("os_available", ["17.0.rc1"], indirect=True)
async def test_api_migrate_docker_storage_driver_invalid_backend(
api_client: TestClient,
os_available,
):
"""Test 400 is returned for invalid storage driver."""
resp = await api_client.post(
"/docker/migrate-storage-driver",
json={"storage_driver": "invalid"},
)
assert resp.status == 400
async def test_api_migrate_docker_storage_driver_not_os(
api_client: TestClient,
coresys: CoreSys,
):
"""Test 404 is returned if not running on HAOS."""
resp = await api_client.post(
"/docker/migrate-storage-driver",
json={"storage_driver": "overlayfs"},
)
assert resp.status == 404
@pytest.mark.parametrize("os_available", ["16.2"], indirect=True)
async def test_api_migrate_docker_storage_driver_old_os(
api_client: TestClient,
coresys: CoreSys,
os_available,
):
"""Test 404 is returned if OS is older than 17.0."""
resp = await api_client.post(
"/docker/migrate-storage-driver",
json={"storage_driver": "overlayfs"},
)
assert resp.status == 404

View File

@@ -323,29 +323,29 @@ async def test_api_progress_updates_home_assistant_update(
},
{
"stage": None,
"progress": 1.7,
"progress": 1.2,
"done": False,
},
{
"stage": None,
"progress": 4.0,
"progress": 2.8,
"done": False,
},
]
assert events[-5:] == [
{
"stage": None,
"progress": 98.2,
"progress": 97.2,
"done": False,
},
{
"stage": None,
"progress": 98.3,
"progress": 98.4,
"done": False,
},
{
"stage": None,
"progress": 99.3,
"progress": 99.4,
"done": False,
},
{

View File

@@ -773,29 +773,29 @@ async def test_api_progress_updates_addon_install_update(
},
{
"stage": None,
"progress": 1.7,
"progress": 1.2,
"done": False,
},
{
"stage": None,
"progress": 4.0,
"progress": 2.8,
"done": False,
},
]
assert events[-5:] == [
{
"stage": None,
"progress": 98.2,
"progress": 97.2,
"done": False,
},
{
"stage": None,
"progress": 98.3,
"progress": 98.4,
"done": False,
},
{
"stage": None,
"progress": 99.3,
"progress": 99.4,
"done": False,
},
{

View File

@@ -371,29 +371,29 @@ async def test_api_progress_updates_supervisor_update(
},
{
"stage": None,
"progress": 1.7,
"progress": 1.2,
"done": False,
},
{
"stage": None,
"progress": 4.0,
"progress": 2.8,
"done": False,
},
]
assert events[-5:] == [
{
"stage": None,
"progress": 98.2,
"progress": 97.2,
"done": False,
},
{
"stage": None,
"progress": 98.3,
"progress": 98.4,
"done": False,
},
{
"stage": None,
"progress": 99.3,
"progress": 99.4,
"done": False,
},
{

View File

@@ -1,6 +1,6 @@
"""Mock of OS Agent System dbus service."""
from dbus_fast import DBusError, ErrorType
from dbus_fast import DBusError
from .base import DBusServiceMock, dbus_method
@@ -21,7 +21,6 @@ class System(DBusServiceMock):
object_path = "/io/hass/os/System"
interface = "io.hass.os.System"
response_schedule_wipe_device: bool | DBusError = True
response_migrate_docker_storage_driver: None | DBusError = None
@dbus_method()
def ScheduleWipeDevice(self) -> "b":
@@ -29,14 +28,3 @@ class System(DBusServiceMock):
if isinstance(self.response_schedule_wipe_device, DBusError):
raise self.response_schedule_wipe_device # pylint: disable=raising-bad-type
return self.response_schedule_wipe_device
@dbus_method()
def MigrateDockerStorageDriver(self, backend: "s") -> None:
"""Migrate Docker storage driver."""
if isinstance(self.response_migrate_docker_storage_driver, DBusError):
raise self.response_migrate_docker_storage_driver # pylint: disable=raising-bad-type
if backend not in ("overlayfs", "overlay2"):
raise DBusError(
ErrorType.FAILED,
f"unsupported driver: {backend} (only 'overlayfs' and 'overlay2' are supported)",
)

View File

@@ -1,11 +1,50 @@
"""Test docker login."""
import pytest
# pylint: disable=protected-access
from supervisor.coresys import CoreSys
from supervisor.docker.const import DOCKER_HUB
from supervisor.docker.const import DOCKER_HUB, DOCKER_HUB_LEGACY, get_domain
from supervisor.docker.interface import DockerInterface
@pytest.mark.parametrize(
("image_ref", "expected_domain"),
[
# No domain - Docker Hub images
("nginx", None),
("nginx:latest", None),
("library/nginx", None),
("library/nginx:latest", None),
("homeassistant/amd64-supervisor", None),
("homeassistant/amd64-supervisor:1.2.3", None),
# Domain with dot
("ghcr.io/homeassistant/amd64-supervisor", "ghcr.io"),
("ghcr.io/homeassistant/amd64-supervisor:latest", "ghcr.io"),
("myregistry.com/nginx", "myregistry.com"),
("registry.example.com/org/image:v1", "registry.example.com"),
("127.0.0.1/myimage", "127.0.0.1"),
# Domain with port
("myregistry:5000/myimage", "myregistry:5000"),
("localhost:5000/myimage", "localhost:5000"),
("registry.io:5000/org/app:v1", "registry.io:5000"),
# localhost special case
("localhost/myimage", "localhost"),
("localhost/myimage:tag", "localhost"),
# IPv6
("[::1]:5000/myimage", "[::1]:5000"),
("[2001:db8::1]:5000/myimage:tag", "[2001:db8::1]:5000"),
],
)
def test_get_domain(image_ref: str, expected_domain: str | None):
"""Test get_domain extracts registry domain from image reference.
Based on Docker's reference implementation:
vendor/github.com/distribution/reference/normalize.go
"""
assert get_domain(image_ref) == expected_domain
def test_no_credentials(coresys: CoreSys, test_docker_interface: DockerInterface):
"""Test no credentials."""
coresys.docker.config._data["registries"] = {
@@ -47,3 +86,36 @@ def test_matching_credentials(coresys: CoreSys, test_docker_interface: DockerInt
)
assert credentials["username"] == "Spongebob Squarepants"
assert "registry" not in credentials
def test_legacy_docker_hub_credentials(
coresys: CoreSys, test_docker_interface: DockerInterface
):
"""Test legacy hub.docker.com credentials are used for Docker Hub images."""
coresys.docker.config._data["registries"] = {
DOCKER_HUB_LEGACY: {"username": "LegacyUser", "password": "Password1!"},
}
credentials = test_docker_interface._get_credentials(
"homeassistant/amd64-supervisor"
)
assert credentials["username"] == "LegacyUser"
# No registry should be included for Docker Hub
assert "registry" not in credentials
def test_docker_hub_preferred_over_legacy(
coresys: CoreSys, test_docker_interface: DockerInterface
):
"""Test docker.io is preferred over legacy hub.docker.com when both exist."""
coresys.docker.config._data["registries"] = {
DOCKER_HUB: {"username": "NewUser", "password": "Password1!"},
DOCKER_HUB_LEGACY: {"username": "LegacyUser", "password": "Password2!"},
}
credentials = test_docker_interface._get_credentials(
"homeassistant/amd64-supervisor"
)
# docker.io should be preferred
assert credentials["username"] == "NewUser"
assert "registry" not in credentials

View File

@@ -26,10 +26,7 @@ from supervisor.exceptions import (
DockerNotFound,
DockerRequestError,
)
from supervisor.homeassistant.const import WSEvent, WSType
from supervisor.jobs import ChildJobSyncFilter, JobSchedulerOptions, SupervisorJob
from supervisor.jobs.decorator import Job
from supervisor.supervisor import Supervisor
from supervisor.jobs import JobSchedulerOptions, SupervisorJob
from tests.common import AsyncIterator, load_json_fixture
@@ -317,7 +314,7 @@ async def test_install_fires_progress_events(
},
{"status": "Already exists", "progressDetail": {}, "id": "6e771e15690e"},
{"status": "Pulling fs layer", "progressDetail": {}, "id": "1578b14a573c"},
{"status": "Waiting", "progressDetail": {}, "id": "1578b14a573c"},
{"status": "Waiting", "progressDetail": {}, "id": "2488d0e401e1"},
{
"status": "Downloading",
"progressDetail": {"current": 1378, "total": 1486},
@@ -387,7 +384,7 @@ async def test_install_fires_progress_events(
job_id=ANY,
status="Waiting",
progress_detail=PullProgressDetail(),
id="1578b14a573c",
id="2488d0e401e1",
),
PullLogEntry(
job_id=ANY,
@@ -541,7 +538,6 @@ async def test_install_raises_on_pull_error(
"status": "Pulling from home-assistant/odroid-n2-homeassistant",
"id": "2025.7.2",
},
{"status": "Pulling fs layer", "progressDetail": {}, "id": "1578b14a573c"},
{
"status": "Downloading",
"progressDetail": {"current": 1378, "total": 1486},
@@ -596,39 +592,16 @@ async def test_install_progress_handles_download_restart(
capture_exception.assert_not_called()
@pytest.mark.parametrize(
"extract_log",
[
{
"status": "Extracting",
"progressDetail": {"current": 96, "total": 96},
"progress": "[==================================================>] 96B/96B",
"id": "02a6e69d8d00",
},
{
"status": "Extracting",
"progressDetail": {"current": 1, "units": "s"},
"progress": "1 s",
"id": "02a6e69d8d00",
},
],
ids=["normal_extract_log", "containerd_snapshot_extract_log"],
)
async def test_install_progress_handles_layers_skipping_download(
coresys: CoreSys,
test_docker_interface: DockerInterface,
capture_exception: Mock,
extract_log: dict[str, Any],
):
"""Test install handles small layers that skip downloading phase and go directly to download complete.
Reproduces the real-world scenario from Supervisor issue #6286:
- Small layer (02a6e69d8d00) completes Download complete at 10:14:08 without ever Downloading
- Normal layer (3f4a84073184) starts Downloading at 10:14:09 with progress updates
Under containerd snapshotter this presumably can still occur and Supervisor will have even less info
since extract logs don't have a total. Supervisor should generally just ignore these and set progress
from the larger images that take all the time.
"""
coresys.core.set_state(CoreState.RUNNING)
@@ -672,7 +645,12 @@ async def test_install_progress_handles_layers_skipping_download(
},
{"status": "Pull complete", "progressDetail": {}, "id": "3f4a84073184"},
# Small layer finally extracts (10:14:58 in logs)
extract_log,
{
"status": "Extracting",
"progressDetail": {"current": 96, "total": 96},
"progress": "[==================================================>] 96B/96B",
"id": "02a6e69d8d00",
},
{"status": "Pull complete", "progressDetail": {}, "id": "02a6e69d8d00"},
{"status": "Digest: sha256:test"},
{"status": "Status: Downloaded newer image for test/image:latest"},
@@ -709,13 +687,11 @@ async def test_install_progress_handles_layers_skipping_download(
await install_task
await event.wait()
# First update from layer download should have rather low progress ((260937/25371463) ~= 1%)
assert install_job_snapshots[0]["progress"] < 2
# First update from layer download should have rather low progress ((260937/25445459) / 2 ~ 0.5%)
assert install_job_snapshots[0]["progress"] < 1
# Total 7 events should lead to a progress update on the install job:
# 3 Downloading events + Download complete (70%) + Extracting + Pull complete (100%) + stage change
# Note: The small placeholder layer ({1,1}) is excluded from progress calculation
assert len(install_job_snapshots) == 7
# Total 8 events should lead to a progress update on the install job
assert len(install_job_snapshots) == 8
# Job should complete successfully
assert job.done is True
@@ -782,170 +758,3 @@ async def test_missing_total_handled_gracefully(
await event.wait()
capture_exception.assert_not_called()
async def test_install_progress_containerd_snapshot(
coresys: CoreSys, ha_ws_client: AsyncMock
):
"""Test install handles docker progress events using containerd snapshotter."""
coresys.core.set_state(CoreState.RUNNING)
class TestDockerInterface(DockerInterface):
"""Test interface for events."""
@property
def name(self) -> str:
"""Name of test interface."""
return "test_interface"
@Job(
name="mock_docker_interface_install",
child_job_syncs=[
ChildJobSyncFilter("docker_interface_install", progress_allocation=1.0)
],
)
async def mock_install(self) -> None:
"""Mock install."""
await super().install(
AwesomeVersion("1.2.3"), image="test", arch=CpuArch.I386
)
# Fixture emulates log as received when using containerd snapshotter
# Should not error but progress gets choppier once extraction starts
logs = load_json_fixture("docker_pull_image_log_containerd_snapshot.json")
coresys.docker.images.pull.return_value = AsyncIterator(logs)
test_docker_interface = TestDockerInterface(coresys)
with patch.object(Supervisor, "arch", PropertyMock(return_value="i386")):
await test_docker_interface.mock_install()
coresys.docker.images.pull.assert_called_once_with(
"test", tag="1.2.3", platform="linux/386", auth=None, stream=True
)
coresys.docker.images.inspect.assert_called_once_with("test:1.2.3")
await asyncio.sleep(1)
def job_event(progress: float, done: bool = False):
return {
"type": WSType.SUPERVISOR_EVENT,
"data": {
"event": WSEvent.JOB,
"data": {
"name": "mock_docker_interface_install",
"reference": "test_interface",
"uuid": ANY,
"progress": progress,
"stage": None,
"done": done,
"parent_id": None,
"errors": [],
"created": ANY,
"extra": None,
},
},
}
# Get progress values from the events
job_events = [
c.args[0]
for c in ha_ws_client.async_send_command.call_args_list
if c.args[0].get("data", {}).get("event") == WSEvent.JOB
and c.args[0].get("data", {}).get("data", {}).get("name")
== "mock_docker_interface_install"
]
progress_values = [e["data"]["data"]["progress"] for e in job_events]
# Should have multiple progress updates (not just 0 and 100)
assert len(progress_values) >= 10, (
f"Expected >=10 progress updates, got {len(progress_values)}"
)
# Progress should be monotonically increasing
for i in range(1, len(progress_values)):
assert progress_values[i] >= progress_values[i - 1], (
f"Progress decreased at index {i}: {progress_values[i - 1]} -> {progress_values[i]}"
)
# Should start at 0 and end at 100
assert progress_values[0] == 0
assert progress_values[-1] == 100
# Should have progress values in the downloading phase (< 70%)
# Note: with layer scaling, early progress may be lower than before
downloading_progress = [p for p in progress_values if 0 < p < 70]
assert len(downloading_progress) > 0, (
"Expected progress updates during downloading phase"
)
async def test_install_progress_containerd_snapshotter_real_world(
coresys: CoreSys, ha_ws_client: AsyncMock
):
"""Test install handles real-world containerd snapshotter events.
This test uses real pull events captured from a Home Assistant Core update
where some layers skip the Downloading phase entirely (going directly from
"Pulling fs layer" to "Download complete"). This causes the bug where progress
jumps from 0 to 100 without intermediate updates.
Root cause: _update_install_job_status() returns early if ANY layer has
extra=None. Layers that skip Downloading don't get extra until Download complete,
so progress cannot be calculated until ALL layers reach Download complete.
"""
coresys.core.set_state(CoreState.RUNNING)
class TestDockerInterface(DockerInterface):
"""Test interface for events."""
@property
def name(self) -> str:
"""Name of test interface."""
return "test_interface"
@Job(
name="mock_docker_interface_install_realworld",
child_job_syncs=[
ChildJobSyncFilter("docker_interface_install", progress_allocation=1.0)
],
)
async def mock_install(self) -> None:
"""Mock install."""
await super().install(
AwesomeVersion("1.2.3"), image="test", arch=CpuArch.I386
)
# Real-world fixture: 12 layers, 262 Downloading events
# Some layers skip Downloading entirely (small layers with containerd snapshotter)
logs = load_json_fixture("docker_pull_image_log_containerd_snapshotter_real.json")
coresys.docker.images.pull.return_value = AsyncIterator(logs)
test_docker_interface = TestDockerInterface(coresys)
with patch.object(Supervisor, "arch", PropertyMock(return_value="i386")):
await test_docker_interface.mock_install()
await asyncio.sleep(1)
# Get progress events for the parent job (what UI sees)
job_events = [
c.args[0]
for c in ha_ws_client.async_send_command.call_args_list
if c.args[0].get("data", {}).get("event") == WSEvent.JOB
and c.args[0].get("data", {}).get("data", {}).get("name")
== "mock_docker_interface_install_realworld"
]
progress_values = [e["data"]["data"]["progress"] for e in job_events]
# We should have intermediate progress updates, not just 0 and 100
assert len(progress_values) > 3, (
f"BUG: Progress jumped 0->100 without intermediate updates. "
f"Got {len(progress_values)} updates: {progress_values}. "
f"Expected intermediate progress during the 262 Downloading events."
)
# Progress should be monotonically increasing
for i in range(1, len(progress_values)):
assert progress_values[i] >= progress_values[i - 1]
# Should see progress in downloading phase (0-70%)
downloading_progress = [p for p in progress_values if 0 < p < 70]
assert len(downloading_progress) > 0

View File

@@ -1,196 +0,0 @@
[
{
"status": "Pulling from home-assistant/home-assistant",
"id": "2025.12.0.dev202511080235"
},
{ "status": "Pulling fs layer", "progressDetail": {}, "id": "eafecc6b43cc" },
{ "status": "Pulling fs layer", "progressDetail": {}, "id": "333270549f95" },
{
"status": "Downloading",
"progressDetail": { "current": 1048576, "total": 21863319 },
"progress": "[==\u003e ] 1.049MB/21.86MB",
"id": "eafecc6b43cc"
},
{
"status": "Downloading",
"progressDetail": { "current": 1048576, "total": 21179924 },
"progress": "[==\u003e ] 1.049MB/21.18MB",
"id": "333270549f95"
},
{
"status": "Downloading",
"progressDetail": { "current": 4194304, "total": 21863319 },
"progress": "[=========\u003e ] 4.194MB/21.86MB",
"id": "eafecc6b43cc"
},
{
"status": "Downloading",
"progressDetail": { "current": 2097152, "total": 21179924 },
"progress": "[====\u003e ] 2.097MB/21.18MB",
"id": "333270549f95"
},
{
"status": "Downloading",
"progressDetail": { "current": 7340032, "total": 21863319 },
"progress": "[================\u003e ] 7.34MB/21.86MB",
"id": "eafecc6b43cc"
},
{
"status": "Downloading",
"progressDetail": { "current": 4194304, "total": 21179924 },
"progress": "[=========\u003e ] 4.194MB/21.18MB",
"id": "333270549f95"
},
{
"status": "Downloading",
"progressDetail": { "current": 13631488, "total": 21863319 },
"progress": "[===============================\u003e ] 13.63MB/21.86MB",
"id": "eafecc6b43cc"
},
{
"status": "Downloading",
"progressDetail": { "current": 8388608, "total": 21179924 },
"progress": "[===================\u003e ] 8.389MB/21.18MB",
"id": "333270549f95"
},
{
"status": "Downloading",
"progressDetail": { "current": 17825792, "total": 21863319 },
"progress": "[========================================\u003e ] 17.83MB/21.86MB",
"id": "eafecc6b43cc"
},
{
"status": "Downloading",
"progressDetail": { "current": 12582912, "total": 21179924 },
"progress": "[=============================\u003e ] 12.58MB/21.18MB",
"id": "333270549f95"
},
{
"status": "Downloading",
"progressDetail": { "current": 21863319, "total": 21863319 },
"progress": "[==================================================\u003e] 21.86MB/21.86MB",
"id": "eafecc6b43cc"
},
{
"status": "Downloading",
"progressDetail": { "current": 16777216, "total": 21179924 },
"progress": "[=======================================\u003e ] 16.78MB/21.18MB",
"id": "333270549f95"
},
{
"status": "Downloading",
"progressDetail": { "current": 21179924, "total": 21179924 },
"progress": "[==================================================\u003e] 21.18MB/21.18MB",
"id": "333270549f95"
},
{
"status": "Download complete",
"progressDetail": { "hidecounts": true },
"id": "eafecc6b43cc"
},
{
"status": "Download complete",
"progressDetail": { "hidecounts": true },
"id": "333270549f95"
},
{
"status": "Extracting",
"progressDetail": { "current": 1, "units": "s" },
"progress": "1 s",
"id": "333270549f95"
},
{
"status": "Extracting",
"progressDetail": { "current": 1, "units": "s" },
"progress": "1 s",
"id": "333270549f95"
},
{
"status": "Pull complete",
"progressDetail": { "hidecounts": true },
"id": "333270549f95"
},
{
"status": "Extracting",
"progressDetail": { "current": 1, "units": "s" },
"progress": "1 s",
"id": "eafecc6b43cc"
},
{
"status": "Extracting",
"progressDetail": { "current": 1, "units": "s" },
"progress": "1 s",
"id": "eafecc6b43cc"
},
{
"status": "Extracting",
"progressDetail": { "current": 2, "units": "s" },
"progress": "2 s",
"id": "eafecc6b43cc"
},
{
"status": "Extracting",
"progressDetail": { "current": 2, "units": "s" },
"progress": "2 s",
"id": "eafecc6b43cc"
},
{
"status": "Extracting",
"progressDetail": { "current": 3, "units": "s" },
"progress": "3 s",
"id": "eafecc6b43cc"
},
{
"status": "Extracting",
"progressDetail": { "current": 3, "units": "s" },
"progress": "3 s",
"id": "eafecc6b43cc"
},
{
"status": "Extracting",
"progressDetail": { "current": 4, "units": "s" },
"progress": "4 s",
"id": "eafecc6b43cc"
},
{
"status": "Extracting",
"progressDetail": { "current": 4, "units": "s" },
"progress": "4 s",
"id": "eafecc6b43cc"
},
{
"status": "Extracting",
"progressDetail": { "current": 5, "units": "s" },
"progress": "5 s",
"id": "eafecc6b43cc"
},
{
"status": "Extracting",
"progressDetail": { "current": 5, "units": "s" },
"progress": "5 s",
"id": "eafecc6b43cc"
},
{
"status": "Extracting",
"progressDetail": { "current": 6, "units": "s" },
"progress": "6 s",
"id": "eafecc6b43cc"
},
{
"status": "Extracting",
"progressDetail": { "current": 6, "units": "s" },
"progress": "6 s",
"id": "eafecc6b43cc"
},
{
"status": "Pull complete",
"progressDetail": { "hidecounts": true },
"id": "eafecc6b43cc"
},
{
"status": "Digest: sha256:bfc9efc13552c0c228f3d9d35987331cce68b43c9bc79c80a57eeadadd44cccf"
},
{
"status": "Status: Downloaded newer image for ghcr.io/home-assistant/home-assistant:2025.12.0.dev202511080235"
}
]

File diff suppressed because it is too large Load Diff