Mirror of https://github.com/home-assistant/supervisor.git, synced 2025-11-04 00:19:36 +00:00
Compare commits: 2025.10.0...reject-cor (34 commits)
| SHA1 |
|---|
| 5d9d33c9fa |
| e4959b4f10 |
| 78353220de |
| 131cc3b6d1 |
| b92f5976a3 |
| 370c961c9e |
| b903e1196f |
| 9f8e8ab15a |
| 56bffc839b |
| 952a553c3b |
| 717f1c85f5 |
| ffd498a515 |
| 35f0645cb9 |
| 15c6547382 |
| adefa242e5 |
| 583a8a82fb |
| 322df15e73 |
| 51490c8e41 |
| 3c21a8b8ef |
| ddb8588d77 |
| 81e46b20b8 |
| 5041a1ed5c |
| 337731a55a |
| 53a8044aff |
| c71553f37d |
| c1eb97d8ab |
| 190b734332 |
| 559b6982a3 |
| 301362e9e5 |
| fc928d294c |
| f42aeb4937 |
| fd21886de9 |
| e4bb415e30 |
| 622dda5382 |

.github/workflows/builder.yml (vendored): 13 changes
@@ -107,7 +107,7 @@ jobs:
      # home-assistant/wheels doesn't support sha pinning
      - name: Build wheels
        if: needs.init.outputs.requirements == 'true'
        uses: home-assistant/wheels@2025.09.1
        uses: home-assistant/wheels@2025.10.0
        with:
          abi: cp313
          tag: musllinux_1_2
@@ -132,7 +132,7 @@ jobs:

      - name: Install Cosign
        if: needs.init.outputs.publish == 'true'
        uses: sigstore/cosign-installer@d7543c93d881b35a8faa02e8e3605f69b7a1ce62 # v3.10.0
        uses: sigstore/cosign-installer@faadad0cce49287aee09b3a48701e75088a2c6ad # v4.0.0
        with:
          cosign-release: "v2.5.3"

@@ -320,6 +320,15 @@ jobs:
            exit 1
          fi

      - name: Wait for Home Assistant Core to start
        run: |
          echo "Waiting for Home Assistant Core to start"
          timeout 10m ha supervisor logs -f -n 10000 -b 0 | grep -q "Detect a running Home Assistant instance"
          if [ "$?" != "0" ]; then
            echo "Home Assistant Core did not start within 10 minutes"
            exit 1
          fi

      - name: Create full backup
        id: backup
        run: |

.github/workflows/ci.yaml (vendored): 2 changes
@@ -346,7 +346,7 @@ jobs:
        with:
          python-version: ${{ needs.prepare.outputs.python-version }}
      - name: Install Cosign
        uses: sigstore/cosign-installer@d7543c93d881b35a8faa02e8e3605f69b7a1ce62 # v3.10.0
        uses: sigstore/cosign-installer@faadad0cce49287aee09b3a48701e75088a2c6ad # v4.0.0
        with:
          cosign-release: "v2.5.3"
      - name: Restore Python virtual environment

.github/workflows/stale.yml (vendored): 3 changes
@@ -9,13 +9,14 @@ jobs:
  stale:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/stale@3a9db7e6a41a89f618792c92c0e97cc736e1b13f # v10.0.0
      - uses: actions/stale@5f858e3efba33a5ca4407a664cc011ad407f2008 # v10.1.0
        with:
          repo-token: ${{ secrets.GITHUB_TOKEN }}
          days-before-stale: 30
          days-before-close: 7
          stale-issue-label: "stale"
          exempt-issue-labels: "no-stale,Help%20wanted,help-wanted,pinned,rfc,security"
          only-issue-types: "bug"
          stale-issue-message: >
            There hasn't been any activity on this issue recently. Due to the
            high number of incoming GitHub notifications, we have to clean some

@@ -1,14 +1,14 @@
aiodns==3.5.0
aiohttp==3.12.15
aiohttp==3.13.1
atomicwrites-homeassistant==1.4.1
attrs==25.3.0
attrs==25.4.0
awesomeversion==25.8.0
blockbuster==1.5.25
brotli==1.1.0
ciso8601==2.3.3
colorlog==6.9.0
colorlog==6.10.1
cpe==1.3.1
cryptography==46.0.2
cryptography==46.0.3
debugpy==1.8.17
deepmerge==2.0
dirhash==0.5.0
@@ -19,12 +19,12 @@ jinja2==3.1.6
log-rate-limit==1.4.2
orjson==3.11.3
pulsectl==24.12.0
pyudev==0.24.3
pyudev==0.24.4
PyYAML==6.0.3
requests==2.32.5
securetar==2025.2.1
sentry-sdk==2.39.0
sentry-sdk==2.42.1
setuptools==80.9.0
voluptuous==0.15.2
dbus-fast==2.44.3
dbus-fast==2.44.5
zlib-fast==0.2.1

@@ -1,16 +1,16 @@
astroid==3.3.11
coverage==7.10.7
astroid==4.0.1
coverage==7.11.0
mypy==1.18.2
pre-commit==4.3.0
pylint==3.3.8
pylint==4.0.2
pytest-aiohttp==1.1.0
pytest-asyncio==0.25.2
pytest-cov==7.0.0
pytest-timeout==2.4.0
pytest==8.4.2
ruff==0.13.2
ruff==0.14.2
time-machine==2.19.0
types-docker==7.1.0.20250916
types-docker==7.1.0.20251009
types-pyyaml==6.0.12.20250915
types-requests==2.32.4.20250913
urllib3==2.5.0

@@ -226,6 +226,7 @@ class Addon(AddonModel):
        )

        await self._check_ingress_port()

        default_image = self._image(self.data)
        try:
            await self.instance.attach(version=self.version)
@@ -774,7 +775,6 @@ class Addon(AddonModel):
            raise AddonsError("Missing from store, cannot install!")

        await self.sys_addons.data.install(self.addon_store)
        await self.load()

        def setup_data():
            if not self.path_data.is_dir():
@@ -797,6 +797,9 @@ class Addon(AddonModel):
            await self.sys_addons.data.uninstall(self)
            raise AddonsError() from err

        # Finish initialization and set up listeners
        await self.load()

        # Add to addon manager
        self.sys_addons.local[self.slug] = self

@@ -9,8 +9,6 @@ from typing import Self, Union

from attr import evolve

from supervisor.jobs.const import JobConcurrency

from ..const import AddonBoot, AddonStartup, AddonState
from ..coresys import CoreSys, CoreSysAttributes
from ..exceptions import (
@@ -21,6 +19,8 @@ from ..exceptions import (
    DockerError,
    HassioError,
)
from ..jobs import ChildJobSyncFilter
from ..jobs.const import JobConcurrency
from ..jobs.decorator import Job, JobCondition
from ..resolution.const import ContextType, IssueType, SuggestionType
from ..store.addon import AddonStore
@@ -182,6 +182,9 @@ class AddonManager(CoreSysAttributes):
        conditions=ADDON_UPDATE_CONDITIONS,
        on_condition=AddonsJobError,
        concurrency=JobConcurrency.QUEUE,
        child_job_syncs=[
            ChildJobSyncFilter("docker_interface_install", progress_allocation=1.0)
        ],
    )
    async def install(
        self, slug: str, *, validation_complete: asyncio.Event | None = None
@@ -229,6 +232,13 @@ class AddonManager(CoreSysAttributes):
        name="addon_manager_update",
        conditions=ADDON_UPDATE_CONDITIONS,
        on_condition=AddonsJobError,
        # We assume for now the docker image pull is 100% of this task for progress
        # allocation. But from a user perspective that isn't true. Other steps
        # that take time which is not accounted for in progress include:
        # partial backup, image cleanup, apparmor update, and addon restart
        child_job_syncs=[
            ChildJobSyncFilter("docker_interface_install", progress_allocation=1.0)
        ],
    )
    async def update(
        self,
@@ -271,7 +281,10 @@ class AddonManager(CoreSysAttributes):
                addons=[addon.slug],
            )

        return await addon.update()
        task = await addon.update()

        _LOGGER.info("Add-on '%s' successfully updated", slug)
        return task

    @Job(
        name="addon_manager_rebuild",

@@ -72,6 +72,7 @@ from ..const import (
    ATTR_TYPE,
    ATTR_UART,
    ATTR_UDEV,
    ATTR_ULIMITS,
    ATTR_URL,
    ATTR_USB,
    ATTR_VERSION,
@@ -462,6 +463,11 @@ class AddonModel(JobGroup, ABC):
        """Return True if the add-on have his own udev."""
        return self.data[ATTR_UDEV]

    @property
    def ulimits(self) -> dict[str, Any]:
        """Return ulimits configuration."""
        return self.data[ATTR_ULIMITS]

    @property
    def with_kernel_modules(self) -> bool:
        """Return True if the add-on access to kernel modules."""

@@ -88,6 +88,7 @@ from ..const import (
    ATTR_TYPE,
    ATTR_UART,
    ATTR_UDEV,
    ATTR_ULIMITS,
    ATTR_URL,
    ATTR_USB,
    ATTR_USER,
@@ -423,6 +424,20 @@ _SCHEMA_ADDON_CONFIG = vol.Schema(
            False,
        ),
        vol.Optional(ATTR_IMAGE): docker_image,
        vol.Optional(ATTR_ULIMITS, default=dict): vol.Any(
            {str: vol.Coerce(int)},  # Simple format: {name: limit}
            {
                str: vol.Any(
                    vol.Coerce(int),  # Simple format for individual entries
                    vol.Schema(
                        {  # Detailed format for individual entries
                            vol.Required("soft"): vol.Coerce(int),
                            vol.Required("hard"): vol.Coerce(int),
                        }
                    ),
                )
            },
        ),
        vol.Optional(ATTR_TIMEOUT, default=10): vol.All(
            vol.Coerce(int), vol.Range(min=10, max=300)
        ),

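As an aside on the ulimits schema above: the fragment accepts either a flat integer per limit name or a nested soft/hard mapping, and the two forms may be mixed across entries. The following standalone sketch mirrors that fragment with voluptuous so it can be tried outside the Supervisor; the ULIMITS_SCHEMA name and the sample values are illustrative only, not part of the codebase.

    import voluptuous as vol

    # Mirrors the ulimits fragment above: either {name: int} or
    # {name: {"soft": int, "hard": int}}, and entries may mix both forms.
    ULIMITS_SCHEMA = vol.Schema(
        vol.Any(
            {str: vol.Coerce(int)},  # simple format: {name: limit}
            {
                str: vol.Any(
                    vol.Coerce(int),
                    vol.Schema(
                        {
                            vol.Required("soft"): vol.Coerce(int),
                            vol.Required("hard"): vol.Coerce(int),
                        }
                    ),
                )
            },
        )
    )

    # Simple format: one value per limit name.
    print(ULIMITS_SCHEMA({"nofile": 65535, "nproc": 32768}))

    # Detailed format mixed with a simple entry.
    print(ULIMITS_SCHEMA({"nofile": {"soft": 20000, "hard": 40000}, "nproc": 32768}))

    # A detailed entry missing "hard" is rejected, matching the tests further down.
    try:
        ULIMITS_SCHEMA({"nofile": {"soft": 1000}})
    except vol.Invalid as err:
        print("rejected:", err)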
@@ -348,6 +348,7 @@ ATTR_TRANSLATIONS = "translations"
ATTR_TYPE = "type"
ATTR_UART = "uart"
ATTR_UDEV = "udev"
ATTR_ULIMITS = "ulimits"
ATTR_UNHEALTHY = "unhealthy"
ATTR_UNSAVED = "unsaved"
ATTR_UNSUPPORTED = "unsupported"

@@ -318,7 +318,18 @@ class DockerAddon(DockerInterface):
            mem = 128 * 1024 * 1024
            limits.append(docker.types.Ulimit(name="memlock", soft=mem, hard=mem))

        # Return None if no capabilities is present
        # Add configurable ulimits from add-on config
        for name, config in self.addon.ulimits.items():
            if isinstance(config, int):
                # Simple format: both soft and hard limits are the same
                limits.append(docker.types.Ulimit(name=name, soft=config, hard=config))
            elif isinstance(config, dict):
                # Detailed format: both soft and hard limits are mandatory
                soft = config["soft"]
                hard = config["hard"]
                limits.append(docker.types.Ulimit(name=name, soft=soft, hard=hard))

        # Return None if no ulimits are present
        if limits:
            return limits
        return None

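For orientation, the loop above maps both config styles onto docker-py Ulimit objects. A minimal standalone sketch of that mapping follows; the sample config dict is made up, and it assumes the docker SDK for Python is installed.

    import docker.types

    # Illustrative add-on ulimits config using both formats.
    ulimits_config = {
        "nofile": 65535,                 # simple: soft and hard get the same value
        "core": {"soft": 0, "hard": 0},  # detailed: explicit soft/hard pair
    }

    limits = []
    for name, config in ulimits_config.items():
        if isinstance(config, int):
            limits.append(docker.types.Ulimit(name=name, soft=config, hard=config))
        elif isinstance(config, dict):
            limits.append(
                docker.types.Ulimit(name=name, soft=config["soft"], hard=config["hard"])
            )

    # Roughly the docker CLI equivalent: --ulimit nofile=65535:65535 --ulimit core=0:0
    for limit in limits:
        print(limit.name, limit.soft, limit.hard)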
@@ -220,10 +220,12 @@ class DockerInterface(JobGroup, ABC):

        await self.sys_run_in_executor(self.sys_docker.docker.login, **credentials)

    def _process_pull_image_log(self, job_id: str, reference: PullLogEntry) -> None:
    def _process_pull_image_log(
        self, install_job_id: str, reference: PullLogEntry
    ) -> None:
        """Process events fired from a docker while pulling an image, filtered to a given job id."""
        if (
            reference.job_id != job_id
            reference.job_id != install_job_id
            or not reference.id
            or not reference.status
            or not (stage := PullImageLayerStage.from_status(reference.status))
@@ -237,21 +239,22 @@ class DockerInterface(JobGroup, ABC):
                name="Pulling container image layer",
                initial_stage=stage.status,
                reference=reference.id,
                parent_id=job_id,
                parent_id=install_job_id,
                internal=True,
            )
            job.done = False
            return

        # Find our sub job to update details of
        for j in self.sys_jobs.jobs:
            if j.parent_id == job_id and j.reference == reference.id:
            if j.parent_id == install_job_id and j.reference == reference.id:
                job = j
                break

        # This likely only occurs if the logs came in out of sync and we got progress before the Pulling FS Layer one
        if not job:
            raise DockerLogOutOfOrder(
                f"Received pull image log with status {reference.status} for image id {reference.id} and parent job {job_id} but could not find a matching job, skipping",
                f"Received pull image log with status {reference.status} for image id {reference.id} and parent job {install_job_id} but could not find a matching job, skipping",
                _LOGGER.debug,
            )

@@ -303,6 +306,8 @@ class DockerInterface(JobGroup, ABC):
        # Our filters have all passed. Time to update the job
        # Only downloading and extracting have progress details. Use that to set extra
        # We'll leave it around on later stages as the total bytes may be useful after that stage
        # Enforce range to prevent float drift error
        progress = max(0, min(progress, 100))
        if (
            stage in {PullImageLayerStage.DOWNLOADING, PullImageLayerStage.EXTRACTING}
            and reference.progress_detail
@@ -325,10 +330,56 @@ class DockerInterface(JobGroup, ABC):
                else job.extra,
            )

        # Once we have received a progress update for every child job, start to set status of the main one
        install_job = self.sys_jobs.get_job(install_job_id)
        layer_jobs = [
            job
            for job in self.sys_jobs.jobs
            if job.parent_id == install_job.uuid
            and job.name == "Pulling container image layer"
        ]

        # First set the total bytes to be downloaded/extracted on the main job
        if not install_job.extra:
            total = 0
            for job in layer_jobs:
                if not job.extra:
                    return
                total += job.extra["total"]
            install_job.extra = {"total": total}
        else:
            total = install_job.extra["total"]

        # Then determine total progress based on progress of each sub-job, factoring in size of each compared to total
        progress = 0.0
        stage = PullImageLayerStage.PULL_COMPLETE
        for job in layer_jobs:
            if not job.extra:
                return
            progress += job.progress * (job.extra["total"] / total)
            job_stage = PullImageLayerStage.from_status(cast(str, job.stage))

            if job_stage < PullImageLayerStage.EXTRACTING:
                stage = PullImageLayerStage.DOWNLOADING
            elif (
                stage == PullImageLayerStage.PULL_COMPLETE
                and job_stage < PullImageLayerStage.PULL_COMPLETE
            ):
                stage = PullImageLayerStage.EXTRACTING

        # Ensure progress is 100 at this point to prevent float drift
        if stage == PullImageLayerStage.PULL_COMPLETE:
            progress = 100

        # To reduce noise, limit updates to when result has changed by an entire percent or when stage changed
        if stage != install_job.stage or progress >= install_job.progress + 1:
            install_job.update(stage=stage.status, progress=max(0, min(progress, 100)))

    @Job(
        name="docker_interface_install",
        on_condition=DockerJobError,
        concurrency=JobConcurrency.GROUP_REJECT,
        internal=True,
    )
    async def install(
        self,
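The aggregation above weights each layer job by its share of the combined byte total, so large layers dominate the parent job's progress. A small worked example of that formula with made-up layer sizes:

    # (progress in percent, total bytes) per layer job; values are illustrative.
    layers = [
        (100.0, 400_000_000),  # a large layer that has finished extracting
        (40.0, 100_000_000),   # a small layer still downloading
    ]

    total = sum(size for _, size in layers)

    # Same weighting as above: each layer contributes progress * (its size / total).
    progress = sum(pct * (size / total) for pct, size in layers)

    print(round(progress, 1))  # 88.0 -> the parent install job reports ~88%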
@@ -351,11 +402,11 @@ class DockerInterface(JobGroup, ABC):
                # Try login if we have defined credentials
                await self._docker_login(image)

            job_id = self.sys_jobs.current.uuid
            curr_job_id = self.sys_jobs.current.uuid

            async def process_pull_image_log(reference: PullLogEntry) -> None:
                try:
                    self._process_pull_image_log(job_id, reference)
                    self._process_pull_image_log(curr_job_id, reference)
                except DockerLogOutOfOrder as err:
                    # Send all these to sentry. Missing a few progress updates
                    # shouldn't matter to users but matters to us
@@ -629,7 +680,10 @@ class DockerInterface(JobGroup, ABC):
        concurrency=JobConcurrency.GROUP_REJECT,
    )
    async def update(
        self, version: AwesomeVersion, image: str | None = None, latest: bool = False
        self,
        version: AwesomeVersion,
        image: str | None = None,
        latest: bool = False,
    ) -> None:
        """Update a Docker image."""
        image = image or self.image

@@ -9,7 +9,12 @@ from typing import Any
from supervisor.resolution.const import UnhealthyReason

from ..coresys import CoreSys, CoreSysAttributes
from ..exceptions import DBusError, DBusObjectError, HardwareNotFound
from ..exceptions import (
    DBusError,
    DBusNotConnectedError,
    DBusObjectError,
    HardwareNotFound,
)
from .const import UdevSubsystem
from .data import Device

@@ -207,6 +212,8 @@ class HwDisk(CoreSysAttributes):
        try:
            block_device = self.sys_dbus.udisks2.get_block_device_by_path(device_path)
            drive = self.sys_dbus.udisks2.get_drive(block_device.drive)
        except DBusNotConnectedError:
            return None
        except DBusObjectError:
            _LOGGER.warning(
                "Unable to find UDisks2 drive for device at %s", device_path.as_posix()

@@ -28,6 +28,7 @@ from ..exceptions import (
    HomeAssistantUpdateError,
    JobException,
)
from ..jobs import ChildJobSyncFilter
from ..jobs.const import JOB_GROUP_HOME_ASSISTANT_CORE, JobConcurrency, JobThrottle
from ..jobs.decorator import Job, JobCondition
from ..jobs.job_group import JobGroup
@@ -224,6 +225,13 @@ class HomeAssistantCore(JobGroup):
        ],
        on_condition=HomeAssistantJobError,
        concurrency=JobConcurrency.GROUP_REJECT,
        # We assume for now the docker image pull is 100% of this task. But from
        # a user perspective that isn't true. Other steps that take time which
        # is not accounted for in progress include: partial backup, image
        # cleanup, and Home Assistant restart
        child_job_syncs=[
            ChildJobSyncFilter("docker_interface_install", progress_allocation=1.0)
        ],
    )
    async def update(
        self,

@@ -371,6 +371,12 @@ class HomeAssistant(FileConfiguration, CoreSysAttributes):
                _LOGGER.error,
            ) from err

        if not resp:
            raise HomeAssistantBackupError(
                "Preparing backup of Home Assistant Core failed. No response from HA Core.",
                _LOGGER.error,
            )

        if resp and not resp.get(ATTR_SUCCESS):
            raise HomeAssistantBackupError(
                f"Preparing backup of Home Assistant Core failed due to: {resp.get(ATTR_ERROR, {}).get(ATTR_MESSAGE, '')}. Check HA Core logs.",

@@ -225,6 +225,10 @@ class HomeAssistantWebSocket(CoreSysAttributes):
        # since it makes a new socket connection and we already have one.
        if not connected and not await self.sys_homeassistant.api.check_api_state():
            # No core access, don't try.
            _LOGGER.debug(
                "Home Assistant API is not accessible. Not sending WS message: %s",
                message,
            )
            return False

        if not self._client:

@@ -282,8 +282,10 @@ class JobManager(FileConfiguration, CoreSysAttributes):
                # reporting shouldn't raise and break the active job
                continue

            progress = sync.starting_progress + (
                sync.progress_allocation * job_data["progress"]
            progress = min(
                100,
                sync.starting_progress
                + (sync.progress_allocation * job_data["progress"]),
            )
            # Using max would always trigger on change even if progress was unchanged
            # pylint: disable-next=R1731
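The min() added above clamps the synced value so that float drift in a child job's progress cannot push the parent past 100. A tiny illustration with made-up numbers:

    # Illustrative values: the child's progress is scaled into the slice of the
    # parent job reserved for it, then clamped at 100.
    starting_progress = 0.0      # parent progress when the child job started
    progress_allocation = 1.0    # share of the parent assigned to this child
    child_progress = 100.00001   # child progress with a little float drift

    parent_progress = min(100, starting_progress + progress_allocation * child_progress)
    print(parent_progress)  # 100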
@@ -8,7 +8,7 @@ from ..const import UnsupportedReason
from .base import EvaluateBase

EXPECTED_LOGGING = "journald"
EXPECTED_STORAGE = "overlay2"
EXPECTED_STORAGE = ("overlay2", "overlayfs")

_LOGGER: logging.Logger = logging.getLogger(__name__)

@@ -41,14 +41,18 @@ class EvaluateDockerConfiguration(EvaluateBase):
        storage_driver = self.sys_docker.info.storage
        logging_driver = self.sys_docker.info.logging

        if storage_driver != EXPECTED_STORAGE:
        is_unsupported = False

        if storage_driver not in EXPECTED_STORAGE:
            is_unsupported = True
            _LOGGER.warning(
                "Docker storage driver %s is not supported!", storage_driver
            )

        if logging_driver != EXPECTED_LOGGING:
            is_unsupported = True
            _LOGGER.warning(
                "Docker logging driver %s is not supported!", logging_driver
            )

        return storage_driver != EXPECTED_STORAGE or logging_driver != EXPECTED_LOGGING
        return is_unsupported

@@ -13,6 +13,8 @@ import aiohttp
from aiohttp.client_exceptions import ClientError
from awesomeversion import AwesomeVersion, AwesomeVersionException

from supervisor.jobs import ChildJobSyncFilter

from .const import (
    ATTR_SUPERVISOR_INTERNET,
    SUPERVISOR_VERSION,
@@ -195,6 +197,15 @@ class Supervisor(CoreSysAttributes):
            if temp_dir:
                await self.sys_run_in_executor(temp_dir.cleanup)

    @Job(
        name="supervisor_update",
        # We assume for now the docker image pull is 100% of this task. But from
        # a user perspective that isn't true.  Other steps that take time which
        # is not accounted for in progress include: app armor update and restart
        child_job_syncs=[
            ChildJobSyncFilter("docker_interface_install", progress_allocation=1.0)
        ],
    )
    async def update(self, version: AwesomeVersion | None = None) -> None:
        """Update Supervisor version."""
        version = version or self.latest_version or self.version
@@ -221,6 +232,7 @@ class Supervisor(CoreSysAttributes):

        # Update container
        _LOGGER.info("Update Supervisor to version %s", version)

        try:
            await self.instance.install(version, image=image)
            await self.instance.update_start_tag(image, version)

@@ -419,3 +419,71 @@ def test_valid_schema():
    config["schema"] = {"field": "invalid"}
    with pytest.raises(vol.Invalid):
        assert vd.SCHEMA_ADDON_CONFIG(config)


def test_ulimits_simple_format():
    """Test ulimits simple format validation."""
    config = load_json_fixture("basic-addon-config.json")

    config["ulimits"] = {"nofile": 65535, "nproc": 32768, "memlock": 134217728}

    valid_config = vd.SCHEMA_ADDON_CONFIG(config)
    assert valid_config["ulimits"]["nofile"] == 65535
    assert valid_config["ulimits"]["nproc"] == 32768
    assert valid_config["ulimits"]["memlock"] == 134217728


def test_ulimits_detailed_format():
    """Test ulimits detailed format validation."""
    config = load_json_fixture("basic-addon-config.json")

    config["ulimits"] = {
        "nofile": {"soft": 20000, "hard": 40000},
        "nproc": 32768,  # Mixed format should work
        "memlock": {"soft": 67108864, "hard": 134217728},
    }

    valid_config = vd.SCHEMA_ADDON_CONFIG(config)
    assert valid_config["ulimits"]["nofile"]["soft"] == 20000
    assert valid_config["ulimits"]["nofile"]["hard"] == 40000
    assert valid_config["ulimits"]["nproc"] == 32768
    assert valid_config["ulimits"]["memlock"]["soft"] == 67108864
    assert valid_config["ulimits"]["memlock"]["hard"] == 134217728


def test_ulimits_empty_dict():
    """Test ulimits with empty dict (default)."""
    config = load_json_fixture("basic-addon-config.json")

    valid_config = vd.SCHEMA_ADDON_CONFIG(config)
    assert valid_config["ulimits"] == {}


def test_ulimits_invalid_values():
    """Test ulimits with invalid values."""
    config = load_json_fixture("basic-addon-config.json")

    # Invalid string values
    config["ulimits"] = {"nofile": "invalid"}
    with pytest.raises(vol.Invalid):
        vd.SCHEMA_ADDON_CONFIG(config)

    # Invalid detailed format
    config["ulimits"] = {"nofile": {"invalid_key": 1000}}
    with pytest.raises(vol.Invalid):
        vd.SCHEMA_ADDON_CONFIG(config)

    # Missing hard value in detailed format
    config["ulimits"] = {"nofile": {"soft": 1000}}
    with pytest.raises(vol.Invalid):
        vd.SCHEMA_ADDON_CONFIG(config)

    # Missing soft value in detailed format
    config["ulimits"] = {"nofile": {"hard": 1000}}
    with pytest.raises(vol.Invalid):
        vd.SCHEMA_ADDON_CONFIG(config)

    # Empty dict in detailed format
    config["ulimits"] = {"nofile": {}}
    with pytest.raises(vol.Invalid):
        vd.SCHEMA_ADDON_CONFIG(config)

@@ -2,16 +2,19 @@

import asyncio
from pathlib import Path
from unittest.mock import MagicMock, PropertyMock, patch
from unittest.mock import AsyncMock, MagicMock, PropertyMock, patch

from aiohttp.test_utils import TestClient
from awesomeversion import AwesomeVersion
import pytest

from supervisor.backups.manager import BackupManager
from supervisor.const import CoreState
from supervisor.coresys import CoreSys
from supervisor.docker.homeassistant import DockerHomeAssistant
from supervisor.docker.interface import DockerInterface
from supervisor.homeassistant.api import APIState
from supervisor.homeassistant.api import APIState, HomeAssistantAPI
from supervisor.homeassistant.const import WSEvent
from supervisor.homeassistant.core import HomeAssistantCore
from supervisor.homeassistant.module import HomeAssistant

@@ -271,3 +274,96 @@ async def test_background_home_assistant_update_fails_fast(
    assert resp.status == 400
    body = await resp.json()
    assert body["message"] == "Version 2025.8.3 is already installed"


@pytest.mark.usefixtures("tmp_supervisor_data")
async def test_api_progress_updates_home_assistant_update(
    api_client: TestClient, coresys: CoreSys, ha_ws_client: AsyncMock
):
    """Test progress updates sent to Home Assistant for updates."""
    coresys.hardware.disk.get_disk_free_space = lambda x: 5000
    coresys.core.set_state(CoreState.RUNNING)
    coresys.docker.docker.api.pull.return_value = load_json_fixture(
        "docker_pull_image_log.json"
    )
    coresys.homeassistant.version = AwesomeVersion("2025.8.0")

    with (
        patch.object(
            DockerHomeAssistant,
            "version",
            new=PropertyMock(return_value=AwesomeVersion("2025.8.0")),
        ),
        patch.object(
            HomeAssistantAPI, "get_config", return_value={"components": ["frontend"]}
        ),
    ):
        resp = await api_client.post("/core/update", json={"version": "2025.8.3"})

    assert resp.status == 200

    events = [
        {
            "stage": evt.args[0]["data"]["data"]["stage"],
            "progress": evt.args[0]["data"]["data"]["progress"],
            "done": evt.args[0]["data"]["data"]["done"],
        }
        for evt in ha_ws_client.async_send_command.call_args_list
        if "data" in evt.args[0]
        and evt.args[0]["data"]["event"] == WSEvent.JOB
        and evt.args[0]["data"]["data"]["name"] == "home_assistant_core_update"
    ]
    assert events[:5] == [
        {
            "stage": None,
            "progress": 0,
            "done": None,
        },
        {
            "stage": None,
            "progress": 0,
            "done": False,
        },
        {
            "stage": None,
            "progress": 0.1,
            "done": False,
        },
        {
            "stage": None,
            "progress": 1.2,
            "done": False,
        },
        {
            "stage": None,
            "progress": 2.8,
            "done": False,
        },
    ]
    assert events[-5:] == [
        {
            "stage": None,
            "progress": 97.2,
            "done": False,
        },
        {
            "stage": None,
            "progress": 98.4,
            "done": False,
        },
        {
            "stage": None,
            "progress": 99.4,
            "done": False,
        },
        {
            "stage": None,
            "progress": 100,
            "done": False,
        },
        {
            "stage": None,
            "progress": 100,
            "done": True,
        },
    ]

@@ -13,12 +13,13 @@ from supervisor.addons.addon import Addon
from supervisor.arch import CpuArch
from supervisor.backups.manager import BackupManager
from supervisor.config import CoreConfig
from supervisor.const import AddonState
from supervisor.const import AddonState, CoreState
from supervisor.coresys import CoreSys
from supervisor.docker.addon import DockerAddon
from supervisor.docker.const import ContainerState
from supervisor.docker.interface import DockerInterface
from supervisor.docker.monitor import DockerContainerStateEvent
from supervisor.homeassistant.const import WSEvent
from supervisor.homeassistant.module import HomeAssistant
from supervisor.store.addon import AddonStore
from supervisor.store.repository import Repository
@@ -709,3 +710,101 @@ async def test_api_store_addons_addon_availability_installed_addon(
        assert (
            "requires Home Assistant version 2023.1.1 or greater" in result["message"]
        )


@pytest.mark.parametrize(
    ("action", "job_name", "addon_slug"),
    [
        ("install", "addon_manager_install", "local_ssh"),
        ("update", "addon_manager_update", "local_example"),
    ],
)
@pytest.mark.usefixtures("tmp_supervisor_data")
async def test_api_progress_updates_addon_install_update(
    api_client: TestClient,
    coresys: CoreSys,
    ha_ws_client: AsyncMock,
    install_addon_example: Addon,
    action: str,
    job_name: str,
    addon_slug: str,
):
    """Test progress updates sent to Home Assistant for installs/updates."""
    coresys.hardware.disk.get_disk_free_space = lambda x: 5000
    coresys.core.set_state(CoreState.RUNNING)
    coresys.docker.docker.api.pull.return_value = load_json_fixture(
        "docker_pull_image_log.json"
    )
    coresys.arch._supported_arch = ["amd64"]  # pylint: disable=protected-access
    install_addon_example.data_store["version"] = AwesomeVersion("2.0.0")

    with (
        patch.object(Addon, "load"),
        patch.object(Addon, "need_build", new=PropertyMock(return_value=False)),
        patch.object(Addon, "latest_need_build", new=PropertyMock(return_value=False)),
    ):
        resp = await api_client.post(f"/store/addons/{addon_slug}/{action}")

    assert resp.status == 200

    events = [
        {
            "stage": evt.args[0]["data"]["data"]["stage"],
            "progress": evt.args[0]["data"]["data"]["progress"],
            "done": evt.args[0]["data"]["data"]["done"],
        }
        for evt in ha_ws_client.async_send_command.call_args_list
        if "data" in evt.args[0]
        and evt.args[0]["data"]["event"] == WSEvent.JOB
        and evt.args[0]["data"]["data"]["name"] == job_name
        and evt.args[0]["data"]["data"]["reference"] == addon_slug
    ]
    assert events[:4] == [
        {
            "stage": None,
            "progress": 0,
            "done": False,
        },
        {
            "stage": None,
            "progress": 0.1,
            "done": False,
        },
        {
            "stage": None,
            "progress": 1.2,
            "done": False,
        },
        {
            "stage": None,
            "progress": 2.8,
            "done": False,
        },
    ]
    assert events[-5:] == [
        {
            "stage": None,
            "progress": 97.2,
            "done": False,
        },
        {
            "stage": None,
            "progress": 98.4,
            "done": False,
        },
        {
            "stage": None,
            "progress": 99.4,
            "done": False,
        },
        {
            "stage": None,
            "progress": 100,
            "done": False,
        },
        {
            "stage": None,
            "progress": 100,
            "done": True,
        },
    ]

@@ -2,17 +2,24 @@

# pylint: disable=protected-access
import time
from unittest.mock import AsyncMock, MagicMock, patch
from unittest.mock import AsyncMock, MagicMock, PropertyMock, patch

from aiohttp.test_utils import TestClient
from awesomeversion import AwesomeVersion
from blockbuster import BlockingError
import pytest

from supervisor.const import CoreState
from supervisor.core import Core
from supervisor.coresys import CoreSys
from supervisor.exceptions import HassioError, HostNotSupportedError, StoreGitError
from supervisor.homeassistant.const import WSEvent
from supervisor.store.repository import Repository
from supervisor.supervisor import Supervisor
from supervisor.updater import Updater

from tests.api import common_test_api_advanced_logs
from tests.common import load_json_fixture
from tests.dbus_service_mocks.base import DBusServiceMock
from tests.dbus_service_mocks.os_agent import OSAgent as OSAgentService

@@ -316,3 +323,97 @@ async def test_api_supervisor_options_blocking_io(

    # This should not raise blocking error anymore
    time.sleep(0)


@pytest.mark.usefixtures("tmp_supervisor_data")
async def test_api_progress_updates_supervisor_update(
    api_client: TestClient, coresys: CoreSys, ha_ws_client: AsyncMock
):
    """Test progress updates sent to Home Assistant for updates."""
    coresys.hardware.disk.get_disk_free_space = lambda x: 5000
    coresys.core.set_state(CoreState.RUNNING)
    coresys.docker.docker.api.pull.return_value = load_json_fixture(
        "docker_pull_image_log.json"
    )

    with (
        patch.object(
            Supervisor,
            "version",
            new=PropertyMock(return_value=AwesomeVersion("2025.08.0")),
        ),
        patch.object(
            Updater,
            "version_supervisor",
            new=PropertyMock(return_value=AwesomeVersion("2025.08.3")),
        ),
        patch.object(
            Updater, "image_supervisor", new=PropertyMock(return_value="supervisor")
        ),
        patch.object(Supervisor, "update_apparmor"),
        patch.object(Core, "stop"),
    ):
        resp = await api_client.post("/supervisor/update")

    assert resp.status == 200

    events = [
        {
            "stage": evt.args[0]["data"]["data"]["stage"],
            "progress": evt.args[0]["data"]["data"]["progress"],
            "done": evt.args[0]["data"]["data"]["done"],
        }
        for evt in ha_ws_client.async_send_command.call_args_list
        if "data" in evt.args[0]
        and evt.args[0]["data"]["event"] == WSEvent.JOB
        and evt.args[0]["data"]["data"]["name"] == "supervisor_update"
    ]
    assert events[:4] == [
        {
            "stage": None,
            "progress": 0,
            "done": False,
        },
        {
            "stage": None,
            "progress": 0.1,
            "done": False,
        },
        {
            "stage": None,
            "progress": 1.2,
            "done": False,
        },
        {
            "stage": None,
            "progress": 2.8,
            "done": False,
        },
    ]
    assert events[-5:] == [
        {
            "stage": None,
            "progress": 97.2,
            "done": False,
        },
        {
            "stage": None,
            "progress": 98.4,
            "done": False,
        },
        {
            "stage": None,
            "progress": 99.4,
            "done": False,
        },
        {
            "stage": None,
            "progress": 100,
            "done": False,
        },
        {
            "stage": None,
            "progress": 100,
            "done": True,
        },
    ]

@@ -503,3 +503,93 @@ async def test_addon_new_device_no_haos(
    await install_addon_ssh.stop()
    assert coresys.resolution.issues == []
    assert coresys.resolution.suggestions == []


async def test_ulimits_integration(
    coresys: CoreSys,
    install_addon_ssh: Addon,
):
    """Test ulimits integration with Docker addon."""
    docker_addon = DockerAddon(coresys, install_addon_ssh)

    # Test default case (no ulimits, no realtime)
    assert docker_addon.ulimits is None

    # Test with realtime enabled (should have built-in ulimits)
    install_addon_ssh.data["realtime"] = True
    ulimits = docker_addon.ulimits
    assert ulimits is not None
    assert len(ulimits) == 2
    # Check for rtprio limit
    rtprio_limit = next((u for u in ulimits if u.name == "rtprio"), None)
    assert rtprio_limit is not None
    assert rtprio_limit.soft == 90
    assert rtprio_limit.hard == 99
    # Check for memlock limit
    memlock_limit = next((u for u in ulimits if u.name == "memlock"), None)
    assert memlock_limit is not None
    assert memlock_limit.soft == 128 * 1024 * 1024
    assert memlock_limit.hard == 128 * 1024 * 1024

    # Test with configurable ulimits (simple format)
    install_addon_ssh.data["realtime"] = False
    install_addon_ssh.data["ulimits"] = {"nofile": 65535, "nproc": 32768}
    ulimits = docker_addon.ulimits
    assert ulimits is not None
    assert len(ulimits) == 2

    nofile_limit = next((u for u in ulimits if u.name == "nofile"), None)
    assert nofile_limit is not None
    assert nofile_limit.soft == 65535
    assert nofile_limit.hard == 65535

    nproc_limit = next((u for u in ulimits if u.name == "nproc"), None)
    assert nproc_limit is not None
    assert nproc_limit.soft == 32768
    assert nproc_limit.hard == 32768

    # Test with configurable ulimits (detailed format)
    install_addon_ssh.data["ulimits"] = {
        "nofile": {"soft": 20000, "hard": 40000},
        "memlock": {"soft": 67108864, "hard": 134217728},
    }
    ulimits = docker_addon.ulimits
    assert ulimits is not None
    assert len(ulimits) == 2

    nofile_limit = next((u for u in ulimits if u.name == "nofile"), None)
    assert nofile_limit is not None
    assert nofile_limit.soft == 20000
    assert nofile_limit.hard == 40000

    memlock_limit = next((u for u in ulimits if u.name == "memlock"), None)
    assert memlock_limit is not None
    assert memlock_limit.soft == 67108864
    assert memlock_limit.hard == 134217728

    # Test mixed format and realtime (realtime + custom ulimits)
    install_addon_ssh.data["realtime"] = True
    install_addon_ssh.data["ulimits"] = {
        "nofile": 65535,
        "core": {"soft": 0, "hard": 0},  # Disable core dumps
    }
    ulimits = docker_addon.ulimits
    assert ulimits is not None
    assert (
        len(ulimits) == 4
    )  # rtprio, memlock (from realtime) + nofile, core (from config)

    # Check realtime limits still present
    rtprio_limit = next((u for u in ulimits if u.name == "rtprio"), None)
    assert rtprio_limit is not None

    # Check custom limits added
    nofile_limit = next((u for u in ulimits if u.name == "nofile"), None)
    assert nofile_limit is not None
    assert nofile_limit.soft == 65535
    assert nofile_limit.hard == 65535

    core_limit = next((u for u in ulimits if u.name == "core"), None)
    assert core_limit is not None
    assert core_limit.soft == 0
    assert core_limit.hard == 0

@@ -26,7 +26,6 @@ from supervisor.exceptions import (
    DockerNotFound,
    DockerRequestError,
)
from supervisor.homeassistant.const import WSEvent
from supervisor.jobs import JobSchedulerOptions, SupervisorJob

from tests.common import load_json_fixture
@@ -417,196 +416,17 @@ async def test_install_fires_progress_events(
    ]


async def test_install_sends_progress_to_home_assistant(
    coresys: CoreSys, test_docker_interface: DockerInterface, ha_ws_client: AsyncMock
):
    """Test progress events are sent as job updates to Home Assistant."""
    coresys.core.set_state(CoreState.RUNNING)
    coresys.docker.docker.api.pull.return_value = load_json_fixture(
        "docker_pull_image_log.json"
    )

    with (
        patch.object(
            type(coresys.supervisor), "arch", PropertyMock(return_value="i386")
        ),
    ):
        # Schedule job so we can listen for the end. Then we can assert against the WS mock
        event = asyncio.Event()
        job, install_task = coresys.jobs.schedule_job(
            test_docker_interface.install,
            JobSchedulerOptions(),
            AwesomeVersion("1.2.3"),
            "test",
        )

        async def listen_for_job_end(reference: SupervisorJob):
            if reference.uuid != job.uuid:
                return
            event.set()

        coresys.bus.register_event(BusEvent.SUPERVISOR_JOB_END, listen_for_job_end)
        await install_task
        await event.wait()

    events = [
        evt.args[0]["data"]["data"]
        for evt in ha_ws_client.async_send_command.call_args_list
        if "data" in evt.args[0] and evt.args[0]["data"]["event"] == WSEvent.JOB
    ]
    assert events[0]["name"] == "docker_interface_install"
    assert events[0]["uuid"] == job.uuid
    assert events[0]["done"] is None
    assert events[1]["name"] == "docker_interface_install"
    assert events[1]["uuid"] == job.uuid
    assert events[1]["done"] is False
    assert events[-1]["name"] == "docker_interface_install"
    assert events[-1]["uuid"] == job.uuid
    assert events[-1]["done"] is True

    def make_sub_log(layer_id: str):
        return [
            {
                "stage": evt["stage"],
                "progress": evt["progress"],
                "done": evt["done"],
                "extra": evt["extra"],
            }
            for evt in events
            if evt["name"] == "Pulling container image layer"
            and evt["reference"] == layer_id
            and evt["parent_id"] == job.uuid
        ]

    layer_1_log = make_sub_log("1e214cd6d7d0")
    layer_2_log = make_sub_log("1a38e1d5e18d")
    assert len(layer_1_log) == 20
    assert len(layer_2_log) == 19
    assert len(events) == 42
    assert layer_1_log == [
        {"stage": "Pulling fs layer", "progress": 0, "done": False, "extra": None},
        {
            "stage": "Downloading",
            "progress": 0.1,
            "done": False,
            "extra": {"current": 539462, "total": 436480882},
        },
        {
            "stage": "Downloading",
            "progress": 0.6,
            "done": False,
            "extra": {"current": 4864838, "total": 436480882},
        },
        {
            "stage": "Downloading",
            "progress": 0.9,
            "done": False,
            "extra": {"current": 7552896, "total": 436480882},
        },
        {
            "stage": "Downloading",
            "progress": 1.2,
            "done": False,
            "extra": {"current": 10252544, "total": 436480882},
        },
        {
            "stage": "Downloading",
            "progress": 2.9,
            "done": False,
            "extra": {"current": 25369792, "total": 436480882},
        },
        {
            "stage": "Downloading",
            "progress": 11.9,
            "done": False,
            "extra": {"current": 103619904, "total": 436480882},
        },
        {
            "stage": "Downloading",
            "progress": 26.1,
            "done": False,
            "extra": {"current": 227726144, "total": 436480882},
        },
        {
            "stage": "Downloading",
            "progress": 49.6,
            "done": False,
            "extra": {"current": 433170048, "total": 436480882},
        },
        {
            "stage": "Verifying Checksum",
            "progress": 50,
            "done": False,
            "extra": {"current": 433170048, "total": 436480882},
        },
        {
            "stage": "Download complete",
            "progress": 50,
            "done": False,
            "extra": {"current": 433170048, "total": 436480882},
        },
        {
            "stage": "Extracting",
            "progress": 50.1,
            "done": False,
            "extra": {"current": 557056, "total": 436480882},
        },
        {
            "stage": "Extracting",
            "progress": 60.3,
            "done": False,
            "extra": {"current": 89686016, "total": 436480882},
        },
        {
            "stage": "Extracting",
            "progress": 70.0,
            "done": False,
            "extra": {"current": 174358528, "total": 436480882},
        },
        {
            "stage": "Extracting",
            "progress": 80.0,
            "done": False,
            "extra": {"current": 261816320, "total": 436480882},
        },
        {
            "stage": "Extracting",
            "progress": 88.4,
            "done": False,
            "extra": {"current": 334790656, "total": 436480882},
        },
        {
            "stage": "Extracting",
            "progress": 94.0,
            "done": False,
            "extra": {"current": 383811584, "total": 436480882},
        },
        {
            "stage": "Extracting",
            "progress": 99.9,
            "done": False,
            "extra": {"current": 435617792, "total": 436480882},
        },
        {
            "stage": "Extracting",
            "progress": 100.0,
            "done": False,
            "extra": {"current": 436480882, "total": 436480882},
        },
        {
            "stage": "Pull complete",
            "progress": 100.0,
            "done": True,
            "extra": {"current": 436480882, "total": 436480882},
        },
    ]

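The percentages asserted above follow a pattern that can be read directly from the fixture data: downloading covers the first half of a layer's progress and extracting the second half, each scaled by current/total and rounded to one decimal place. A minimal sketch of that arithmetic, inferred from the asserted values rather than taken from the Supervisor source:

def layer_progress(stage: str, current: int, total: int) -> float:
    """Sketch of the per-layer progress split implied by the assertions above."""
    ratio = current / total if total else 0.0
    if stage == "Downloading":
        return round(50 * ratio, 1)  # download phase spans 0-50%
    if stage in ("Verifying Checksum", "Download complete"):
        return 50.0  # download half finished, extraction not started
    if stage == "Extracting":
        return round(50 + 50 * ratio, 1)  # extraction phase spans 50-100%
    if stage == "Pull complete":
        return 100.0
    return 0.0  # e.g. "Pulling fs layer"


# For example, layer_progress("Downloading", 433170048, 436480882) == 49.6 and
# layer_progress("Extracting", 89686016, 436480882) == 60.3, matching the events above.
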
async def test_install_progress_rounding_does_not_cause_misses(
    coresys: CoreSys, test_docker_interface: DockerInterface, ha_ws_client: AsyncMock
    coresys: CoreSys,
    test_docker_interface: DockerInterface,
    ha_ws_client: AsyncMock,
    capture_exception: Mock,
):
    """Test extremely close progress events do not create rounding issues."""
    coresys.core.set_state(CoreState.RUNNING)
    # Current numbers chosen to create a rounding issue with the original code,
    # where a progress update came in with a value between the actual previous
    # value and what it was rounded to. It should not raise an out-of-order exception.
    coresys.docker.docker.api.pull.return_value = [
        {
            "status": "Pulling from home-assistant/odroid-n2-homeassistant",
@@ -671,65 +491,7 @@ async def test_install_progress_rounding_does_not_cause_misses(
        await install_task
        await event.wait()

    events = [
        evt.args[0]["data"]["data"]
        for evt in ha_ws_client.async_send_command.call_args_list
        if "data" in evt.args[0]
        and evt.args[0]["data"]["event"] == WSEvent.JOB
        and evt.args[0]["data"]["data"]["reference"] == "1e214cd6d7d0"
        and evt.args[0]["data"]["data"]["stage"] in {"Downloading", "Extracting"}
    ]

    assert events == [
        {
            "name": "Pulling container image layer",
            "stage": "Downloading",
            "progress": 49.6,
            "done": False,
            "extra": {"current": 432700000, "total": 436480882},
            "reference": "1e214cd6d7d0",
            "parent_id": job.uuid,
            "errors": [],
            "uuid": ANY,
            "created": ANY,
        },
        {
            "name": "Pulling container image layer",
            "stage": "Downloading",
            "progress": 49.6,
            "done": False,
            "extra": {"current": 432800000, "total": 436480882},
            "reference": "1e214cd6d7d0",
            "parent_id": job.uuid,
            "errors": [],
            "uuid": ANY,
            "created": ANY,
        },
        {
            "name": "Pulling container image layer",
            "stage": "Extracting",
            "progress": 99.6,
            "done": False,
            "extra": {"current": 432700000, "total": 436480882},
            "reference": "1e214cd6d7d0",
            "parent_id": job.uuid,
            "errors": [],
            "uuid": ANY,
            "created": ANY,
        },
        {
            "name": "Pulling container image layer",
            "stage": "Extracting",
            "progress": 99.6,
            "done": False,
            "extra": {"current": 432800000, "total": 436480882},
            "reference": "1e214cd6d7d0",
            "parent_id": job.uuid,
            "errors": [],
            "uuid": ANY,
            "created": ANY,
        },
    ]
    capture_exception.assert_not_called()

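The comment at the top of this test describes the failure mode being guarded against: when only the rounded progress is stored, a later update whose true value lies between the previous true value and its rounded form appears to go backwards (49.567% is reported as 49.6, then 49.578% arrives). The sketch below shows the general shape of a guard that avoids this; it is illustrative only, not the actual job or progress implementation.

class LayerProgressTracker:
    """Sketch: compare raw progress values and round only when reporting."""

    def __init__(self) -> None:
        self._raw = 0.0

    def update(self, raw: float) -> float:
        # 49.578 after 49.567 is accepted even though both report as 49.6;
        # comparing against the rounded 49.6 would wrongly reject the update.
        if raw < self._raw:
            raise ValueError("progress went backwards")
        self._raw = raw
        return round(raw, 1)
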
@pytest.mark.parametrize(
@@ -779,10 +541,15 @@ async def test_install_raises_on_pull_error(


async def test_install_progress_handles_download_restart(
    coresys: CoreSys, test_docker_interface: DockerInterface, ha_ws_client: AsyncMock
    coresys: CoreSys,
    test_docker_interface: DockerInterface,
    ha_ws_client: AsyncMock,
    capture_exception: Mock,
):
    """Test install handles docker progress events that include a download restart."""
    coresys.core.set_state(CoreState.RUNNING)
    # Fixture emulates a download restart as docker logs it
    # An out-of-order log exception should not be raised
    coresys.docker.docker.api.pull.return_value = load_json_fixture(
        "docker_pull_image_log_restart.json"
    )
@@ -810,106 +577,4 @@ async def test_install_progress_handles_download_restart(
        await install_task
        await event.wait()

    events = [
        evt.args[0]["data"]["data"]
        for evt in ha_ws_client.async_send_command.call_args_list
        if "data" in evt.args[0] and evt.args[0]["data"]["event"] == WSEvent.JOB
    ]

    def make_sub_log(layer_id: str):
        return [
            {
                "stage": evt["stage"],
                "progress": evt["progress"],
                "done": evt["done"],
                "extra": evt["extra"],
            }
            for evt in events
            if evt["name"] == "Pulling container image layer"
            and evt["reference"] == layer_id
            and evt["parent_id"] == job.uuid
        ]

    layer_1_log = make_sub_log("1e214cd6d7d0")
    assert len(layer_1_log) == 14
    assert layer_1_log == [
        {"stage": "Pulling fs layer", "progress": 0, "done": False, "extra": None},
        {
            "stage": "Downloading",
            "progress": 11.9,
            "done": False,
            "extra": {"current": 103619904, "total": 436480882},
        },
        {
            "stage": "Downloading",
            "progress": 26.1,
            "done": False,
            "extra": {"current": 227726144, "total": 436480882},
        },
        {
            "stage": "Downloading",
            "progress": 49.6,
            "done": False,
            "extra": {"current": 433170048, "total": 436480882},
        },
        {
            "stage": "Retrying download",
            "progress": 0,
            "done": False,
            "extra": None,
        },
        {
            "stage": "Retrying download",
            "progress": 0,
            "done": False,
            "extra": None,
        },
        {
            "stage": "Downloading",
            "progress": 11.9,
            "done": False,
            "extra": {"current": 103619904, "total": 436480882},
        },
        {
            "stage": "Downloading",
            "progress": 26.1,
            "done": False,
            "extra": {"current": 227726144, "total": 436480882},
        },
        {
            "stage": "Downloading",
            "progress": 49.6,
            "done": False,
            "extra": {"current": 433170048, "total": 436480882},
        },
        {
            "stage": "Verifying Checksum",
            "progress": 50,
            "done": False,
            "extra": {"current": 433170048, "total": 436480882},
        },
        {
            "stage": "Download complete",
            "progress": 50,
            "done": False,
            "extra": {"current": 433170048, "total": 436480882},
        },
        {
            "stage": "Extracting",
            "progress": 80.0,
            "done": False,
            "extra": {"current": 261816320, "total": 436480882},
        },
        {
            "stage": "Extracting",
            "progress": 100.0,
            "done": False,
            "extra": {"current": 436480882, "total": 436480882},
        },
        {
            "stage": "Pull complete",
            "progress": 100.0,
            "done": True,
            "extra": {"current": 436480882, "total": 436480882},
        },
    ]
    capture_exception.assert_not_called()

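The restart fixture replayed above shows how docker reports a retried download: a "Retrying download" status appears and the byte counters start over from zero. A monotonic-progress check therefore has to treat the retry as a deliberate reset rather than an out-of-order update. A small illustrative sketch of that idea, not the Supervisor's actual handler:

def apply_layer_status(state: dict[str, float], stage: str, progress: float) -> None:
    """Sketch: reset a layer's progress when docker signals a download retry."""
    if stage == "Retrying download":
        state["progress"] = 0.0  # deliberate reset, not an error
        return
    if progress < state.get("progress", 0.0):
        raise ValueError("layer progress went backwards")
    state["progress"] = progress
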
@@ -376,3 +376,14 @@ async def test_try_get_nvme_life_time_missing_percent_used(
        coresys.config.path_supervisor
    )
    assert lifetime is None


async def test_try_get_nvme_life_time_dbus_not_connected(coresys: CoreSys):
    """Test getting lifetime info from an NVMe when DBUS is not connected."""
    # Set the udisks2 D-Bus connection to None to force a disconnected state.
    coresys.dbus.udisks2.dbus = None

    lifetime = await coresys.hardware.disk.get_disk_life_time(
        coresys.config.path_supervisor
    )
    assert lifetime is None

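The new test above forces the UDisks2 D-Bus connection away (dbus = None) and expects get_disk_life_time to return None instead of raising. In other words, the lifetime lookup should bail out early when D-Bus is unavailable, roughly like the hypothetical guard below (everything except the attribute the test patches is an assumption):

async def get_disk_life_time_sketch(coresys, path) -> float | None:
    """Sketch: skip the UDisks2 lookup entirely when the bus is not connected."""
    if coresys.dbus.udisks2.dbus is None:  # the exact condition the test forces
        return None
    ...  # otherwise resolve the block device for path and query its wear level
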
@@ -25,13 +25,18 @@ async def test_evaluation(coresys: CoreSys):
    assert docker_configuration.reason in coresys.resolution.unsupported
    coresys.resolution.unsupported.clear()

    coresys.docker.info.storage = EXPECTED_STORAGE
    coresys.docker.info.storage = EXPECTED_STORAGE[0]
    coresys.docker.info.logging = "unsupported"
    await docker_configuration()
    assert docker_configuration.reason in coresys.resolution.unsupported
    coresys.resolution.unsupported.clear()

    coresys.docker.info.storage = EXPECTED_STORAGE
    coresys.docker.info.storage = "overlay2"
    coresys.docker.info.logging = EXPECTED_LOGGING
    await docker_configuration()
    assert docker_configuration.reason not in coresys.resolution.unsupported

    coresys.docker.info.storage = "overlayfs"
    coresys.docker.info.logging = EXPECTED_LOGGING
    await docker_configuration()
    assert docker_configuration.reason not in coresys.resolution.unsupported

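The updated assertions suggest EXPECTED_STORAGE is now a collection of accepted storage drivers (the test indexes it and accepts both overlay2 and overlayfs), so the evaluation reduces to a membership check. A hedged sketch of that check follows; the concrete values are assumptions inferred from the test, not quoted from the Supervisor source.

# Assumed values, inferred from the assertions above
EXPECTED_STORAGE = ("overlay2", "overlayfs")
EXPECTED_LOGGING = "journald"  # assumption; the test only references the constant


def docker_configuration_supported(storage_driver: str, logging_driver: str) -> bool:
    """Sketch: configuration is supported only if both drivers are expected."""
    return storage_driver in EXPECTED_STORAGE and logging_driver == EXPECTED_LOGGING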