Merge branch 'dev' into freeze_timeout_installing_packages

commit 837d20ab1b
J. Nick Koston, 2025-04-16 15:39:23 -10:00, committed by GitHub
GPG key ID: B5690EEEBB952194 (no known key found for this signature in database)
3280 changed files with 209158 additions and 41922 deletions

@@ -1,5 +1,6 @@
 name: Report an issue with Home Assistant Core
 description: Report an issue with Home Assistant Core.
+type: Bug
 body:
   - type: markdown
     attributes:

.github/copilot-instructions.md (vendored, new file, +100 lines):
# Instructions for GitHub Copilot
This repository holds the core of Home Assistant, a Python 3 based home
automation application.
- Python code must be compatible with Python 3.13
- Use the newest Python language features if possible:
- Pattern matching
- Type hints
- f-strings for string formatting over `%` or `.format()`
- Dataclasses
- Walrus operator
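  For instance, a minimal sketch (with illustrative names, not taken from the codebase) combining pattern matching, type hints, f-strings, a dataclass, and the walrus operator:
  ```python
  from dataclasses import dataclass


  @dataclass
  class Event:
      """Illustrative event with a kind and a payload."""

      kind: str
      payload: dict[str, str]


  def describe(event: Event) -> str:
      """Describe an event using pattern matching and the walrus operator."""
      match event:
          case Event(kind="state_changed", payload=payload) if (
              entity := payload.get("entity_id")
          ):
              return f"State changed for {entity}"
          case Event(kind=kind):
              return f"Unhandled event kind: {kind}"
          case _:
              return "Not an event"
  ```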
- Code quality tools:
- Formatting: Ruff
- Linting: PyLint and Ruff
- Type checking: MyPy
- Testing: pytest with plain functions and fixtures
- Inline code documentation:
- File headers should be short and concise:
```python
"""Integration for Peblar EV chargers."""
```
- Every method and function needs a docstring:
```python
async def async_setup_entry(hass: HomeAssistant, entry: PeblarConfigEntry) -> bool:
"""Set up Peblar from a config entry."""
...
```
- All code, comments, and other text are written in American English
- Follow existing code style patterns as much as possible
- Core locations:
- Shared constants: `homeassistant/const.py`, use them instead of hardcoding
strings or creating duplicate integration constants.
- Integration files:
- Constants: `homeassistant/components/{domain}/const.py`
- Models: `homeassistant/components/{domain}/models.py`
- Coordinator: `homeassistant/components/{domain}/coordinator.py`
- Config flow: `homeassistant/components/{domain}/config_flow.py`
- Platform code: `homeassistant/components/{domain}/{platform}.py`
- All external I/O operations must be async
- Async patterns:
- Avoid sleeping in loops
- Avoid awaiting in loops; gather instead (see the sketch below)
- No blocking calls
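  For example, one way to gather instead of awaiting in a loop (`async_get_data` is a hypothetical client method):
  ```python
  import asyncio


  async def async_update_all(clients: list) -> list:
      """Fetch data from all clients concurrently instead of awaiting one by one."""
      # A single await for the whole batch avoids serializing the I/O.
      return await asyncio.gather(*(client.async_get_data() for client in clients))
  ```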
- Polling:
- Follow update coordinator pattern, when possible
- The polling interval must not be configurable by the user
- For local network polling, the minimum interval is 5 seconds
- For cloud polling, the minimum interval is 60 seconds
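  A minimal coordinator sketch under these rules, assuming a hypothetical `client` object with an `async_get_data` method:
  ```python
  from datetime import timedelta
  import logging

  from homeassistant.core import HomeAssistant
  from homeassistant.helpers.update_coordinator import DataUpdateCoordinator

  _LOGGER = logging.getLogger(__name__)


  class PeblarDataUpdateCoordinator(DataUpdateCoordinator):
      """Poll a local Peblar device, respecting the 5-second local minimum."""

      def __init__(self, hass: HomeAssistant, client) -> None:
          """Initialize the coordinator."""
          super().__init__(
              hass,
              _LOGGER,
              name="peblar",
              update_interval=timedelta(seconds=5),  # minimum for local polling
          )
          self.client = client  # hypothetical client object

      async def _async_update_data(self):
          """Fetch the latest data from the device."""
          return await self.client.async_get_data()
  ```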
- Error handling:
- Use specific exceptions from `homeassistant.exceptions`
- Setup failures:
- Temporary: Raise `ConfigEntryNotReady`
- Permanent: Use `ConfigEntryError`
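  A sketch of both setup-failure cases (`ExampleClient` and its exceptions are hypothetical):
  ```python
  from homeassistant.core import HomeAssistant
  from homeassistant.exceptions import ConfigEntryError, ConfigEntryNotReady


  async def async_setup_entry(hass: HomeAssistant, entry) -> bool:
      """Set up the integration from a config entry."""
      client = ExampleClient(entry.data["host"])  # hypothetical client library
      try:
          await client.async_connect()
      except ExampleConnectionError as err:  # hypothetical: device unreachable
          # Temporary failure: Home Assistant retries the setup with backoff.
          raise ConfigEntryNotReady("Device is not responding") from err
      except ExampleAuthError as err:  # hypothetical: credentials rejected
          # Permanent failure: retrying will not help without user action.
          raise ConfigEntryError("Invalid credentials") from err
      return True
  ```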
- Logging:
- Message format:
- No periods at end
- No integration names or domains (added automatically)
- No sensitive data (keys, tokens, passwords), even when those are incorrect.
- Be very restrictive with the use of info-level log messages; use debug for
  anything that is not targeting the user.
- Use lazy logging (no f-strings):
```python
_LOGGER.debug("This is a log message with %s", variable)
```
- Entities:
- Ensure unique IDs for state persistence:
- Unique IDs should not contain values that are subject to user or network change.
- An ID needs to be unique per platform, not per integration.
- The ID does not have to contain the integration domain or platform.
- Acceptable examples:
- Serial number of a device
- MAC address of a device formatted using `homeassistant.helpers.device_registry.format_mac`
Do not obtain the MAC address through the ARP cache or local network access;
only use the MAC address provided by discovery or by the device itself.
- Unique identifier that is physically printed on the device or burned into an EEPROM
- Not acceptable examples:
- IP Address
- Device name
- Hostname
- URL
- Email address
- Username
- For entities that are set up by a config entry, the config entry ID
  can be used as a last resort if no other unique ID is available.
  For example: `f"{entry.entry_id}-battery"`
- If the state value is unknown, use `None`
- Do not use the `unavailable` string as a state value;
  implement the `available` property instead
- Do not use the `unknown` string as a state value, use `None` instead
- Extra entity state attributes:
- The keys of all state attributes should always be present
- If the value is unknown, use `None`
- Provide descriptive state attributes
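  Putting the entity rules together in one sketch (a coordinator-backed sensor; the data keys are illustrative):
  ```python
  from homeassistant.components.sensor import SensorEntity
  from homeassistant.helpers.device_registry import format_mac
  from homeassistant.helpers.update_coordinator import CoordinatorEntity


  class ExampleBatterySensor(CoordinatorEntity, SensorEntity):
      """Illustrative sensor following the unique ID and state-value rules."""

      def __init__(self, coordinator, mac: str) -> None:
          """Initialize the sensor with a stable unique ID."""
          super().__init__(coordinator)
          # A formatted MAC survives renames and IP changes; the suffix only
          # needs to be unique within this platform.
          self._attr_unique_id = f"{format_mac(mac)}-battery"

      @property
      def available(self) -> bool:
          """Report availability instead of using an 'unavailable' state value."""
          return super().available and self.coordinator.data is not None

      @property
      def native_value(self) -> int | None:
          """Return the battery level, or None when it is unknown."""
          if (data := self.coordinator.data) is None:
              return None
          return data.get("battery")

      @property
      def extra_state_attributes(self) -> dict[str, str | None]:
          """Return extra attributes; keys are always present, values may be None."""
          data = self.coordinator.data or {}
          return {"charger_state": data.get("charger_state")}
  ```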
- Testing:
- Test location: `tests/components/{domain}/`
- Use pytest fixtures from `tests.common`
- Mock external dependencies
- Use snapshots for complex data
- Follow existing test patterns
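  A minimal test sketch following those points (the `example` domain and client path are hypothetical):
  ```python
  """Tests for a hypothetical example integration."""

  from unittest.mock import AsyncMock, patch

  import pytest

  from homeassistant.core import HomeAssistant

  from tests.common import MockConfigEntry


  @pytest.fixture
  def mock_config_entry() -> MockConfigEntry:
      """Return a mock config entry for the hypothetical example domain."""
      return MockConfigEntry(domain="example", data={"host": "127.0.0.1"})


  async def test_setup_entry(
      hass: HomeAssistant, mock_config_entry: MockConfigEntry
  ) -> None:
      """Test that the config entry is set up without errors."""
      mock_config_entry.add_to_hass(hass)
      # Mock the external dependency so no real network I/O happens.
      with patch("homeassistant.components.example.ExampleClient") as mock_client:
          mock_client.return_value.async_get_data = AsyncMock(
              return_value={"battery": 50}
          )
          assert await hass.config_entries.async_setup(mock_config_entry.entry_id)
          await hass.async_block_till_done()
  ```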


@@ -32,7 +32,7 @@ jobs:
           fetch-depth: 0
       - name: Set up Python ${{ env.DEFAULT_PYTHON }}
-        uses: actions/setup-python@v5.4.0
+        uses: actions/setup-python@v5.5.0
         with:
           python-version: ${{ env.DEFAULT_PYTHON }}
@@ -69,7 +69,7 @@ jobs:
         run: find ./homeassistant/components/*/translations -name "*.json" | tar zcvf translations.tar.gz -T -
       - name: Upload translations
-        uses: actions/upload-artifact@v4.6.0
+        uses: actions/upload-artifact@v4.6.2
         with:
           name: translations
           path: translations.tar.gz
@@ -94,7 +94,7 @@ jobs:
       - name: Download nightly wheels of frontend
         if: needs.init.outputs.channel == 'dev'
-        uses: dawidd6/action-download-artifact@v8
+        uses: dawidd6/action-download-artifact@v9
         with:
           github_token: ${{secrets.GITHUB_TOKEN}}
           repo: home-assistant/frontend
@@ -105,7 +105,7 @@ jobs:
       - name: Download nightly wheels of intents
         if: needs.init.outputs.channel == 'dev'
-        uses: dawidd6/action-download-artifact@v8
+        uses: dawidd6/action-download-artifact@v9
         with:
           github_token: ${{secrets.GITHUB_TOKEN}}
           repo: home-assistant/intents-package
@@ -116,7 +116,7 @@ jobs:
       - name: Set up Python ${{ env.DEFAULT_PYTHON }}
         if: needs.init.outputs.channel == 'dev'
-        uses: actions/setup-python@v5.4.0
+        uses: actions/setup-python@v5.5.0
         with:
           python-version: ${{ env.DEFAULT_PYTHON }}
@@ -175,7 +175,7 @@ jobs:
           sed -i "s|pykrakenapi|# pykrakenapi|g" requirements_all.txt
       - name: Download translations
-        uses: actions/download-artifact@v4.1.8
+        uses: actions/download-artifact@v4.2.1
         with:
           name: translations
@@ -190,14 +190,14 @@ jobs:
           echo "${{ github.sha }};${{ github.ref }};${{ github.event_name }};${{ github.actor }}" > rootfs/OFFICIAL_IMAGE
       - name: Login to GitHub Container Registry
-        uses: docker/login-action@v3.3.0
+        uses: docker/login-action@v3.4.0
         with:
           registry: ghcr.io
           username: ${{ github.repository_owner }}
           password: ${{ secrets.GITHUB_TOKEN }}
       - name: Build base image
-        uses: home-assistant/builder@2024.08.2
+        uses: home-assistant/builder@2025.03.0
         with:
           args: |
             $BUILD_ARGS \
@@ -256,14 +256,14 @@ jobs:
           fi
       - name: Login to GitHub Container Registry
-        uses: docker/login-action@v3.3.0
+        uses: docker/login-action@v3.4.0
         with:
           registry: ghcr.io
           username: ${{ github.repository_owner }}
           password: ${{ secrets.GITHUB_TOKEN }}
       - name: Build base image
-        uses: home-assistant/builder@2024.08.2
+        uses: home-assistant/builder@2025.03.0
         with:
           args: |
             $BUILD_ARGS \
@@ -324,20 +324,20 @@ jobs:
         uses: actions/checkout@v4.2.2
       - name: Install Cosign
-        uses: sigstore/cosign-installer@v3.8.0
+        uses: sigstore/cosign-installer@v3.8.1
         with:
           cosign-release: "v2.2.3"
       - name: Login to DockerHub
         if: matrix.registry == 'docker.io/homeassistant'
-        uses: docker/login-action@v3.3.0
+        uses: docker/login-action@v3.4.0
         with:
           username: ${{ secrets.DOCKERHUB_USERNAME }}
           password: ${{ secrets.DOCKERHUB_TOKEN }}
       - name: Login to GitHub Container Registry
         if: matrix.registry == 'ghcr.io/home-assistant'
-        uses: docker/login-action@v3.3.0
+        uses: docker/login-action@v3.4.0
         with:
           registry: ghcr.io
           username: ${{ github.repository_owner }}
@@ -448,18 +448,21 @@ jobs:
     environment: ${{ needs.init.outputs.channel }}
     needs: ["init", "build_base"]
     runs-on: ubuntu-latest
+    permissions:
+      contents: read
+      id-token: write
     if: github.repository_owner == 'home-assistant' && needs.init.outputs.publish == 'true'
     steps:
       - name: Checkout the repository
         uses: actions/checkout@v4.2.2
       - name: Set up Python ${{ env.DEFAULT_PYTHON }}
-        uses: actions/setup-python@v5.4.0
+        uses: actions/setup-python@v5.5.0
         with:
           python-version: ${{ env.DEFAULT_PYTHON }}
       - name: Download translations
-        uses: actions/download-artifact@v4.1.8
+        uses: actions/download-artifact@v4.2.1
         with:
           name: translations
@@ -473,16 +476,13 @@ jobs:
         run: |
           # Remove dist, build, and homeassistant.egg-info
           # when build locally for testing!
-          pip install twine build
+          pip install build
           python -m build
-      - name: Upload package
-        shell: bash
-        run: |
-          export TWINE_USERNAME="__token__"
-          export TWINE_PASSWORD="${{ secrets.TWINE_TOKEN }}"
-          twine upload dist/* --skip-existing
+      - name: Upload package to PyPI
+        uses: pypa/gh-action-pypi-publish@v1.12.4
+        with:
+          skip-existing: true

   hassfest-image:
     name: Build and test hassfest image
@@ -502,14 +502,14 @@ jobs:
         uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
       - name: Login to GitHub Container Registry
-        uses: docker/login-action@9780b0c442fbb1117ed29e0efdff1e18412f7567 # v3.3.0
+        uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3.4.0
         with:
           registry: ghcr.io
           username: ${{ github.repository_owner }}
           password: ${{ secrets.GITHUB_TOKEN }}
       - name: Build Docker image
-        uses: docker/build-push-action@ca877d9245402d1537745e0e356eab47c3520991 # v6.13.0
+        uses: docker/build-push-action@471d1dc4e07e5cdedd4c2171150001c434f0b7a4 # v6.15.0
         with:
           context: . # So action will not pull the repository again
           file: ./script/hassfest/docker/Dockerfile
@@ -522,7 +522,7 @@ jobs:
       - name: Push Docker image
         if: needs.init.outputs.channel != 'dev' && needs.init.outputs.publish == 'true'
         id: push
-        uses: docker/build-push-action@ca877d9245402d1537745e0e356eab47c3520991 # v6.13.0
+        uses: docker/build-push-action@471d1dc4e07e5cdedd4c2171150001c434f0b7a4 # v6.15.0
         with:
           context: . # So action will not pull the repository again
           file: ./script/hassfest/docker/Dockerfile
@@ -531,7 +531,7 @@ jobs:
       - name: Generate artifact attestation
         if: needs.init.outputs.channel != 'dev' && needs.init.outputs.publish == 'true'
-        uses: actions/attest-build-provenance@520d128f165991a6c774bcb264f323e3d70747f4 # v2.2.0
+        uses: actions/attest-build-provenance@c074443f1aee8d4aeeae555aebba3282517141b2 # v2.2.3
         with:
           subject-name: ${{ env.HASSFEST_IMAGE_NAME }}
           subject-digest: ${{ steps.push.outputs.digest }}


@@ -37,10 +37,10 @@ on:
         type: boolean

 env:
-  CACHE_VERSION: 11
+  CACHE_VERSION: 12
   UV_CACHE_VERSION: 1
   MYPY_CACHE_VERSION: 9
-  HA_SHORT_VERSION: "2025.3"
+  HA_SHORT_VERSION: "2025.5"
   DEFAULT_PYTHON: "3.13"
   ALL_PYTHON_VERSIONS: "['3.13']"
   # 10.3 is the oldest supported version
@@ -89,6 +89,7 @@ jobs:
       test_groups: ${{ steps.info.outputs.test_groups }}
       tests_glob: ${{ steps.info.outputs.tests_glob }}
       tests: ${{ steps.info.outputs.tests }}
+      lint_only: ${{ steps.info.outputs.lint_only }}
       skip_coverage: ${{ steps.info.outputs.skip_coverage }}
     runs-on: ubuntu-24.04
     steps:
@@ -142,6 +143,7 @@ jobs:
           test_group_count=10
           tests="[]"
           tests_glob=""
+          lint_only=""
           skip_coverage=""

           if [[ "${{ steps.integrations.outputs.changes }}" != "[]" ]];
@@ -192,6 +194,17 @@ jobs:
             test_full_suite="true"
           fi

+          if [[ "${{ github.event.inputs.lint-only }}" == "true" ]] \
+            || [[ "${{ github.event.inputs.pylint-only }}" == "true" ]] \
+            || [[ "${{ github.event.inputs.mypy-only }}" == "true" ]] \
+            || [[ "${{ github.event.inputs.audit-licenses-only }}" == "true" ]] \
+            || [[ "${{ github.event_name }}" == "push" \
+              && "${{ github.event.repository.full_name }}" != "home-assistant/core" ]];
+          then
+            lint_only="true"
+            skip_coverage="true"
+          fi
+
           if [[ "${{ github.event.inputs.skip-coverage }}" == "true" ]] \
             || [[ "${{ contains(github.event.pull_request.labels.*.name, 'ci-skip-coverage') }}" == "true" ]];
           then
@@ -217,6 +230,8 @@ jobs:
           echo "tests=${tests}" >> $GITHUB_OUTPUT
           echo "tests_glob: ${tests_glob}"
           echo "tests_glob=${tests_glob}" >> $GITHUB_OUTPUT
+          echo "lint_only: ${lint_only}"
+          echo "lint_only=${lint_only}" >> $GITHUB_OUTPUT
           echo "skip_coverage: ${skip_coverage}"
           echo "skip_coverage=${skip_coverage}" >> $GITHUB_OUTPUT
@@ -234,13 +249,13 @@ jobs:
         uses: actions/checkout@v4.2.2
       - name: Set up Python ${{ env.DEFAULT_PYTHON }}
         id: python
-        uses: actions/setup-python@v5.4.0
+        uses: actions/setup-python@v5.5.0
         with:
           python-version: ${{ env.DEFAULT_PYTHON }}
           check-latest: true
       - name: Restore base Python virtual environment
         id: cache-venv
-        uses: actions/cache@v4.2.0
+        uses: actions/cache@v4.2.3
         with:
           path: venv
           key: >-
@@ -256,7 +271,7 @@ jobs:
           uv pip install "$(cat requirements_test.txt | grep pre-commit)"
       - name: Restore pre-commit environment from cache
         id: cache-precommit
-        uses: actions/cache@v4.2.0
+        uses: actions/cache@v4.2.3
         with:
           path: ${{ env.PRE_COMMIT_CACHE }}
           lookup-only: true
@@ -279,14 +294,14 @@ jobs:
       - name: Check out code from GitHub
         uses: actions/checkout@v4.2.2
       - name: Set up Python ${{ env.DEFAULT_PYTHON }}
-        uses: actions/setup-python@v5.4.0
+        uses: actions/setup-python@v5.5.0
         id: python
         with:
           python-version: ${{ env.DEFAULT_PYTHON }}
           check-latest: true
       - name: Restore base Python virtual environment
         id: cache-venv
-        uses: actions/cache/restore@v4.2.0
+        uses: actions/cache/restore@v4.2.3
         with:
           path: venv
           fail-on-cache-miss: true
@@ -295,7 +310,7 @@ jobs:
             needs.info.outputs.pre-commit_cache_key }}
       - name: Restore pre-commit environment from cache
         id: cache-precommit
-        uses: actions/cache/restore@v4.2.0
+        uses: actions/cache/restore@v4.2.3
         with:
           path: ${{ env.PRE_COMMIT_CACHE }}
           fail-on-cache-miss: true
@@ -319,14 +334,14 @@ jobs:
       - name: Check out code from GitHub
         uses: actions/checkout@v4.2.2
       - name: Set up Python ${{ env.DEFAULT_PYTHON }}
-        uses: actions/setup-python@v5.4.0
+        uses: actions/setup-python@v5.5.0
         id: python
         with:
           python-version: ${{ env.DEFAULT_PYTHON }}
           check-latest: true
       - name: Restore base Python virtual environment
         id: cache-venv
-        uses: actions/cache/restore@v4.2.0
+        uses: actions/cache/restore@v4.2.3
         with:
           path: venv
           fail-on-cache-miss: true
@@ -335,7 +350,7 @@ jobs:
             needs.info.outputs.pre-commit_cache_key }}
       - name: Restore pre-commit environment from cache
         id: cache-precommit
-        uses: actions/cache/restore@v4.2.0
+        uses: actions/cache/restore@v4.2.3
         with:
           path: ${{ env.PRE_COMMIT_CACHE }}
           fail-on-cache-miss: true
@@ -359,14 +374,14 @@ jobs:
       - name: Check out code from GitHub
         uses: actions/checkout@v4.2.2
       - name: Set up Python ${{ env.DEFAULT_PYTHON }}
-        uses: actions/setup-python@v5.4.0
+        uses: actions/setup-python@v5.5.0
         id: python
         with:
           python-version: ${{ env.DEFAULT_PYTHON }}
           check-latest: true
       - name: Restore base Python virtual environment
         id: cache-venv
-        uses: actions/cache/restore@v4.2.0
+        uses: actions/cache/restore@v4.2.3
         with:
           path: venv
           fail-on-cache-miss: true
@@ -375,7 +390,7 @@ jobs:
             needs.info.outputs.pre-commit_cache_key }}
       - name: Restore pre-commit environment from cache
         id: cache-precommit
-        uses: actions/cache/restore@v4.2.0
+        uses: actions/cache/restore@v4.2.3
         with:
           path: ${{ env.PRE_COMMIT_CACHE }}
           fail-on-cache-miss: true
@@ -469,7 +484,7 @@ jobs:
         uses: actions/checkout@v4.2.2
       - name: Set up Python ${{ matrix.python-version }}
         id: python
-        uses: actions/setup-python@v5.4.0
+        uses: actions/setup-python@v5.5.0
         with:
           python-version: ${{ matrix.python-version }}
           check-latest: true
@@ -482,7 +497,7 @@ jobs:
             env.HA_SHORT_VERSION }}-$(date -u '+%Y-%m-%dT%H:%M:%s')" >> $GITHUB_OUTPUT
       - name: Restore base Python virtual environment
         id: cache-venv
-        uses: actions/cache@v4.2.0
+        uses: actions/cache@v4.2.3
         with:
           path: venv
           key: >-
@@ -490,7 +505,7 @@ jobs:
             needs.info.outputs.python_cache_key }}
       - name: Restore uv wheel cache
         if: steps.cache-venv.outputs.cache-hit != 'true'
-        uses: actions/cache@v4.2.0
+        uses: actions/cache@v4.2.3
         with:
           path: ${{ env.UV_CACHE_DIR }}
           key: >-
@@ -537,7 +552,7 @@ jobs:
           python --version
           uv pip freeze >> pip_freeze.txt
       - name: Upload pip_freeze artifact
-        uses: actions/upload-artifact@v4.6.0
+        uses: actions/upload-artifact@v4.6.2
         with:
           name: pip-freeze-${{ matrix.python-version }}
           path: pip_freeze.txt
@@ -572,13 +587,13 @@ jobs:
         uses: actions/checkout@v4.2.2
       - name: Set up Python ${{ env.DEFAULT_PYTHON }}
         id: python
-        uses: actions/setup-python@v5.4.0
+        uses: actions/setup-python@v5.5.0
         with:
           python-version: ${{ env.DEFAULT_PYTHON }}
           check-latest: true
       - name: Restore full Python ${{ env.DEFAULT_PYTHON }} virtual environment
         id: cache-venv
-        uses: actions/cache/restore@v4.2.0
+        uses: actions/cache/restore@v4.2.3
         with:
           path: venv
           fail-on-cache-miss: true
@@ -605,13 +620,13 @@ jobs:
         uses: actions/checkout@v4.2.2
       - name: Set up Python ${{ env.DEFAULT_PYTHON }}
         id: python
-        uses: actions/setup-python@v5.4.0
+        uses: actions/setup-python@v5.5.0
         with:
           python-version: ${{ env.DEFAULT_PYTHON }}
           check-latest: true
       - name: Restore base Python virtual environment
         id: cache-venv
-        uses: actions/cache/restore@v4.2.0
+        uses: actions/cache/restore@v4.2.3
         with:
           path: venv
           fail-on-cache-miss: true
@@ -623,6 +638,25 @@ jobs:
           . venv/bin/activate
           python -m script.gen_requirements_all validate

+  dependency-review:
+    name: Dependency review
+    runs-on: ubuntu-24.04
+    needs:
+      - info
+      - base
+    if: |
+      github.event.inputs.pylint-only != 'true'
+      && github.event.inputs.mypy-only != 'true'
+      && needs.info.outputs.requirements == 'true'
+      && github.event_name == 'pull_request'
+    steps:
+      - name: Check out code from GitHub
+        uses: actions/checkout@v4.2.2
+      - name: Dependency review
+        uses: actions/dependency-review-action@v4.6.0
+        with:
+          license-check: false # We use our own license audit checks
+
   audit-licenses:
     name: Audit licenses
     runs-on: ubuntu-24.04
@@ -643,13 +677,13 @@ jobs:
         uses: actions/checkout@v4.2.2
       - name: Set up Python ${{ matrix.python-version }}
         id: python
-        uses: actions/setup-python@v5.4.0
+        uses: actions/setup-python@v5.5.0
         with:
           python-version: ${{ matrix.python-version }}
           check-latest: true
       - name: Restore full Python ${{ matrix.python-version }} virtual environment
         id: cache-venv
-        uses: actions/cache/restore@v4.2.0
+        uses: actions/cache/restore@v4.2.3
         with:
           path: venv
           fail-on-cache-miss: true
@@ -661,7 +695,7 @@ jobs:
           . venv/bin/activate
           python -m script.licenses extract --output-file=licenses-${{ matrix.python-version }}.json
       - name: Upload licenses
-        uses: actions/upload-artifact@v4.6.0
+        uses: actions/upload-artifact@v4.6.2
         with:
           name: licenses-${{ github.run_number }}-${{ matrix.python-version }}
           path: licenses-${{ matrix.python-version }}.json
@@ -686,13 +720,13 @@ jobs:
         uses: actions/checkout@v4.2.2
       - name: Set up Python ${{ env.DEFAULT_PYTHON }}
         id: python
-        uses: actions/setup-python@v5.4.0
+        uses: actions/setup-python@v5.5.0
         with:
           python-version: ${{ env.DEFAULT_PYTHON }}
           check-latest: true
       - name: Restore full Python ${{ env.DEFAULT_PYTHON }} virtual environment
         id: cache-venv
-        uses: actions/cache/restore@v4.2.0
+        uses: actions/cache/restore@v4.2.3
         with:
           path: venv
           fail-on-cache-miss: true
@@ -733,13 +767,13 @@ jobs:
         uses: actions/checkout@v4.2.2
       - name: Set up Python ${{ env.DEFAULT_PYTHON }}
         id: python
-        uses: actions/setup-python@v5.4.0
+        uses: actions/setup-python@v5.5.0
         with:
           python-version: ${{ env.DEFAULT_PYTHON }}
           check-latest: true
       - name: Restore full Python ${{ env.DEFAULT_PYTHON }} virtual environment
         id: cache-venv
-        uses: actions/cache/restore@v4.2.0
+        uses: actions/cache/restore@v4.2.3
         with:
           path: venv
           fail-on-cache-miss: true
@@ -778,7 +812,7 @@ jobs:
         uses: actions/checkout@v4.2.2
       - name: Set up Python ${{ env.DEFAULT_PYTHON }}
         id: python
-        uses: actions/setup-python@v5.4.0
+        uses: actions/setup-python@v5.5.0
         with:
           python-version: ${{ env.DEFAULT_PYTHON }}
           check-latest: true
@@ -791,7 +825,7 @@ jobs:
             env.HA_SHORT_VERSION }}-$(date -u '+%Y-%m-%dT%H:%M:%s')" >> $GITHUB_OUTPUT
       - name: Restore full Python ${{ env.DEFAULT_PYTHON }} virtual environment
         id: cache-venv
-        uses: actions/cache/restore@v4.2.0
+        uses: actions/cache/restore@v4.2.3
         with:
           path: venv
           fail-on-cache-miss: true
@@ -799,7 +833,7 @@ jobs:
           ${{ runner.os }}-${{ steps.python.outputs.python-version }}-${{
             needs.info.outputs.python_cache_key }}
       - name: Restore mypy cache
-        uses: actions/cache@v4.2.0
+        uses: actions/cache@v4.2.3
         with:
           path: .mypy_cache
           key: >-
@@ -829,11 +863,7 @@ jobs:
   prepare-pytest-full:
     runs-on: ubuntu-24.04
     if: |
-      (github.event_name != 'push' || github.event.repository.full_name == 'home-assistant/core')
-      && github.event.inputs.lint-only != 'true'
-      && github.event.inputs.pylint-only != 'true'
-      && github.event.inputs.mypy-only != 'true'
-      && github.event.inputs.audit-licenses-only != 'true'
+      needs.info.outputs.lint_only != 'true'
       && needs.info.outputs.test_full_suite == 'true'
     needs:
       - info
@@ -859,13 +889,13 @@ jobs:
         uses: actions/checkout@v4.2.2
       - name: Set up Python ${{ env.DEFAULT_PYTHON }}
         id: python
-        uses: actions/setup-python@v5.4.0
+        uses: actions/setup-python@v5.5.0
         with:
           python-version: ${{ env.DEFAULT_PYTHON }}
           check-latest: true
       - name: Restore base Python virtual environment
         id: cache-venv
-        uses: actions/cache/restore@v4.2.0
+        uses: actions/cache/restore@v4.2.3
         with:
           path: venv
           fail-on-cache-miss: true
@@ -877,7 +907,7 @@ jobs:
           . venv/bin/activate
           python -m script.split_tests ${{ needs.info.outputs.test_group_count }} tests
       - name: Upload pytest_buckets
-        uses: actions/upload-artifact@v4.6.0
+        uses: actions/upload-artifact@v4.6.2
         with:
           name: pytest_buckets
           path: pytest_buckets.txt
@@ -886,11 +916,7 @@ jobs:
   pytest-full:
     runs-on: ubuntu-24.04
     if: |
-      (github.event_name != 'push' || github.event.repository.full_name == 'home-assistant/core')
-      && github.event.inputs.lint-only != 'true'
-      && github.event.inputs.pylint-only != 'true'
-      && github.event.inputs.mypy-only != 'true'
-      && github.event.inputs.audit-licenses-only != 'true'
+      needs.info.outputs.lint_only != 'true'
       && needs.info.outputs.test_full_suite == 'true'
     needs:
       - info
@@ -923,13 +949,13 @@ jobs:
         uses: actions/checkout@v4.2.2
       - name: Set up Python ${{ matrix.python-version }}
         id: python
-        uses: actions/setup-python@v5.4.0
+        uses: actions/setup-python@v5.5.0
         with:
           python-version: ${{ matrix.python-version }}
           check-latest: true
       - name: Restore full Python ${{ matrix.python-version }} virtual environment
         id: cache-venv
-        uses: actions/cache/restore@v4.2.0
+        uses: actions/cache/restore@v4.2.3
         with:
           path: venv
           fail-on-cache-miss: true
@@ -942,7 +968,7 @@ jobs:
         run: |
           echo "::add-matcher::.github/workflows/matchers/pytest-slow.json"
       - name: Download pytest_buckets
-        uses: actions/download-artifact@v4.1.8
+        uses: actions/download-artifact@v4.2.1
         with:
           name: pytest_buckets
       - name: Compile English translations
@@ -962,6 +988,7 @@ jobs:
           if [[ "${{ needs.info.outputs.skip_coverage }}" != "true" ]]; then
             cov_params+=(--cov="homeassistant")
             cov_params+=(--cov-report=xml)
+            cov_params+=(--junitxml=junit.xml -o junit_family=legacy)
           fi

           echo "Test group ${{ matrix.group }}: $(sed -n "${{ matrix.group }},1p" pytest_buckets.txt)"
@@ -980,18 +1007,24 @@ jobs:
           2>&1 | tee pytest-${{ matrix.python-version }}-${{ matrix.group }}.txt
       - name: Upload pytest output
         if: success() || failure() && steps.pytest-full.conclusion == 'failure'
-        uses: actions/upload-artifact@v4.6.0
+        uses: actions/upload-artifact@v4.6.2
         with:
           name: pytest-${{ github.run_number }}-${{ matrix.python-version }}-${{ matrix.group }}
           path: pytest-*.txt
           overwrite: true
       - name: Upload coverage artifact
         if: needs.info.outputs.skip_coverage != 'true'
-        uses: actions/upload-artifact@v4.6.0
+        uses: actions/upload-artifact@v4.6.2
         with:
           name: coverage-${{ matrix.python-version }}-${{ matrix.group }}
           path: coverage.xml
           overwrite: true
+      - name: Upload test results artifact
+        if: needs.info.outputs.skip_coverage != 'true' && !cancelled()
+        uses: actions/upload-artifact@v4.6.2
+        with:
+          name: test-results-full-${{ matrix.python-version }}-${{ matrix.group }}
+          path: junit.xml
       - name: Remove pytest_buckets
         run: rm pytest_buckets.txt
       - name: Check dirty
@@ -1009,11 +1042,7 @@ jobs:
           MYSQL_ROOT_PASSWORD: password
         options: --health-cmd="mysqladmin ping -uroot -ppassword" --health-interval=5s --health-timeout=2s --health-retries=3
     if: |
-      (github.event_name != 'push' || github.event.repository.full_name == 'home-assistant/core')
-      && github.event.inputs.lint-only != 'true'
-      && github.event.inputs.pylint-only != 'true'
-      && github.event.inputs.mypy-only != 'true'
-      && github.event.inputs.audit-licenses-only != 'true'
+      needs.info.outputs.lint_only != 'true'
       && needs.info.outputs.mariadb_groups != '[]'
     needs:
       - info
@@ -1045,13 +1074,13 @@ jobs:
         uses: actions/checkout@v4.2.2
       - name: Set up Python ${{ matrix.python-version }}
         id: python
-        uses: actions/setup-python@v5.4.0
+        uses: actions/setup-python@v5.5.0
         with:
           python-version: ${{ matrix.python-version }}
           check-latest: true
       - name: Restore full Python ${{ matrix.python-version }} virtual environment
         id: cache-venv
-        uses: actions/cache/restore@v4.2.0
+        uses: actions/cache/restore@v4.2.3
         with:
           path: venv
           fail-on-cache-miss: true
@@ -1088,6 +1117,7 @@ jobs:
             cov_params+=(--cov="homeassistant.components.recorder")
             cov_params+=(--cov-report=xml)
             cov_params+=(--cov-report=term-missing)
+            cov_params+=(--junitxml=junit.xml -o junit_family=legacy)
           fi

           python3 -b -X dev -m pytest \
@@ -1108,7 +1138,7 @@ jobs:
           2>&1 | tee pytest-${{ matrix.python-version }}-${mariadb}.txt
       - name: Upload pytest output
         if: success() || failure() && steps.pytest-partial.conclusion == 'failure'
-        uses: actions/upload-artifact@v4.6.0
+        uses: actions/upload-artifact@v4.6.2
         with:
           name: pytest-${{ github.run_number }}-${{ matrix.python-version }}-${{
             steps.pytest-partial.outputs.mariadb }}
@@ -1116,12 +1146,19 @@ jobs:
           overwrite: true
       - name: Upload coverage artifact
         if: needs.info.outputs.skip_coverage != 'true'
-        uses: actions/upload-artifact@v4.6.0
+        uses: actions/upload-artifact@v4.6.2
         with:
           name: coverage-${{ matrix.python-version }}-${{
             steps.pytest-partial.outputs.mariadb }}
           path: coverage.xml
           overwrite: true
+      - name: Upload test results artifact
+        if: needs.info.outputs.skip_coverage != 'true' && !cancelled()
+        uses: actions/upload-artifact@v4.6.2
+        with:
+          name: test-results-mariadb-${{ matrix.python-version }}-${{
+            steps.pytest-partial.outputs.mariadb }}
+          path: junit.xml
       - name: Check dirty
         run: |
           ./script/check_dirty
@@ -1137,11 +1174,7 @@ jobs:
           POSTGRES_PASSWORD: password
         options: --health-cmd="pg_isready -hlocalhost -Upostgres" --health-interval=5s --health-timeout=2s --health-retries=3
     if: |
-      (github.event_name != 'push' || github.event.repository.full_name == 'home-assistant/core')
-      && github.event.inputs.lint-only != 'true'
-      && github.event.inputs.pylint-only != 'true'
-      && github.event.inputs.mypy-only != 'true'
-      && github.event.inputs.audit-licenses-only != 'true'
+      needs.info.outputs.lint_only != 'true'
       && needs.info.outputs.postgresql_groups != '[]'
     needs:
       - info
@@ -1175,13 +1208,13 @@ jobs:
         uses: actions/checkout@v4.2.2
       - name: Set up Python ${{ matrix.python-version }}
         id: python
-        uses: actions/setup-python@v5.4.0
+        uses: actions/setup-python@v5.5.0
         with:
           python-version: ${{ matrix.python-version }}
           check-latest: true
       - name: Restore full Python ${{ matrix.python-version }} virtual environment
         id: cache-venv
-        uses: actions/cache/restore@v4.2.0
+        uses: actions/cache/restore@v4.2.3
         with:
           path: venv
           fail-on-cache-miss: true
@@ -1218,6 +1251,7 @@ jobs:
             cov_params+=(--cov="homeassistant.components.recorder")
             cov_params+=(--cov-report=xml)
             cov_params+=(--cov-report=term-missing)
+            cov_params+=(--junitxml=junit.xml -o junit_family=legacy)
           fi

           python3 -b -X dev -m pytest \
@@ -1239,7 +1273,7 @@ jobs:
           2>&1 | tee pytest-${{ matrix.python-version }}-${postgresql}.txt
       - name: Upload pytest output
         if: success() || failure() && steps.pytest-partial.conclusion == 'failure'
-        uses: actions/upload-artifact@v4.6.0
+        uses: actions/upload-artifact@v4.6.2
         with:
           name: pytest-${{ github.run_number }}-${{ matrix.python-version }}-${{
             steps.pytest-partial.outputs.postgresql }}
@@ -1247,12 +1281,19 @@ jobs:
           overwrite: true
       - name: Upload coverage artifact
         if: needs.info.outputs.skip_coverage != 'true'
-        uses: actions/upload-artifact@v4.6.0
+        uses: actions/upload-artifact@v4.6.2
         with:
           name: coverage-${{ matrix.python-version }}-${{
             steps.pytest-partial.outputs.postgresql }}
           path: coverage.xml
           overwrite: true
+      - name: Upload test results artifact
+        if: needs.info.outputs.skip_coverage != 'true' && !cancelled()
+        uses: actions/upload-artifact@v4.6.2
+        with:
+          name: test-results-postgres-${{ matrix.python-version }}-${{
+            steps.pytest-partial.outputs.postgresql }}
+          path: junit.xml
       - name: Check dirty
         run: |
           ./script/check_dirty
@@ -1271,12 +1312,12 @@ jobs:
       - name: Check out code from GitHub
         uses: actions/checkout@v4.2.2
       - name: Download all coverage artifacts
-        uses: actions/download-artifact@v4.1.8
+        uses: actions/download-artifact@v4.2.1
         with:
           pattern: coverage-*
       - name: Upload coverage to Codecov
         if: needs.info.outputs.test_full_suite == 'true'
-        uses: codecov/codecov-action@v5.3.1
+        uses: codecov/codecov-action@v5.4.2
         with:
           fail_ci_if_error: true
           flags: full-suite
@@ -1285,11 +1326,7 @@ jobs:
   pytest-partial:
     runs-on: ubuntu-24.04
     if: |
-      (github.event_name != 'push' || github.event.repository.full_name == 'home-assistant/core')
-      && github.event.inputs.lint-only != 'true'
-      && github.event.inputs.pylint-only != 'true'
-      && github.event.inputs.mypy-only != 'true'
-      && github.event.inputs.audit-licenses-only != 'true'
+      needs.info.outputs.lint_only != 'true'
       && needs.info.outputs.tests_glob
       && needs.info.outputs.test_full_suite == 'false'
     needs:
@@ -1322,13 +1359,13 @@ jobs:
         uses: actions/checkout@v4.2.2
       - name: Set up Python ${{ matrix.python-version }}
         id: python
-        uses: actions/setup-python@v5.4.0
+        uses: actions/setup-python@v5.5.0
         with:
           python-version: ${{ matrix.python-version }}
           check-latest: true
       - name: Restore full Python ${{ matrix.python-version }} virtual environment
         id: cache-venv
-        uses: actions/cache/restore@v4.2.0
+        uses: actions/cache/restore@v4.2.3
         with:
           path: venv
           fail-on-cache-miss: true
@@ -1365,6 +1402,7 @@ jobs:
             cov_params+=(--cov="homeassistant.components.${{ matrix.group }}")
             cov_params+=(--cov-report=xml)
             cov_params+=(--cov-report=term-missing)
+            cov_params+=(--junitxml=junit.xml -o junit_family=legacy)
           fi

           python3 -b -X dev -m pytest \
@@ -1382,18 +1420,24 @@ jobs:
           2>&1 | tee pytest-${{ matrix.python-version }}-${{ matrix.group }}.txt
       - name: Upload pytest output
         if: success() || failure() && steps.pytest-partial.conclusion == 'failure'
-        uses: actions/upload-artifact@v4.6.0
+        uses: actions/upload-artifact@v4.6.2
         with:
           name: pytest-${{ github.run_number }}-${{ matrix.python-version }}-${{ matrix.group }}
           path: pytest-*.txt
           overwrite: true
       - name: Upload coverage artifact
         if: needs.info.outputs.skip_coverage != 'true'
-        uses: actions/upload-artifact@v4.6.0
+        uses: actions/upload-artifact@v4.6.2
         with:
           name: coverage-${{ matrix.python-version }}-${{ matrix.group }}
           path: coverage.xml
           overwrite: true
+      - name: Upload test results artifact
+        if: needs.info.outputs.skip_coverage != 'true' && !cancelled()
+        uses: actions/upload-artifact@v4.6.2
+        with:
+          name: test-results-partial-${{ matrix.python-version }}-${{ matrix.group }}
+          path: junit.xml
       - name: Check dirty
         run: |
           ./script/check_dirty
@@ -1410,12 +1454,37 @@ jobs:
       - name: Check out code from GitHub
         uses: actions/checkout@v4.2.2
       - name: Download all coverage artifacts
-        uses: actions/download-artifact@v4.1.8
+        uses: actions/download-artifact@v4.2.1
         with:
           pattern: coverage-*
       - name: Upload coverage to Codecov
         if: needs.info.outputs.test_full_suite == 'false'
-        uses: codecov/codecov-action@v5.3.1
+        uses: codecov/codecov-action@v5.4.2
         with:
           fail_ci_if_error: true
           token: ${{ secrets.CODECOV_TOKEN }}
+
+  upload-test-results:
+    name: Upload test results to Codecov
+    # codecov/test-results-action currently doesn't support tokenless uploads
+    # therefore we can't run it on forks
+    if: ${{ (github.event_name != 'pull_request' || !github.event.pull_request.head.repo.fork) && needs.info.outputs.skip_coverage != 'true' && !cancelled() }}
+    runs-on: ubuntu-24.04
+    needs:
+      - info
+      - pytest-partial
+      - pytest-full
+      - pytest-postgres
+      - pytest-mariadb
+    timeout-minutes: 10
+    steps:
+      - name: Download all coverage artifacts
+        uses: actions/download-artifact@v4.2.1
+        with:
+          pattern: test-results-*
+      - name: Upload test results to Codecov
+        uses: codecov/test-results-action@v1
+        with:
+          fail_ci_if_error: true
+          verbose: true
+          token: ${{ secrets.CODECOV_TOKEN }}

@@ -24,11 +24,11 @@ jobs:
         uses: actions/checkout@v4.2.2
       - name: Initialize CodeQL
-        uses: github/codeql-action/init@v3.28.9
+        uses: github/codeql-action/init@v3.28.15
         with:
           languages: python
       - name: Perform CodeQL Analysis
-        uses: github/codeql-action/analyze@v3.28.9
+        uses: github/codeql-action/analyze@v3.28.15
         with:
           category: "/language:python"

@@ -22,7 +22,7 @@ jobs:
         uses: actions/checkout@v4.2.2
       - name: Set up Python ${{ env.DEFAULT_PYTHON }}
-        uses: actions/setup-python@v5.4.0
+        uses: actions/setup-python@v5.5.0
         with:
           python-version: ${{ env.DEFAULT_PYTHON }}

@@ -36,7 +36,7 @@ jobs:
       - name: Set up Python ${{ env.DEFAULT_PYTHON }}
         id: python
-        uses: actions/setup-python@v5.4.0
+        uses: actions/setup-python@v5.5.0
         with:
           python-version: ${{ env.DEFAULT_PYTHON }}
           check-latest: true
@@ -91,7 +91,7 @@ jobs:
           ) > build_constraints.txt
       - name: Upload env_file
-        uses: actions/upload-artifact@v4.6.0
+        uses: actions/upload-artifact@v4.6.2
         with:
           name: env_file
           path: ./.env_file
@@ -99,14 +99,14 @@ jobs:
           overwrite: true
       - name: Upload build_constraints
-        uses: actions/upload-artifact@v4.6.0
+        uses: actions/upload-artifact@v4.6.2
         with:
           name: build_constraints
           path: ./build_constraints.txt
           overwrite: true
       - name: Upload requirements_diff
-        uses: actions/upload-artifact@v4.6.0
+        uses: actions/upload-artifact@v4.6.2
         with:
           name: requirements_diff
           path: ./requirements_diff.txt
@@ -118,7 +118,7 @@ jobs:
           python -m script.gen_requirements_all ci
       - name: Upload requirements_all_wheels
-        uses: actions/upload-artifact@v4.6.0
+        uses: actions/upload-artifact@v4.6.2
         with:
           name: requirements_all_wheels
           path: ./requirements_all_wheels_*.txt
@@ -138,17 +138,17 @@ jobs:
         uses: actions/checkout@v4.2.2
       - name: Download env_file
-        uses: actions/download-artifact@v4.1.8
+        uses: actions/download-artifact@v4.2.1
         with:
           name: env_file
       - name: Download build_constraints
-        uses: actions/download-artifact@v4.1.8
+        uses: actions/download-artifact@v4.2.1
         with:
           name: build_constraints
       - name: Download requirements_diff
-        uses: actions/download-artifact@v4.1.8
+        uses: actions/download-artifact@v4.2.1
         with:
           name: requirements_diff
@@ -159,7 +159,7 @@ jobs:
           sed -i "/uv/d" requirements_diff.txt
       - name: Build wheels
-        uses: home-assistant/wheels@2024.11.0
+        uses: home-assistant/wheels@2025.03.0
         with:
           abi: ${{ matrix.abi }}
           tag: musllinux_1_2
@@ -187,22 +187,22 @@ jobs:
         uses: actions/checkout@v4.2.2
       - name: Download env_file
-        uses: actions/download-artifact@v4.1.8
+        uses: actions/download-artifact@v4.2.1
         with:
           name: env_file
       - name: Download build_constraints
-        uses: actions/download-artifact@v4.1.8
+        uses: actions/download-artifact@v4.2.1
         with:
           name: build_constraints
       - name: Download requirements_diff
-        uses: actions/download-artifact@v4.1.8
+        uses: actions/download-artifact@v4.2.1
         with:
           name: requirements_diff
       - name: Download requirements_all_wheels
-        uses: actions/download-artifact@v4.1.8
+        uses: actions/download-artifact@v4.2.1
         with:
           name: requirements_all_wheels
@@ -218,16 +218,8 @@ jobs:
           sed -i "/uv/d" requirements.txt
           sed -i "/uv/d" requirements_diff.txt
-      - name: Split requirements all
-        run: |
-          # We split requirements all into multiple files.
-          # This is to prevent the build from running out of memory when
-          # resolving packages on 32-bits systems (like armhf, armv7).
-          split -l $(expr $(expr $(cat requirements_all.txt | wc -l) + 1) / 3) requirements_all_wheels_${{ matrix.arch }}.txt requirements_all.txt
-      - name: Build wheels (part 1)
-        uses: home-assistant/wheels@2024.11.0
+      - name: Build wheels
+        uses: home-assistant/wheels@2025.03.0
         with:
           abi: ${{ matrix.abi }}
           tag: musllinux_1_2
@@ -238,32 +230,4 @@ jobs:
           skip-binary: aiohttp;charset-normalizer;grpcio;multidict;SQLAlchemy;propcache;protobuf;pymicro-vad;yarl
           constraints: "homeassistant/package_constraints.txt"
           requirements-diff: "requirements_diff.txt"
-          requirements: "requirements_all.txtaa"
+          requirements: "requirements_all.txt"
-      - name: Build wheels (part 2)
-        uses: home-assistant/wheels@2024.11.0
-        with:
-          abi: ${{ matrix.abi }}
-          tag: musllinux_1_2
-          arch: ${{ matrix.arch }}
-          wheels-key: ${{ secrets.WHEELS_KEY }}
-          env-file: true
-          apk: "bluez-dev;libffi-dev;openssl-dev;glib-dev;eudev-dev;libxml2-dev;libxslt-dev;libpng-dev;libjpeg-turbo-dev;tiff-dev;cups-dev;gmp-dev;mpfr-dev;mpc1-dev;ffmpeg-dev;gammu-dev;yaml-dev;openblas-dev;fftw-dev;lapack-dev;gfortran;blas-dev;eigen-dev;freetype-dev;glew-dev;harfbuzz-dev;hdf5-dev;libdc1394-dev;libtbb-dev;mesa-dev;openexr-dev;openjpeg-dev;uchardet-dev;nasm;zlib-ng-dev"
-          skip-binary: aiohttp;charset-normalizer;grpcio;multidict;SQLAlchemy;propcache;protobuf;pymicro-vad;yarl
-          constraints: "homeassistant/package_constraints.txt"
-          requirements-diff: "requirements_diff.txt"
-          requirements: "requirements_all.txtab"
-      - name: Build wheels (part 3)
-        uses: home-assistant/wheels@2024.11.0
-        with:
-          abi: ${{ matrix.abi }}
-          tag: musllinux_1_2
-          arch: ${{ matrix.arch }}
-          wheels-key: ${{ secrets.WHEELS_KEY }}
-          env-file: true
-          apk: "bluez-dev;libffi-dev;openssl-dev;glib-dev;eudev-dev;libxml2-dev;libxslt-dev;libpng-dev;libjpeg-turbo-dev;tiff-dev;cups-dev;gmp-dev;mpfr-dev;mpc1-dev;ffmpeg-dev;gammu-dev;yaml-dev;openblas-dev;fftw-dev;lapack-dev;gfortran;blas-dev;eigen-dev;freetype-dev;glew-dev;harfbuzz-dev;hdf5-dev;libdc1394-dev;libtbb-dev;mesa-dev;openexr-dev;openjpeg-dev;uchardet-dev;nasm;zlib-ng-dev"
-          skip-binary: aiohttp;charset-normalizer;grpcio;multidict;SQLAlchemy;propcache;protobuf;pymicro-vad;yarl
-          constraints: "homeassistant/package_constraints.txt"
-          requirements-diff: "requirements_diff.txt"
-          requirements: "requirements_all.txtac"

.gitignore (vendored, 1 change):

@@ -69,6 +69,7 @@ test-reports/
 test-results.xml
 test-output.xml
 pytest-*.txt
+junit.xml

 # Translations
 *.mo

@@ -1,6 +1,6 @@
 repos:
   - repo: https://github.com/astral-sh/ruff-pre-commit
-    rev: v0.9.1
+    rev: v0.11.0
     hooks:
       - id: ruff
        args:

@@ -103,6 +103,7 @@ homeassistant.components.auth.*
 homeassistant.components.automation.*
 homeassistant.components.awair.*
 homeassistant.components.axis.*
+homeassistant.components.azure_storage.*
 homeassistant.components.backup.*
 homeassistant.components.baf.*
 homeassistant.components.bang_olufsen.*
@@ -118,6 +119,7 @@ homeassistant.components.bluetooth_adapters.*
 homeassistant.components.bluetooth_tracker.*
 homeassistant.components.bmw_connected_drive.*
 homeassistant.components.bond.*
+homeassistant.components.bosch_alarm.*
 homeassistant.components.braviatv.*
 homeassistant.components.bring.*
 homeassistant.components.brother.*
@@ -135,6 +137,7 @@ homeassistant.components.clicksend.*
 homeassistant.components.climate.*
 homeassistant.components.cloud.*
 homeassistant.components.co2signal.*
+homeassistant.components.comelit.*
 homeassistant.components.command_line.*
 homeassistant.components.config.*
 homeassistant.components.configurator.*
@@ -234,6 +237,7 @@ homeassistant.components.here_travel_time.*
 homeassistant.components.history.*
 homeassistant.components.history_stats.*
 homeassistant.components.holiday.*
+homeassistant.components.home_connect.*
 homeassistant.components.homeassistant.*
 homeassistant.components.homeassistant_alerts.*
 homeassistant.components.homeassistant_green.*
@@ -287,6 +291,7 @@ homeassistant.components.kaleidescape.*
 homeassistant.components.knocki.*
 homeassistant.components.knx.*
 homeassistant.components.kraken.*
+homeassistant.components.kulersky.*
 homeassistant.components.lacrosse.*
 homeassistant.components.lacrosse_view.*
 homeassistant.components.lamarzocco.*
@@ -360,6 +365,7 @@ homeassistant.components.notify.*
 homeassistant.components.notion.*
 homeassistant.components.number.*
 homeassistant.components.nut.*
+homeassistant.components.ohme.*
 homeassistant.components.onboarding.*
 homeassistant.components.oncue.*
 homeassistant.components.onedrive.*
@@ -394,6 +400,7 @@ homeassistant.components.pure_energie.*
 homeassistant.components.purpleair.*
 homeassistant.components.pushbullet.*
 homeassistant.components.pvoutput.*
+homeassistant.components.pyload.*
 homeassistant.components.python_script.*
 homeassistant.components.qbus.*
 homeassistant.components.qnap_qsw.*
@@ -406,7 +413,9 @@ homeassistant.components.raspberry_pi.*
 homeassistant.components.rdw.*
 homeassistant.components.recollect_waste.*
 homeassistant.components.recorder.*
+homeassistant.components.remember_the_milk.*
 homeassistant.components.remote.*
+homeassistant.components.remote_calendar.*
 homeassistant.components.renault.*
 homeassistant.components.reolink.*
 homeassistant.components.repairs.*
@@ -437,6 +446,7 @@ homeassistant.components.select.*
 homeassistant.components.sensibo.*
 homeassistant.components.sensirion_ble.*
 homeassistant.components.sensor.*
+homeassistant.components.sensorpush_cloud.*
 homeassistant.components.sensoterra.*
 homeassistant.components.senz.*
 homeassistant.components.sfr_box.*
@@ -524,6 +534,7 @@ homeassistant.components.vallox.*
 homeassistant.components.valve.*
 homeassistant.components.vlc_telnet.*
+homeassistant.components.vodafone_station.*
 homeassistant.components.wake_on_lan.*
 homeassistant.components.wake_word.*
 homeassistant.components.wallbox.*

.vscode/launch.json

@@ -38,10 +38,17 @@
             "module": "pytest",
             "justMyCode": false,
             "args": [
-                "--timeout=10",
                 "--picked"
             ],
         },
+        {
+            "name": "Home Assistant: Debug Current Test File",
+            "type": "debugpy",
+            "request": "launch",
+            "module": "pytest",
+            "console": "integratedTerminal",
+            "args": ["-vv", "${file}"]
+        },
         {
             // Debug by attaching to local Home Assistant server using Remote Python Debugger.
             // See https://www.home-assistant.io/integrations/debugpy/
@@ -77,4 +84,4 @@
         ]
     }
   ]
 }

.vscode/tasks.json

@@ -4,7 +4,7 @@
     {
       "label": "Run Home Assistant Core",
       "type": "shell",
-      "command": "hass -c ./config",
+      "command": "${command:python.interpreterPath} -m homeassistant -c ./config",
       "group": "test",
       "presentation": {
         "reveal": "always",

CODEOWNERS

@@ -180,6 +180,8 @@ build.json @home-assistant/supervisor
 /homeassistant/components/azure_event_hub/ @eavanvalkenburg
 /tests/components/azure_event_hub/ @eavanvalkenburg
 /homeassistant/components/azure_service_bus/ @hfurubotten
+/homeassistant/components/azure_storage/ @zweckj
+/tests/components/azure_storage/ @zweckj
 /homeassistant/components/backup/ @home-assistant/core
 /tests/components/backup/ @home-assistant/core
 /homeassistant/components/baf/ @bdraco @jfroy
@@ -214,6 +216,8 @@ build.json @home-assistant/supervisor
 /tests/components/bmw_connected_drive/ @gerard33 @rikroe
 /homeassistant/components/bond/ @bdraco @prystupa @joshs85 @marciogranzotto
 /tests/components/bond/ @bdraco @prystupa @joshs85 @marciogranzotto
+/homeassistant/components/bosch_alarm/ @mag1024 @sanjay900
+/tests/components/bosch_alarm/ @mag1024 @sanjay900
 /homeassistant/components/bosch_shc/ @tschamm
 /tests/components/bosch_shc/ @tschamm
 /homeassistant/components/braviatv/ @bieniu @Drafteed
@@ -428,7 +432,7 @@ build.json @home-assistant/supervisor
 /homeassistant/components/entur_public_transport/ @hfurubotten
 /homeassistant/components/environment_canada/ @gwww @michaeldavie
 /tests/components/environment_canada/ @gwww @michaeldavie
-/homeassistant/components/ephember/ @ttroy50
+/homeassistant/components/ephember/ @ttroy50 @roberty99
 /homeassistant/components/epic_games_store/ @hacf-fr @Quentame
 /tests/components/epic_games_store/ @hacf-fr @Quentame
 /homeassistant/components/epion/ @lhgravendeel
@@ -568,8 +572,8 @@ build.json @home-assistant/supervisor
 /tests/components/google_cloud/ @lufton @tronikos
 /homeassistant/components/google_drive/ @tronikos
 /tests/components/google_drive/ @tronikos
-/homeassistant/components/google_generative_ai_conversation/ @tronikos
-/tests/components/google_generative_ai_conversation/ @tronikos
+/homeassistant/components/google_generative_ai_conversation/ @tronikos @ivanlh
+/tests/components/google_generative_ai_conversation/ @tronikos @ivanlh
 /homeassistant/components/google_mail/ @tkdrob
 /tests/components/google_mail/ @tkdrob
 /homeassistant/components/google_photos/ @allenporter
@@ -700,6 +704,8 @@ build.json @home-assistant/supervisor
 /tests/components/image_upload/ @home-assistant/core
 /homeassistant/components/imap/ @jbouwh
 /tests/components/imap/ @jbouwh
+/homeassistant/components/imeon_inverter/ @Imeon-Energy
+/tests/components/imeon_inverter/ @Imeon-Energy
 /homeassistant/components/imgw_pib/ @bieniu
 /tests/components/imgw_pib/ @bieniu
 /homeassistant/components/improv_ble/ @emontnemery
@@ -931,6 +937,8 @@ build.json @home-assistant/supervisor
 /tests/components/metoffice/ @MrHarcombe @avee87
 /homeassistant/components/microbees/ @microBeesTech
 /tests/components/microbees/ @microBeesTech
+/homeassistant/components/miele/ @astrandb
+/tests/components/miele/ @astrandb
 /homeassistant/components/mikrotik/ @engrbm87
 /tests/components/mikrotik/ @engrbm87
 /homeassistant/components/mill/ @danielhiversen
@@ -967,8 +975,8 @@ build.json @home-assistant/supervisor
 /tests/components/motionblinds_ble/ @LennP @jerrybboy
 /homeassistant/components/motioneye/ @dermotduffy
 /tests/components/motioneye/ @dermotduffy
-/homeassistant/components/motionmount/ @RJPoelstra
-/tests/components/motionmount/ @RJPoelstra
+/homeassistant/components/motionmount/ @laiho-vogels
+/tests/components/motionmount/ @laiho-vogels
 /homeassistant/components/mqtt/ @emontnemery @jbouwh @bdraco
 /tests/components/mqtt/ @emontnemery @jbouwh @bdraco
 /homeassistant/components/msteams/ @peroyvind
@@ -1051,8 +1059,8 @@ build.json @home-assistant/supervisor
 /tests/components/numato/ @clssn
 /homeassistant/components/number/ @home-assistant/core @Shulyaka
 /tests/components/number/ @home-assistant/core @Shulyaka
-/homeassistant/components/nut/ @bdraco @ollo69 @pestevez
-/tests/components/nut/ @bdraco @ollo69 @pestevez
+/homeassistant/components/nut/ @bdraco @ollo69 @pestevez @tdfountain
+/tests/components/nut/ @bdraco @ollo69 @pestevez @tdfountain
 /homeassistant/components/nws/ @MatthewFlamm @kamiyo
 /tests/components/nws/ @MatthewFlamm @kamiyo
 /homeassistant/components/nyt_games/ @joostlek
@@ -1144,8 +1152,8 @@ build.json @home-assistant/supervisor
 /tests/components/philips_js/ @elupus
 /homeassistant/components/pi_hole/ @shenxn
 /tests/components/pi_hole/ @shenxn
-/homeassistant/components/picnic/ @corneyl
-/tests/components/picnic/ @corneyl
+/homeassistant/components/picnic/ @corneyl @codesalatdev
+/tests/components/picnic/ @corneyl @codesalatdev
 /homeassistant/components/ping/ @jpbede
 /tests/components/ping/ @jpbede
 /homeassistant/components/plaato/ @JohNan
@@ -1181,6 +1189,8 @@ build.json @home-assistant/supervisor
 /tests/components/prusalink/ @balloob
 /homeassistant/components/ps4/ @ktnrg45
 /tests/components/ps4/ @ktnrg45
+/homeassistant/components/pterodactyl/ @elmurato
+/tests/components/pterodactyl/ @elmurato
 /homeassistant/components/pure_energie/ @klaasnicolaas
 /tests/components/pure_energie/ @klaasnicolaas
 /homeassistant/components/purpleair/ @bachya
@@ -1250,6 +1260,8 @@ build.json @home-assistant/supervisor
 /tests/components/refoss/ @ashionky
 /homeassistant/components/remote/ @home-assistant/core
 /tests/components/remote/ @home-assistant/core
+/homeassistant/components/remote_calendar/ @Thomas55555
+/tests/components/remote_calendar/ @Thomas55555
 /homeassistant/components/renault/ @epenet
 /tests/components/renault/ @epenet
 /homeassistant/components/renson/ @jimmyd-be
@@ -1342,6 +1354,8 @@ build.json @home-assistant/supervisor
 /tests/components/sensorpro/ @bdraco
 /homeassistant/components/sensorpush/ @bdraco
 /tests/components/sensorpush/ @bdraco
+/homeassistant/components/sensorpush_cloud/ @sstallion
+/tests/components/sensorpush_cloud/ @sstallion
 /homeassistant/components/sensoterra/ @markruys
 /tests/components/sensoterra/ @markruys
 /homeassistant/components/sentry/ @dcramer @frenck
@@ -1377,7 +1391,6 @@ build.json @home-assistant/supervisor
 /homeassistant/components/siren/ @home-assistant/core @raman325
 /tests/components/siren/ @home-assistant/core @raman325
 /homeassistant/components/sisyphus/ @jkeljo
-/homeassistant/components/sky_hub/ @rogerselwyn
 /homeassistant/components/sky_remote/ @dunnmj @saty9
 /tests/components/sky_remote/ @dunnmj @saty9
 /homeassistant/components/skybell/ @tkdrob
@@ -1397,6 +1410,8 @@ build.json @home-assistant/supervisor
 /tests/components/smappee/ @bsmappee
 /homeassistant/components/smart_meter_texas/ @grahamwetzler
 /tests/components/smart_meter_texas/ @grahamwetzler
+/homeassistant/components/smartthings/ @joostlek
+/tests/components/smartthings/ @joostlek
 /homeassistant/components/smarttub/ @mdz
 /tests/components/smarttub/ @mdz
 /homeassistant/components/smarty/ @z0mbieprocess
@@ -1411,6 +1426,8 @@ build.json @home-assistant/supervisor
 /tests/components/snapcast/ @luar123
 /homeassistant/components/snmp/ @nmaggioni
 /tests/components/snmp/ @nmaggioni
+/homeassistant/components/snoo/ @Lash-L
+/tests/components/snoo/ @Lash-L
 /homeassistant/components/snooz/ @AustinBrunkhorst
 /tests/components/snooz/ @AustinBrunkhorst
 /homeassistant/components/solaredge/ @frenck @bdraco
@@ -1466,8 +1483,6 @@ build.json @home-assistant/supervisor
 /tests/components/suez_water/ @ooii @jb101010-2
 /homeassistant/components/sun/ @Swamp-Ig
 /tests/components/sun/ @Swamp-Ig
-/homeassistant/components/sunweg/ @rokam
-/tests/components/sunweg/ @rokam
 /homeassistant/components/supla/ @mwegrzynek
 /homeassistant/components/surepetcare/ @benleb @danielhiversen
 /tests/components/surepetcare/ @benleb @danielhiversen
@@ -1521,8 +1536,8 @@ build.json @home-assistant/supervisor
 /tests/components/tedee/ @patrickhilker @zweckj
 /homeassistant/components/tellduslive/ @fredrike
 /tests/components/tellduslive/ @fredrike
-/homeassistant/components/template/ @PhracturedBlue @home-assistant/core
-/tests/components/template/ @PhracturedBlue @home-assistant/core
+/homeassistant/components/template/ @Petro31 @PhracturedBlue @home-assistant/core
+/tests/components/template/ @Petro31 @PhracturedBlue @home-assistant/core
 /homeassistant/components/tesla_fleet/ @Bre77
 /tests/components/tesla_fleet/ @Bre77
 /homeassistant/components/tesla_wall_connector/ @einarhauks
@@ -1691,6 +1706,8 @@ build.json @home-assistant/supervisor
 /tests/components/weatherflow_cloud/ @jeeftor
 /homeassistant/components/weatherkit/ @tjhorner
 /tests/components/weatherkit/ @tjhorner
+/homeassistant/components/webdav/ @jpbede
+/tests/components/webdav/ @jpbede
 /homeassistant/components/webhook/ @home-assistant/core
 /tests/components/webhook/ @home-assistant/core
 /homeassistant/components/webmin/ @autinerd

Dockerfile

@@ -12,8 +12,26 @@ ENV \
 ARG QEMU_CPU

+# Home Assistant S6-Overlay
+COPY rootfs /
+
+# Needs to be redefined inside the FROM statement to be set for RUN commands
+ARG BUILD_ARCH
+
+# Get go2rtc binary
+RUN \
+    case "${BUILD_ARCH}" in \
+        "aarch64") go2rtc_suffix='arm64' ;; \
+        "armhf") go2rtc_suffix='armv6' ;; \
+        "armv7") go2rtc_suffix='arm' ;; \
+        *) go2rtc_suffix=${BUILD_ARCH} ;; \
+    esac \
+    && curl -L https://github.com/AlexxIT/go2rtc/releases/download/v1.9.9/go2rtc_linux_${go2rtc_suffix} --output /bin/go2rtc \
+    && chmod +x /bin/go2rtc \
+    # Verify go2rtc can be executed
+    && go2rtc --version
+
 # Install uv
-RUN pip3 install uv==0.5.27
+RUN pip3 install uv==0.6.10

 WORKDIR /usr/src
@@ -42,22 +60,4 @@ RUN \
     && python3 -m compileall \
         homeassistant/homeassistant

-# Home Assistant S6-Overlay
-COPY rootfs /
-
-# Needs to be redefined inside the FROM statement to be set for RUN commands
-ARG BUILD_ARCH
-
-# Get go2rtc binary
-RUN \
-    case "${BUILD_ARCH}" in \
-        "aarch64") go2rtc_suffix='arm64' ;; \
-        "armhf") go2rtc_suffix='armv6' ;; \
-        "armv7") go2rtc_suffix='arm' ;; \
-        *) go2rtc_suffix=${BUILD_ARCH} ;; \
-    esac \
-    && curl -L https://github.com/AlexxIT/go2rtc/releases/download/v1.9.8/go2rtc_linux_${go2rtc_suffix} --output /bin/go2rtc \
-    && chmod +x /bin/go2rtc \
-    # Verify go2rtc can be executed
-    && go2rtc --version
-
 WORKDIR /config
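For context, the shell `case` block moved above only resolves which go2rtc release asset to download per architecture (note the version also moves from v1.9.8 to v1.9.9). A Python rendering of the same mapping, illustrative only and not used by the build:

```python
GO2RTC_SUFFIXES = {"aarch64": "arm64", "armhf": "armv6", "armv7": "arm"}


def go2rtc_url(build_arch: str, version: str = "1.9.9") -> str:
    """Return the go2rtc download URL; unknown arches pass through unchanged."""
    suffix = GO2RTC_SUFFIXES.get(build_arch, build_arch)
    return (
        "https://github.com/AlexxIT/go2rtc/releases/download/"
        f"v{version}/go2rtc_linux_{suffix}"
    )


assert go2rtc_url("armhf").endswith("go2rtc_linux_armv6")
assert go2rtc_url("amd64").endswith("go2rtc_linux_amd64")
```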

build.yaml

@@ -1,10 +1,10 @@
 image: ghcr.io/home-assistant/{arch}-homeassistant
 build_from:
-  aarch64: ghcr.io/home-assistant/aarch64-homeassistant-base:2024.12.0
-  armhf: ghcr.io/home-assistant/armhf-homeassistant-base:2024.12.0
-  armv7: ghcr.io/home-assistant/armv7-homeassistant-base:2024.12.0
-  amd64: ghcr.io/home-assistant/amd64-homeassistant-base:2024.12.0
-  i386: ghcr.io/home-assistant/i386-homeassistant-base:2024.12.0
+  aarch64: ghcr.io/home-assistant/aarch64-homeassistant-base:2025.02.1
+  armhf: ghcr.io/home-assistant/armhf-homeassistant-base:2025.02.1
+  armv7: ghcr.io/home-assistant/armv7-homeassistant-base:2025.02.1
+  amd64: ghcr.io/home-assistant/amd64-homeassistant-base:2025.02.1
+  i386: ghcr.io/home-assistant/i386-homeassistant-base:2025.02.1
 codenotary:
   signer: notary@home-assistant.io
   base_image: notary@home-assistant.io
@@ -19,4 +19,4 @@ labels:
   org.opencontainers.image.authors: The Home Assistant Authors
   org.opencontainers.image.url: https://www.home-assistant.io/
   org.opencontainers.image.documentation: https://www.home-assistant.io/docs/
-  org.opencontainers.image.licenses: Apache License 2.0
+  org.opencontainers.image.licenses: Apache-2.0

homeassistant/block_async_io.py

@@ -178,6 +178,15 @@ _BLOCKING_CALLS: tuple[BlockingCall, ...] = (
         strict_core=False,
         skip_for_tests=True,
     ),
+    BlockingCall(
+        original_func=SSLContext.set_default_verify_paths,
+        object=SSLContext,
+        function="set_default_verify_paths",
+        check_allowed=None,
+        strict=False,
+        strict_core=False,
+        skip_for_tests=True,
+    ),
     BlockingCall(
         original_func=Path.open,
         object=Path,
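The new entry flags `SSLContext.set_default_verify_paths` because it reads CA bundles from disk. A minimal sketch, using plain asyncio with no Home Assistant imports, of the non-blocking pattern this detector nudges callers toward:

```python
import asyncio
import ssl


async def make_client_context() -> ssl.SSLContext:
    """Create an SSLContext without blocking the event loop.

    set_default_verify_paths() does disk I/O, so it is pushed to the
    default thread pool instead of running inline in a coroutine.
    """
    context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
    loop = asyncio.get_running_loop()
    await loop.run_in_executor(None, context.set_default_verify_paths)
    return context


asyncio.run(make_client_context())
```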

homeassistant/bootstrap.py

@@ -53,6 +53,7 @@ from .components import (
     logbook as logbook_pre_import,  # noqa: F401
     lovelace as lovelace_pre_import,  # noqa: F401
     onboarding as onboarding_pre_import,  # noqa: F401
+    person as person_pre_import,  # noqa: F401
     recorder as recorder_import,  # noqa: F401 - not named pre_import since it has requirements
     repairs as repairs_pre_import,  # noqa: F401
     search as search_pre_import,  # noqa: F401
@@ -74,12 +75,14 @@ from .core_config import async_process_ha_core_config
 from .exceptions import HomeAssistantError
 from .helpers import (
     area_registry,
+    backup,
     category_registry,
     config_validation as cv,
     device_registry,
     entity,
     entity_registry,
     floor_registry,
+    frame,
     issue_registry,
     label_registry,
     recorder,
@@ -91,6 +94,7 @@ from .helpers.dispatcher import async_dispatcher_send_internal
 from .helpers.storage import get_internal_store_manager
 from .helpers.system_info import async_get_system_info
 from .helpers.typing import ConfigType
+from .loader import Integration
 from .setup import (
     # _setup_started is marked as protected to make it clear
     # that it is not part of the public API and should not be used
@@ -163,16 +167,6 @@ FRONTEND_INTEGRATIONS = {
     # integrations can be removed and database migration status is
     # visible in frontend
     "frontend",
-    # Hassio is an after dependency of backup, after dependencies
-    # are not promoted from stage 2 to earlier stages, so we need to
-    # add it here. Hassio needs to be setup before backup, otherwise
-    # the backup integration will think we are a container/core install
-    # when using HAOS or Supervised install.
-    "hassio",
-    # Backup is an after dependency of frontend, after dependencies
-    # are not promoted from stage 2 to earlier stages, so we need to
-    # add it here.
-    "backup",
 }

 # Stage 0 is divided into substages. Each substage has a name, a set of integrations and a timeout.
 # The substage containing recorder should have no timeout, as it could cancel a database migration.
@@ -206,6 +200,8 @@ STAGE_1_INTEGRATIONS = {
     "mqtt_eventstream",
     # To provide account link implementations
     "cloud",
+    # Ensure supervisor is available
+    "hassio",
 }

 DEFAULT_INTEGRATIONS = {
@@ -305,14 +301,6 @@ async def async_setup_hass(
         return hass

-    async def stop_hass(hass: core.HomeAssistant) -> None:
-        """Stop hass."""
-        # Ask integrations to shut down. It's messy but we can't
-        # do a clean stop without knowing what is broken
-        with contextlib.suppress(TimeoutError):
-            async with hass.timeout.async_timeout(10):
-                await hass.async_stop()
-
     hass = await create_hass()

     if runtime_config.skip_pip or runtime_config.skip_pip_packages:
@@ -328,10 +316,10 @@ async def async_setup_hass(
     block_async_io.enable()

-    config_dict = None
-    basic_setup_success = False
-
     if not (recovery_mode := runtime_config.recovery_mode):
+        config_dict = None
+        basic_setup_success = False
+
         await hass.async_add_executor_job(conf_util.process_ha_config_upgrade, hass)

         try:
@@ -349,39 +337,43 @@ async def async_setup_hass(
                 await async_from_config_dict(config_dict, hass) is not None
             )

-    if config_dict is None:
-        recovery_mode = True
-        await stop_hass(hass)
-        hass = await create_hass()
-
-    elif not basic_setup_success:
-        _LOGGER.warning("Unable to set up core integrations. Activating recovery mode")
-        recovery_mode = True
-        await stop_hass(hass)
-        hass = await create_hass()
-
-    elif any(domain not in hass.config.components for domain in CRITICAL_INTEGRATIONS):
-        _LOGGER.warning(
-            "Detected that %s did not load. Activating recovery mode",
-            ",".join(CRITICAL_INTEGRATIONS),
-        )
-
-        old_config = hass.config
-        old_logging = hass.data.get(DATA_LOGGING)
-
-        recovery_mode = True
-        await stop_hass(hass)
-        hass = await create_hass()
-
-        if old_logging:
-            hass.data[DATA_LOGGING] = old_logging
-        hass.config.debug = old_config.debug
-        hass.config.skip_pip = old_config.skip_pip
-        hass.config.skip_pip_packages = old_config.skip_pip_packages
-        hass.config.internal_url = old_config.internal_url
-        hass.config.external_url = old_config.external_url
+        if config_dict is None:
+            recovery_mode = True
+            await hass.async_stop(force=True)
+            hass = await create_hass()
+
+        elif not basic_setup_success:
+            _LOGGER.warning(
+                "Unable to set up core integrations. Activating recovery mode"
+            )
+            recovery_mode = True
+            await hass.async_stop(force=True)
+            hass = await create_hass()
+
+        elif any(
+            domain not in hass.config.components for domain in CRITICAL_INTEGRATIONS
+        ):
+            _LOGGER.warning(
+                "Detected that %s did not load. Activating recovery mode",
+                ",".join(CRITICAL_INTEGRATIONS),
+            )
+
+            old_config = hass.config
+            old_logging = hass.data.get(DATA_LOGGING)
+
+            recovery_mode = True
+            await hass.async_stop(force=True)
+            hass = await create_hass()
+
+            if old_logging:
+                hass.data[DATA_LOGGING] = old_logging
+            hass.config.debug = old_config.debug
+            hass.config.skip_pip = old_config.skip_pip
+            hass.config.skip_pip_packages = old_config.skip_pip_packages
+            hass.config.internal_url = old_config.internal_url
+            hass.config.external_url = old_config.external_url

     # Setup loader cache after the config dir has been set
     loader.async_setup(hass)

     if recovery_mode:
         _LOGGER.info("Starting in recovery mode")
@@ -444,9 +436,10 @@ async def async_load_base_functionality(hass: core.HomeAssistant) -> None:
     if DATA_REGISTRIES_LOADED in hass.data:
         return
     hass.data[DATA_REGISTRIES_LOADED] = None
-    translation.async_setup(hass)
     entity.async_setup(hass)
+    frame.async_setup(hass)
     template.async_setup(hass)
+    translation.async_setup(hass)
     await asyncio.gather(
         create_eager_task(get_internal_store_manager(hass).async_initialize()),
         create_eager_task(area_registry.async_load(hass)),
@@ -667,11 +660,10 @@ def _create_log_file(
     err_handler = _RotatingFileHandlerWithoutShouldRollOver(
         err_log_path, backupCount=1
     )
-
     try:
         err_handler.doRollover()
     except OSError as err:
         _LOGGER.error("Error rolling over log file: %s", err)

     return err_handler
@@ -721,20 +713,25 @@
     return domains


-async def _async_resolve_domains_to_setup(
+async def _async_resolve_domains_and_preload(
     hass: core.HomeAssistant, config: dict[str, Any]
-) -> tuple[set[str], dict[str, loader.Integration]]:
-    """Resolve all dependencies and return list of domains to set up."""
+) -> tuple[dict[str, Integration], dict[str, Integration]]:
+    """Resolve all dependencies and return integrations to set up.
+
+    The return value is a tuple of two dictionaries:
+    - The first dictionary contains integrations
+      specified by the configuration (including config entries).
+    - The second dictionary contains the same integrations as the first dictionary
+      together with all their dependencies.
+    """
     domains_to_setup = _get_domains(hass, config)
-    needed_requirements: set[str] = set()

     platform_integrations = conf_util.extract_platform_integrations(
         config, BASE_PLATFORMS
     )
-    # Ensure base platforms that have platform integrations are added to
-    # to `domains_to_setup so they can be setup first instead of
-    # discovering them when later when a config entry setup task
-    # notices its needed and there is already a long line to use
-    # the import executor.
+    # Ensure base platforms that have platform integrations are added to `domains`,
+    # so they can be setup first instead of discovering them later when a config
+    # entry setup task notices that it's needed and there is already a long line
+    # to use the import executor.
     #
     # For example if we have
     # sensor:
@@ -750,111 +747,78 @@
     # so this will be less of a problem in the future.
     domains_to_setup.update(platform_integrations)

-    # Load manifests for base platforms and platform based integrations
-    # that are defined under base platforms right away since we do not require
-    # the manifest to list them as dependencies and we want to avoid the lock
-    # contention when multiple integrations try to load them at once
-    additional_manifests_to_load = {
+    # Additionally process base platforms since we do not require the manifest
+    # to list them as dependencies.
+    # We want to later avoid lock contention when multiple integrations try to load
+    # their manifests at once.
+    # Also process integrations that are defined under base platforms
+    # to speed things up.
+    additional_domains_to_process = {
         *BASE_PLATFORMS,
         *chain.from_iterable(platform_integrations.values()),
     }

-    translations_to_load = additional_manifests_to_load.copy()
-
     # Resolve all dependencies so we know all integrations
     # that will have to be loaded and start right-away
-    integration_cache: dict[str, loader.Integration] = {}
-    to_resolve: set[str] = domains_to_setup
-    while to_resolve or additional_manifests_to_load:
-        old_to_resolve: set[str] = to_resolve
-        to_resolve = set()
-
-        if additional_manifests_to_load:
-            to_get = {*old_to_resolve, *additional_manifests_to_load}
-            additional_manifests_to_load.clear()
-        else:
-            to_get = old_to_resolve
-
-        manifest_deps: set[str] = set()
-        resolve_dependencies_tasks: list[asyncio.Task[bool]] = []
-        integrations_to_process: list[loader.Integration] = []
-
-        for domain, itg in (await loader.async_get_integrations(hass, to_get)).items():
-            if not isinstance(itg, loader.Integration):
-                continue
-            integration_cache[domain] = itg
-            needed_requirements.update(itg.requirements)
-
-            # Make sure manifests for dependencies are loaded in the next
-            # loop to try to group as many as manifest loads in a single
-            # call to avoid the creating one-off executor jobs later in
-            # the setup process
-            additional_manifests_to_load.update(
-                dep
-                for dep in chain(itg.dependencies, itg.after_dependencies)
-                if dep not in integration_cache
-            )
-
-            if domain not in old_to_resolve:
-                continue
-
-            integrations_to_process.append(itg)
-            manifest_deps.update(itg.dependencies)
-            manifest_deps.update(itg.after_dependencies)
-
-            if not itg.all_dependencies_resolved:
-                resolve_dependencies_tasks.append(
-                    create_eager_task(
-                        itg.resolve_dependencies(),
-                        name=f"resolve dependencies {domain}",
-                        loop=hass.loop,
-                    )
-                )
-
-        if unseen_deps := manifest_deps - integration_cache.keys():
-            # If there are dependencies, try to preload all
-            # the integrations manifest at once and add them
-            # to the list of requirements we need to install
-            # so we can try to check if they are already installed
-            # in a single call below which avoids each integration
-            # having to wait for the lock to do it individually
-            deps = await loader.async_get_integrations(hass, unseen_deps)
-            for dependant_domain, dependant_itg in deps.items():
-                if isinstance(dependant_itg, loader.Integration):
-                    integration_cache[dependant_domain] = dependant_itg
-                    needed_requirements.update(dependant_itg.requirements)
-
-        if resolve_dependencies_tasks:
-            await asyncio.gather(*resolve_dependencies_tasks)
-
-        for itg in integrations_to_process:
-            try:
-                all_deps = itg.all_dependencies
-            except RuntimeError:
-                # Integration.all_dependencies raises RuntimeError if
-                # dependencies could not be resolved
-                continue
-            for dep in all_deps:
-                if dep in domains_to_setup:
-                    continue
-                domains_to_setup.add(dep)
-                to_resolve.add(dep)
-
-    _LOGGER.info("Domains to be set up: %s", domains_to_setup)
+    integrations_or_excs = await loader.async_get_integrations(
+        hass, {*domains_to_setup, *additional_domains_to_process}
+    )
+    # Eliminate those missing or with invalid manifest
+    integrations_to_process = {
+        domain: itg
+        for domain, itg in integrations_or_excs.items()
+        if isinstance(itg, Integration)
+    }
+    integrations_dependencies = await loader.resolve_integrations_dependencies(
+        hass, integrations_to_process.values()
+    )
+    # Eliminate those without valid dependencies
+    integrations_to_process = {
+        domain: integrations_to_process[domain] for domain in integrations_dependencies
+    }
+
+    integrations_to_setup = {
+        domain: itg
+        for domain, itg in integrations_to_process.items()
+        if domain in domains_to_setup
+    }
+    all_integrations_to_setup = integrations_to_setup.copy()
+    all_integrations_to_setup.update(
+        (dep, loader.async_get_loaded_integration(hass, dep))
+        for domain in integrations_to_setup
+        for dep in integrations_dependencies[domain].difference(
+            all_integrations_to_setup
+        )
+    )
+
+    # Gather requirements for all integrations,
+    # their dependencies and after dependencies.
+    # To gather all the requirements we must ignore exceptions here.
+    # The exceptions will be detected and handled later in the bootstrap process.
+    integrations_after_dependencies = (
+        await loader.resolve_integrations_after_dependencies(
+            hass, integrations_to_process.values(), ignore_exceptions=True
+        )
+    )
+    integrations_requirements = {
+        domain: itg.requirements for domain, itg in integrations_to_process.items()
+    }
+    integrations_requirements.update(
+        (dep, loader.async_get_loaded_integration(hass, dep).requirements)
+        for deps in integrations_after_dependencies.values()
+        for dep in deps.difference(integrations_requirements)
+    )
+    all_requirements = set(chain.from_iterable(integrations_requirements.values()))

     # Optimistically check if requirements are already installed
     # ahead of setting up the integrations so we can prime the cache
-    # We do not wait for this since its an optimization only
+    # We do not wait for this since it's an optimization only
     hass.async_create_background_task(
-        requirements.async_load_installed_versions(hass, needed_requirements),
+        requirements.async_load_installed_versions(hass, all_requirements),
         "check installed requirements",
         eager_start=True,
     )

-    #
-    # Only add the domains_to_setup after we finish resolving
-    # as new domains are likely to added in the process
-    #
-    translations_to_load.update(domains_to_setup)
-
     # Start loading translations for all integrations we are going to set up
     # in the background so they are ready when we need them. This avoids a
     # lot of waiting for the translation load lock and a thundering herd of
@@ -865,6 +829,7 @@
     # hold the translation load lock and if anything is fast enough to
     # wait for the translation load lock, loading will be done by the
     # time it gets to it.
+    translations_to_load = {*all_integrations_to_setup, *additional_domains_to_process}
     hass.async_create_background_task(
         translation.async_load_integrations(hass, translations_to_load),
         "load translations",
@@ -876,13 +841,13 @@
     # in the setup process.
     hass.async_create_background_task(
         get_internal_store_manager(hass).async_preload(
-            [*PRELOAD_STORAGE, *domains_to_setup]
+            [*PRELOAD_STORAGE, *all_integrations_to_setup]
         ),
         "preload storage",
         eager_start=True,
     )

-    return domains_to_setup, integration_cache
+    return integrations_to_setup, all_integrations_to_setup
@@ -892,65 +857,84 @@ async def _async_set_up_integrations(
     watcher = _WatchPendingSetups(hass, _setup_started(hass))
     watcher.async_start()

-    domains_to_setup, integration_cache = await _async_resolve_domains_to_setup(
+    integrations, all_integrations = await _async_resolve_domains_and_preload(
         hass, config
     )
-    stage_2_domains = domains_to_setup.copy()
+    # Detect all cycles
+    integrations_after_dependencies = (
+        await loader.resolve_integrations_after_dependencies(
+            hass, all_integrations.values(), set(all_integrations)
+        )
+    )
+    all_domains = set(integrations_after_dependencies)
+    domains = set(integrations) & all_domains
+
+    _LOGGER.info(
+        "Domains to be set up: %s | %s",
+        domains,
+        all_domains - domains,
+    )
+
+    async_set_domains_to_be_loaded(hass, all_domains)

     # Initialize recorder
-    if "recorder" in domains_to_setup:
+    if "recorder" in all_domains:
         recorder.async_initialize_recorder(hass)

-    stage_0_and_1_domains: list[tuple[str, set[str], int | None]] = [
+    # Initialize backup
+    if "backup" in all_domains:
+        backup.async_initialize_backup(hass)
+
+    stages: list[tuple[str, set[str], int | None]] = [
         *(
-            (name, domain_group & domains_to_setup, timeout)
+            (name, domain_group, timeout)
             for name, domain_group, timeout in STAGE_0_INTEGRATIONS
         ),
-        ("stage 1", STAGE_1_INTEGRATIONS & domains_to_setup, STAGE_1_TIMEOUT),
+        ("1", STAGE_1_INTEGRATIONS, STAGE_1_TIMEOUT),
+        ("2", domains, STAGE_2_TIMEOUT),
     ]

-    _LOGGER.info("Setting up stage 0 and 1")
-    for name, domain_group, timeout in stage_0_and_1_domains:
-        if not domain_group:
+    _LOGGER.info("Setting up stage 0")
+    for name, domain_group, timeout in stages:
+        stage_domains_unfiltered = domain_group & all_domains
+        if not stage_domains_unfiltered:
+            _LOGGER.info("Nothing to set up in stage %s: %s", name, domain_group)
             continue

-        _LOGGER.info("Setting up %s: %s", name, domain_group)
-        to_be_loaded = domain_group.copy()
-        to_be_loaded.update(
+        stage_domains = stage_domains_unfiltered - hass.config.components
+        if not stage_domains:
+            _LOGGER.info("Already set up stage %s: %s", name, stage_domains_unfiltered)
+            continue
+
+        stage_dep_domains_unfiltered = {
             dep
-            for domain in domain_group
-            if (integration := integration_cache.get(domain)) is not None
-            for dep in integration.all_dependencies
+            for domain in stage_domains
+            for dep in integrations_after_dependencies[domain]
+            if dep not in stage_domains
+        }
+        stage_dep_domains = stage_dep_domains_unfiltered - hass.config.components
+
+        stage_all_domains = stage_domains | stage_dep_domains
+
+        _LOGGER.info(
+            "Setting up stage %s: %s | %s\nDependencies: %s | %s",
+            name,
+            stage_domains,
+            stage_domains_unfiltered - stage_domains,
+            stage_dep_domains,
+            stage_dep_domains_unfiltered - stage_dep_domains,
         )
-        async_set_domains_to_be_loaded(hass, to_be_loaded)
-        stage_2_domains -= to_be_loaded

         if timeout is None:
-            await _async_setup_multi_components(hass, domain_group, config)
-        else:
-            try:
-                async with hass.timeout.async_timeout(timeout, cool_down=COOLDOWN_TIME):
-                    await _async_setup_multi_components(hass, domain_group, config)
-            except TimeoutError:
-                _LOGGER.warning(
-                    "Setup timed out for %s waiting on %s - moving forward",
-                    name,
-                    hass._active_tasks,  # noqa: SLF001
-                )
-
-    # Add after dependencies when setting up stage 2 domains
-    async_set_domains_to_be_loaded(hass, stage_2_domains)
-
-    if stage_2_domains:
-        _LOGGER.info("Setting up stage 2: %s", stage_2_domains)
-        try:
-            async with hass.timeout.async_timeout(
-                STAGE_2_TIMEOUT, cool_down=COOLDOWN_TIME
-            ):
-                await _async_setup_multi_components(hass, stage_2_domains, config)
-        except TimeoutError:
-            _LOGGER.warning(
-                "Setup timed out for stage 2 waiting on %s - moving forward",
-                hass._active_tasks,  # noqa: SLF001
-            )
+            await _async_setup_multi_components(hass, stage_all_domains, config)
+            continue
+        try:
+            async with hass.timeout.async_timeout(timeout, cool_down=COOLDOWN_TIME):
+                await _async_setup_multi_components(hass, stage_all_domains, config)
+        except TimeoutError:
+            _LOGGER.warning(
+                "Setup timed out for stage %s waiting on %s - moving forward",
+                name,
+                hass._active_tasks,  # noqa: SLF001
+            )
@@ -1052,8 +1036,6 @@ async def _async_setup_multi_components(
     config: dict[str, Any],
 ) -> None:
     """Set up multiple domains. Log on failure."""
-    # Avoid creating tasks for domains that were setup in a previous stage
-    domains_not_yet_setup = domains - hass.config.components
     # Create setup tasks for base platforms first since everything will have
     # to wait to be imported, and the sooner we can get the base platforms
     # loaded the sooner we can start loading the rest of the integrations.
@@ -1063,9 +1045,7 @@ async def _async_setup_multi_components(
             f"setup component {domain}",
             eager_start=True,
         )
-        for domain in sorted(
-            domains_not_yet_setup, key=SETUP_ORDER_SORT_KEY, reverse=True
-        )
+        for domain in sorted(domains, key=SETUP_ORDER_SORT_KEY, reverse=True)
     }

     results = await asyncio.gather(*futures.values(), return_exceptions=True)
     for idx, domain in enumerate(futures):
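The setup fan-out above relies on `asyncio.gather(..., return_exceptions=True)` so a single failing integration cannot cancel its siblings. A standalone sketch of that pattern, with invented domain names and a stand-in setup function for illustration:

```python
import asyncio


async def setup_component(domain: str) -> bool:
    """Stand-in for an integration setup; 'broken' fails on purpose."""
    if domain == "broken":
        raise RuntimeError("setup failed")
    return True


async def main() -> None:
    futures = {
        domain: asyncio.create_task(setup_component(domain))
        for domain in ("sensor", "broken", "light")
    }
    # return_exceptions=True collects failures instead of propagating the
    # first one, so every domain gets a result that can be logged per-domain.
    results = await asyncio.gather(*futures.values(), return_exceptions=True)
    for domain, result in zip(futures, results):
        if isinstance(result, BaseException):
            print(f"Error setting up {domain}: {result}")


asyncio.run(main())
```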

homeassistant/brands/bosch.json

@@ -0,0 +1,5 @@
+{
+  "domain": "bosch",
+  "name": "Bosch",
+  "integrations": ["bosch_alarm", "bosch_shc", "home_connect"]
+}

homeassistant/brands/eve.json

@@ -0,0 +1,5 @@
+{
+  "domain": "eve",
+  "name": "Eve",
+  "iot_standards": ["matter"]
+}

homeassistant/brands/microsoft.json

@@ -6,6 +6,7 @@
     "azure_devops",
     "azure_event_hub",
     "azure_service_bus",
+    "azure_storage",
     "microsoft_face_detect",
     "microsoft_face_identify",
     "microsoft_face",

homeassistant/brands/motionblinds.json

@ -1,5 +1,6 @@
{ {
"domain": "motionblinds", "domain": "motionblinds",
"name": "Motionblinds", "name": "Motionblinds",
"integrations": ["motion_blinds", "motionblinds_ble"] "integrations": ["motion_blinds", "motionblinds_ble"],
"iot_standards": ["matter"]
} }

homeassistant/brands/sensorpush.json

@@ -0,0 +1,5 @@
+{
+  "domain": "sensorpush",
+  "name": "SensorPush",
+  "integrations": ["sensorpush", "sensorpush_cloud"]
+}

homeassistant/components/accuweather/const.py

@@ -24,7 +24,7 @@ from homeassistant.components.weather import (
 API_METRIC: Final = "Metric"
 ATTRIBUTION: Final = "Data provided by AccuWeather"
-ATTR_CATEGORY: Final = "Category"
+ATTR_CATEGORY_VALUE = "CategoryValue"
 ATTR_DIRECTION: Final = "Direction"
 ATTR_ENGLISH: Final = "English"
 ATTR_LEVEL: Final = "level"
@@ -55,5 +55,18 @@ CONDITION_MAP = {
     for cond_ha, cond_codes in CONDITION_CLASSES.items()
     for cond_code in cond_codes
 }
+AIR_QUALITY_CATEGORY_MAP = {
+    1: "good",
+    2: "moderate",
+    3: "unhealthy",
+    4: "very_unhealthy",
+    5: "hazardous",
+}
+POLLEN_CATEGORY_MAP = {
+    1: "low",
+    2: "moderate",
+    3: "high",
+    4: "very_high",
+}
 UPDATE_INTERVAL_OBSERVATION = timedelta(minutes=40)
 UPDATE_INTERVAL_DAILY_FORECAST = timedelta(hours=6)
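To make the new constants concrete: AccuWeather reports a numeric `CategoryValue`, and the maps convert it into the stable enum states used for translations. A tiny self-contained example; the payload shape is a simplified assumption:

```python
AIR_QUALITY_CATEGORY_MAP = {
    1: "good",
    2: "moderate",
    3: "unhealthy",
    4: "very_unhealthy",
    5: "hazardous",
}

forecast_entry = {"CategoryValue": 2, "Value": 30}  # simplified API payload
state = AIR_QUALITY_CATEGORY_MAP[forecast_entry["CategoryValue"]]
assert state == "moderate"
```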

homeassistant/components/accuweather/coordinator.py

@@ -75,7 +75,11 @@ class AccuWeatherObservationDataUpdateCoordinator(
             async with timeout(10):
                 result = await self.accuweather.async_get_current_conditions()
         except EXCEPTIONS as error:
-            raise UpdateFailed(error) from error
+            raise UpdateFailed(
+                translation_domain=DOMAIN,
+                translation_key="current_conditions_update_error",
+                translation_placeholders={"error": repr(error)},
+            ) from error

         _LOGGER.debug("Requests remaining: %d", self.accuweather.requests_remaining)
@@ -117,9 +121,15 @@ class AccuWeatherDailyForecastDataUpdateCoordinator(
         """Update data via library."""
         try:
             async with timeout(10):
-                result = await self.accuweather.async_get_daily_forecast()
+                result = await self.accuweather.async_get_daily_forecast(
+                    language=self.hass.config.language
+                )
         except EXCEPTIONS as error:
-            raise UpdateFailed(error) from error
+            raise UpdateFailed(
+                translation_domain=DOMAIN,
+                translation_key="forecast_update_error",
+                translation_placeholders={"error": repr(error)},
+            ) from error

         _LOGGER.debug("Requests remaining: %d", self.accuweather.requests_remaining)
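The change swaps plain `UpdateFailed(error)` for the translatable form. A reduced sketch of how the translation arguments travel with the exception; the real class lives in `homeassistant.helpers.update_coordinator`, so the stand-in below is illustrative only:

```python
class UpdateFailed(Exception):
    """Stand-in for homeassistant.helpers.update_coordinator.UpdateFailed."""

    def __init__(
        self,
        *,
        translation_domain: str,
        translation_key: str,
        translation_placeholders: dict[str, str],
    ) -> None:
        super().__init__(translation_key)
        self.translation_domain = translation_domain
        self.translation_key = translation_key
        self.translation_placeholders = translation_placeholders


try:
    raise UpdateFailed(
        translation_domain="accuweather",
        translation_key="forecast_update_error",
        translation_placeholders={"error": "ApiError('timeout')"},
    )
except UpdateFailed as err:
    # The frontend can render the strings.json message for this key,
    # substituting {error} with the placeholder value.
    print(err.translation_domain, err.translation_key, err.translation_placeholders)
```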

homeassistant/components/accuweather/manifest.json

@ -7,6 +7,6 @@
"integration_type": "service", "integration_type": "service",
"iot_class": "cloud_polling", "iot_class": "cloud_polling",
"loggers": ["accuweather"], "loggers": ["accuweather"],
"requirements": ["accuweather==4.0.0"], "requirements": ["accuweather==4.2.0"],
"single_config_entry": true "single_config_entry": true
} }

homeassistant/components/accuweather/sensor.py

@@ -29,8 +29,9 @@ from homeassistant.helpers.entity_platform import AddConfigEntryEntitiesCallback
 from homeassistant.helpers.update_coordinator import CoordinatorEntity

 from .const import (
+    AIR_QUALITY_CATEGORY_MAP,
     API_METRIC,
-    ATTR_CATEGORY,
+    ATTR_CATEGORY_VALUE,
     ATTR_DIRECTION,
     ATTR_ENGLISH,
     ATTR_LEVEL,
@@ -38,6 +39,7 @@ from .const import (
     ATTR_VALUE,
     ATTRIBUTION,
     MAX_FORECAST_DAYS,
+    POLLEN_CATEGORY_MAP,
 )
 from .coordinator import (
     AccuWeatherConfigEntry,
@@ -59,9 +61,9 @@ class AccuWeatherSensorDescription(SensorEntityDescription):
 FORECAST_SENSOR_TYPES: tuple[AccuWeatherSensorDescription, ...] = (
     AccuWeatherSensorDescription(
         key="AirQuality",
-        value_fn=lambda data: cast(str, data[ATTR_CATEGORY]),
+        value_fn=lambda data: AIR_QUALITY_CATEGORY_MAP[data[ATTR_CATEGORY_VALUE]],
         device_class=SensorDeviceClass.ENUM,
-        options=["good", "hazardous", "high", "low", "moderate", "unhealthy"],
+        options=list(AIR_QUALITY_CATEGORY_MAP.values()),
         translation_key="air_quality",
     ),
     AccuWeatherSensorDescription(
@@ -83,7 +85,9 @@ FORECAST_SENSOR_TYPES: tuple[AccuWeatherSensorDescription, ...] = (
         entity_registry_enabled_default=False,
         native_unit_of_measurement=CONCENTRATION_PARTS_PER_CUBIC_METER,
         value_fn=lambda data: cast(int, data[ATTR_VALUE]),
-        attr_fn=lambda data: {ATTR_LEVEL: data[ATTR_CATEGORY]},
+        attr_fn=lambda data: {
+            ATTR_LEVEL: POLLEN_CATEGORY_MAP[data[ATTR_CATEGORY_VALUE]]
+        },
         translation_key="grass_pollen",
     ),
     AccuWeatherSensorDescription(
@@ -107,7 +111,9 @@ FORECAST_SENSOR_TYPES: tuple[AccuWeatherSensorDescription, ...] = (
         entity_registry_enabled_default=False,
         native_unit_of_measurement=CONCENTRATION_PARTS_PER_CUBIC_METER,
         value_fn=lambda data: cast(int, data[ATTR_VALUE]),
-        attr_fn=lambda data: {ATTR_LEVEL: data[ATTR_CATEGORY]},
+        attr_fn=lambda data: {
+            ATTR_LEVEL: POLLEN_CATEGORY_MAP[data[ATTR_CATEGORY_VALUE]]
+        },
         translation_key="mold_pollen",
     ),
     AccuWeatherSensorDescription(
@@ -115,7 +121,9 @@ FORECAST_SENSOR_TYPES: tuple[AccuWeatherSensorDescription, ...] = (
         native_unit_of_measurement=CONCENTRATION_PARTS_PER_CUBIC_METER,
         entity_registry_enabled_default=False,
         value_fn=lambda data: cast(int, data[ATTR_VALUE]),
-        attr_fn=lambda data: {ATTR_LEVEL: data[ATTR_CATEGORY]},
+        attr_fn=lambda data: {
+            ATTR_LEVEL: POLLEN_CATEGORY_MAP[data[ATTR_CATEGORY_VALUE]]
+        },
         translation_key="ragweed_pollen",
     ),
     AccuWeatherSensorDescription(
@@ -181,14 +189,18 @@ FORECAST_SENSOR_TYPES: tuple[AccuWeatherSensorDescription, ...] = (
         native_unit_of_measurement=CONCENTRATION_PARTS_PER_CUBIC_METER,
         entity_registry_enabled_default=False,
         value_fn=lambda data: cast(int, data[ATTR_VALUE]),
-        attr_fn=lambda data: {ATTR_LEVEL: data[ATTR_CATEGORY]},
+        attr_fn=lambda data: {
+            ATTR_LEVEL: POLLEN_CATEGORY_MAP[data[ATTR_CATEGORY_VALUE]]
+        },
         translation_key="tree_pollen",
     ),
     AccuWeatherSensorDescription(
         key="UVIndex",
         native_unit_of_measurement=UV_INDEX,
         value_fn=lambda data: cast(int, data[ATTR_VALUE]),
-        attr_fn=lambda data: {ATTR_LEVEL: data[ATTR_CATEGORY]},
+        attr_fn=lambda data: {
+            ATTR_LEVEL: POLLEN_CATEGORY_MAP[data[ATTR_CATEGORY_VALUE]]
+        },
         translation_key="uv_index_forecast",
     ),
     AccuWeatherSensorDescription(
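All of these sensors follow the description-driven pattern: each description object carries callables that extract state and attributes from the coordinator data. A reduced, runnable sketch of that shape; field names mirror `AccuWeatherSensorDescription`, and the sample payload is an assumption:

```python
from collections.abc import Callable
from dataclasses import dataclass
from typing import Any

POLLEN_CATEGORY_MAP = {1: "low", 2: "moderate", 3: "high", 4: "very_high"}


@dataclass(frozen=True, kw_only=True)
class Description:
    """Reduced stand-in for AccuWeatherSensorDescription."""

    key: str
    value_fn: Callable[[dict[str, Any]], Any]
    attr_fn: Callable[[dict[str, Any]], dict[str, Any]]


GRASS_POLLEN = Description(
    key="Grass",
    value_fn=lambda data: data["Value"],
    attr_fn=lambda data: {"level": POLLEN_CATEGORY_MAP[data["CategoryValue"]]},
)

data = {"Value": 7, "CategoryValue": 1}
assert GRASS_POLLEN.value_fn(data) == 7
assert GRASS_POLLEN.attr_fn(data) == {"level": "low"}
```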

homeassistant/components/accuweather/strings.json

@@ -26,10 +26,20 @@
         "state": {
           "good": "Good",
           "hazardous": "Hazardous",
-          "high": "High",
-          "low": "Low",
           "moderate": "Moderate",
-          "unhealthy": "Unhealthy"
-        }
+          "unhealthy": "Unhealthy",
+          "very_unhealthy": "Very unhealthy"
+        },
+        "state_attributes": {
+          "options": {
+            "state": {
+              "good": "[%key:component::accuweather::entity::sensor::air_quality::state::good%]",
+              "hazardous": "[%key:component::accuweather::entity::sensor::air_quality::state::hazardous%]",
+              "moderate": "[%key:component::accuweather::entity::sensor::air_quality::state::moderate%]",
+              "unhealthy": "[%key:component::accuweather::entity::sensor::air_quality::state::unhealthy%]",
+              "very_unhealthy": "[%key:component::accuweather::entity::sensor::air_quality::state::very_unhealthy%]"
+            }
+          }
         }
       },
       "apparent_temperature": {
@@ -62,12 +72,10 @@
         "level": {
           "name": "Level",
           "state": {
-            "good": "[%key:component::accuweather::entity::sensor::air_quality::state::good%]",
-            "hazardous": "[%key:component::accuweather::entity::sensor::air_quality::state::hazardous%]",
-            "high": "[%key:component::accuweather::entity::sensor::air_quality::state::high%]",
-            "low": "[%key:component::accuweather::entity::sensor::air_quality::state::low%]",
-            "moderate": "[%key:component::accuweather::entity::sensor::air_quality::state::moderate%]",
-            "unhealthy": "[%key:component::accuweather::entity::sensor::air_quality::state::unhealthy%]"
+            "high": "[%key:common::state::high%]",
+            "low": "[%key:common::state::low%]",
+            "moderate": "Moderate",
+            "very_high": "[%key:common::state::very_high%]"
           }
         }
       }
@@ -81,12 +89,10 @@
         "level": {
           "name": "[%key:component::accuweather::entity::sensor::grass_pollen::state_attributes::level::name%]",
           "state": {
-            "good": "[%key:component::accuweather::entity::sensor::air_quality::state::good%]",
-            "hazardous": "[%key:component::accuweather::entity::sensor::air_quality::state::hazardous%]",
-            "high": "[%key:component::accuweather::entity::sensor::air_quality::state::high%]",
-            "low": "[%key:component::accuweather::entity::sensor::air_quality::state::low%]",
-            "moderate": "[%key:component::accuweather::entity::sensor::air_quality::state::moderate%]",
-            "unhealthy": "[%key:component::accuweather::entity::sensor::air_quality::state::unhealthy%]"
+            "high": "[%key:common::state::high%]",
+            "low": "[%key:common::state::low%]",
+            "moderate": "[%key:component::accuweather::entity::sensor::grass_pollen::state_attributes::level::state::moderate%]",
+            "very_high": "[%key:common::state::very_high%]"
           }
         }
       }
@@ -100,6 +106,15 @@
           "steady": "Steady",
           "rising": "Rising",
           "falling": "Falling"
-        }
+        },
+        "state_attributes": {
+          "options": {
+            "state": {
+              "falling": "[%key:component::accuweather::entity::sensor::pressure_tendency::state::falling%]",
+              "rising": "[%key:component::accuweather::entity::sensor::pressure_tendency::state::rising%]",
+              "steady": "[%key:component::accuweather::entity::sensor::pressure_tendency::state::steady%]"
+            }
+          }
         }
       },
       "ragweed_pollen": {
@@ -108,12 +123,10 @@
         "level": {
           "name": "[%key:component::accuweather::entity::sensor::grass_pollen::state_attributes::level::name%]",
           "state": {
-            "good": "[%key:component::accuweather::entity::sensor::air_quality::state::good%]",
-            "hazardous": "[%key:component::accuweather::entity::sensor::air_quality::state::hazardous%]",
-            "high": "[%key:component::accuweather::entity::sensor::air_quality::state::high%]",
-            "low": "[%key:component::accuweather::entity::sensor::air_quality::state::low%]",
-            "moderate": "[%key:component::accuweather::entity::sensor::air_quality::state::moderate%]",
-            "unhealthy": "[%key:component::accuweather::entity::sensor::air_quality::state::unhealthy%]"
+            "high": "[%key:common::state::high%]",
+            "low": "[%key:common::state::low%]",
+            "moderate": "[%key:component::accuweather::entity::sensor::grass_pollen::state_attributes::level::state::moderate%]",
+            "very_high": "[%key:common::state::very_high%]"
          }
         }
       }
@@ -154,12 +167,10 @@
         "level": {
           "name": "[%key:component::accuweather::entity::sensor::grass_pollen::state_attributes::level::name%]",
           "state": {
-            "good": "[%key:component::accuweather::entity::sensor::air_quality::state::good%]",
-            "hazardous": "[%key:component::accuweather::entity::sensor::air_quality::state::hazardous%]",
-            "high": "[%key:component::accuweather::entity::sensor::air_quality::state::high%]",
-            "low": "[%key:component::accuweather::entity::sensor::air_quality::state::low%]",
-            "moderate": "[%key:component::accuweather::entity::sensor::air_quality::state::moderate%]",
-            "unhealthy": "[%key:component::accuweather::entity::sensor::air_quality::state::unhealthy%]"
+            "high": "[%key:common::state::high%]",
+            "low": "[%key:common::state::low%]",
+            "moderate": "[%key:component::accuweather::entity::sensor::grass_pollen::state_attributes::level::state::moderate%]",
+            "very_high": "[%key:common::state::very_high%]"
           }
         }
       }
@@ -170,12 +181,10 @@
         "level": {
           "name": "[%key:component::accuweather::entity::sensor::grass_pollen::state_attributes::level::name%]",
           "state": {
-            "good": "[%key:component::accuweather::entity::sensor::air_quality::state::good%]",
-            "hazardous": "[%key:component::accuweather::entity::sensor::air_quality::state::hazardous%]",
-            "high": "[%key:component::accuweather::entity::sensor::air_quality::state::high%]",
-            "low": "[%key:component::accuweather::entity::sensor::air_quality::state::low%]",
-            "moderate": "[%key:component::accuweather::entity::sensor::air_quality::state::moderate%]",
-            "unhealthy": "[%key:component::accuweather::entity::sensor::air_quality::state::unhealthy%]"
+            "high": "[%key:common::state::high%]",
+            "low": "[%key:common::state::low%]",
+            "moderate": "[%key:component::accuweather::entity::sensor::grass_pollen::state_attributes::level::state::moderate%]",
+            "very_high": "[%key:common::state::very_high%]"
           }
         }
       }
@@ -186,12 +195,10 @@
         "level": {
           "name": "[%key:component::accuweather::entity::sensor::grass_pollen::state_attributes::level::name%]",
           "state": {
-            "good": "[%key:component::accuweather::entity::sensor::air_quality::state::good%]",
-            "hazardous": "[%key:component::accuweather::entity::sensor::air_quality::state::hazardous%]",
-            "high": "[%key:component::accuweather::entity::sensor::air_quality::state::high%]",
-            "low": "[%key:component::accuweather::entity::sensor::air_quality::state::low%]",
-            "moderate": "[%key:component::accuweather::entity::sensor::air_quality::state::moderate%]",
-            "unhealthy": "[%key:component::accuweather::entity::sensor::air_quality::state::unhealthy%]"
+            "high": "[%key:common::state::high%]",
+            "low": "[%key:common::state::low%]",
+            "moderate": "[%key:component::accuweather::entity::sensor::grass_pollen::state_attributes::level::state::moderate%]",
+            "very_high": "[%key:common::state::very_high%]"
           }
         }
       }
@@ -222,6 +229,14 @@
       }
     }
   },
+  "exceptions": {
+    "current_conditions_update_error": {
+      "message": "An error occurred while retrieving weather current conditions data from the AccuWeather API: {error}"
+    },
+    "forecast_update_error": {
+      "message": "An error occurred while retrieving weather forecast data from the AccuWeather API: {error}"
}
},
"system_health": { "system_health": {
"info": { "info": {
"can_reach_server": "Reach AccuWeather server", "can_reach_server": "Reach AccuWeather server",

View File

@ -5,14 +5,14 @@
"data": { "data": {
"connection_type": "Select connection type" "connection_type": "Select connection type"
}, },
"description": "Select connection type. Local requires heaters with bluetooth" "description": "Select connection type. Local requires heaters with Bluetooth"
}, },
"local": { "local": {
"data": { "data": {
"wifi_ssid": "Wi-Fi SSID", "wifi_ssid": "Wi-Fi SSID",
"wifi_pswd": "Wi-Fi Password" "wifi_pswd": "Wi-Fi password"
}, },
"description": "Reset the heater by pressing + and OK until display shows 'Reset'. Then press and hold OK button on the heater until the blue led starts blinking before pressing Submit. Configuring heater might take some minutes." "description": "Reset the heater by pressing + and OK until display shows 'Reset'. Then press and hold OK button on the heater until the blue LED starts blinking before pressing Submit. Configuring heater might take some minutes."
}, },
"cloud": { "cloud": {
"data": { "data": {

View File

@ -7,7 +7,7 @@ from dataclasses import dataclass
from adguardhome import AdGuardHome, AdGuardHomeConnectionError from adguardhome import AdGuardHome, AdGuardHomeConnectionError
import voluptuous as vol import voluptuous as vol
from homeassistant.config_entries import ConfigEntry, ConfigEntryState from homeassistant.config_entries import ConfigEntry
from homeassistant.const import ( from homeassistant.const import (
CONF_HOST, CONF_HOST,
CONF_NAME, CONF_NAME,
@ -123,12 +123,7 @@ async def async_setup_entry(hass: HomeAssistant, entry: AdGuardConfigEntry) -> b
async def async_unload_entry(hass: HomeAssistant, entry: AdGuardConfigEntry) -> bool: async def async_unload_entry(hass: HomeAssistant, entry: AdGuardConfigEntry) -> bool:
"""Unload AdGuard Home config entry.""" """Unload AdGuard Home config entry."""
unload_ok = await hass.config_entries.async_unload_platforms(entry, PLATFORMS) unload_ok = await hass.config_entries.async_unload_platforms(entry, PLATFORMS)
loaded_entries = [ if not hass.config_entries.async_loaded_entries(DOMAIN):
entry
for entry in hass.config_entries.async_entries(DOMAIN)
if entry.state == ConfigEntryState.LOADED
]
if len(loaded_entries) == 1:
# This is the last loaded instance of AdGuard, deregister any services # This is the last loaded instance of AdGuard, deregister any services
hass.services.async_remove(DOMAIN, SERVICE_ADD_URL) hass.services.async_remove(DOMAIN, SERVICE_ADD_URL)
hass.services.async_remove(DOMAIN, SERVICE_REMOVE_URL) hass.services.async_remove(DOMAIN, SERVICE_REMOVE_URL)
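Editor's note: the AdGuard Home hunk above swaps a hand-rolled count of loaded entries for `hass.config_entries.async_loaded_entries(DOMAIN)`. A minimal sketch of the new check, using plain-Python stand-ins for the entry registry (the names below are illustrative, not the real Home Assistant API):

```python
from dataclasses import dataclass
from enum import Enum


class ConfigEntryState(Enum):
    LOADED = "loaded"
    NOT_LOADED = "not_loaded"


@dataclass
class Entry:
    domain: str
    state: ConfigEntryState


def async_loaded_entries(entries: list[Entry], domain: str) -> list[Entry]:
    """Stand-in for hass.config_entries.async_loaded_entries."""
    return [
        entry
        for entry in entries
        if entry.domain == domain and entry.state is ConfigEntryState.LOADED
    ]


# After unloading the last AdGuard entry, nothing is left in the LOADED
# state, so the integration-wide services get deregistered.
entries = [Entry("adguard", ConfigEntryState.NOT_LOADED)]
if not async_loaded_entries(entries, "adguard"):
    print("deregister services")
```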

View File

@ -2,6 +2,7 @@
from __future__ import annotations from __future__ import annotations
from decimal import Decimal
import logging import logging
from typing import Any from typing import Any
@ -14,6 +15,7 @@ from homeassistant.components.climate import (
FAN_MEDIUM, FAN_MEDIUM,
ClimateEntity, ClimateEntity,
ClimateEntityFeature, ClimateEntityFeature,
HVACAction,
HVACMode, HVACMode,
) )
from homeassistant.const import ATTR_TEMPERATURE, PRECISION_WHOLE, UnitOfTemperature from homeassistant.const import ATTR_TEMPERATURE, PRECISION_WHOLE, UnitOfTemperature
@ -49,6 +51,14 @@ ADVANTAGE_AIR_MYTEMP_ENABLED = "climateControlModeEnabled"
ADVANTAGE_AIR_HEAT_TARGET = "myAutoHeatTargetTemp" ADVANTAGE_AIR_HEAT_TARGET = "myAutoHeatTargetTemp"
ADVANTAGE_AIR_COOL_TARGET = "myAutoCoolTargetTemp" ADVANTAGE_AIR_COOL_TARGET = "myAutoCoolTargetTemp"
ADVANTAGE_AIR_MYFAN = "autoAA" ADVANTAGE_AIR_MYFAN = "autoAA"
ADVANTAGE_AIR_MYAUTO_MODE_SET = "myAutoModeCurrentSetMode"
HVAC_ACTIONS = {
"cool": HVACAction.COOLING,
"heat": HVACAction.HEATING,
"vent": HVACAction.FAN,
"dry": HVACAction.DRYING,
}
HVAC_MODES = [ HVAC_MODES = [
HVACMode.OFF, HVACMode.OFF,
@ -175,6 +185,17 @@ class AdvantageAirAC(AdvantageAirAcEntity, ClimateEntity):
return ADVANTAGE_AIR_HVAC_MODES.get(self._ac["mode"]) return ADVANTAGE_AIR_HVAC_MODES.get(self._ac["mode"])
return HVACMode.OFF return HVACMode.OFF
@property
def hvac_action(self) -> HVACAction | None:
"""Return the current running HVAC action."""
if self._ac["state"] == ADVANTAGE_AIR_STATE_OFF:
return HVACAction.OFF
if self._ac["mode"] == "myauto":
return HVAC_ACTIONS.get(
self._ac.get(ADVANTAGE_AIR_MYAUTO_MODE_SET, HVACAction.OFF)
)
return HVAC_ACTIONS.get(self._ac["mode"])
@property @property
def fan_mode(self) -> str | None: def fan_mode(self) -> str | None:
"""Return the current fan modes.""" """Return the current fan modes."""
@ -273,6 +294,22 @@ class AdvantageAirZone(AdvantageAirZoneEntity, ClimateEntity):
return HVACMode.HEAT_COOL return HVACMode.HEAT_COOL
return HVACMode.OFF return HVACMode.OFF
@property
def hvac_action(self) -> HVACAction | None:
"""Return the HVAC action, inheriting from master AC if zone is open but idle if air is <= 5%."""
if self._ac["state"] == ADVANTAGE_AIR_STATE_OFF:
return HVACAction.OFF
master_action = HVAC_ACTIONS.get(self._ac["mode"], HVACAction.OFF)
if self._ac["mode"] == "myauto":
master_action = HVAC_ACTIONS.get(
str(self._ac.get(ADVANTAGE_AIR_MYAUTO_MODE_SET)), HVACAction.OFF
)
if self._zone["state"] == ADVANTAGE_AIR_STATE_OPEN:
if self._zone["value"] <= Decimal(5):
return HVACAction.IDLE
return master_action
return HVACAction.OFF
@property @property
def current_temperature(self) -> float | None: def current_temperature(self) -> float | None:
"""Return the current temperature.""" """Return the current temperature."""

View File

@ -7,3 +7,4 @@ ADVANTAGE_AIR_STATE_CLOSE = "close"
ADVANTAGE_AIR_STATE_ON = "on" ADVANTAGE_AIR_STATE_ON = "on"
ADVANTAGE_AIR_STATE_OFF = "off" ADVANTAGE_AIR_STATE_OFF = "off"
ADVANTAGE_AIR_AUTOFAN_ENABLED = "aaAutoFanModeEnabled" ADVANTAGE_AIR_AUTOFAN_ENABLED = "aaAutoFanModeEnabled"
ADVANTAGE_AIR_NIGHT_MODE_ENABLED = "quietNightModeEnabled"

View File

@ -41,7 +41,7 @@ async def async_setup_entry(
entities.append( entities.append(
AdvantageAirThingCover(instance, thing, CoverDeviceClass.BLIND) AdvantageAirThingCover(instance, thing, CoverDeviceClass.BLIND)
) )
elif thing["channelDipState"] == 3: # 3 = "Garage door" elif thing["channelDipState"] in [3, 10]: # 3 & 10 = "Garage door"
entities.append( entities.append(
AdvantageAirThingCover(instance, thing, CoverDeviceClass.GARAGE) AdvantageAirThingCover(instance, thing, CoverDeviceClass.GARAGE)
) )

View File

@ -9,6 +9,7 @@ from homeassistant.helpers.entity_platform import AddConfigEntryEntitiesCallback
from . import AdvantageAirDataConfigEntry from . import AdvantageAirDataConfigEntry
from .const import ( from .const import (
ADVANTAGE_AIR_AUTOFAN_ENABLED, ADVANTAGE_AIR_AUTOFAN_ENABLED,
ADVANTAGE_AIR_NIGHT_MODE_ENABLED,
ADVANTAGE_AIR_STATE_OFF, ADVANTAGE_AIR_STATE_OFF,
ADVANTAGE_AIR_STATE_ON, ADVANTAGE_AIR_STATE_ON,
) )
@ -32,6 +33,8 @@ async def async_setup_entry(
entities.append(AdvantageAirFreshAir(instance, ac_key)) entities.append(AdvantageAirFreshAir(instance, ac_key))
if ADVANTAGE_AIR_AUTOFAN_ENABLED in ac_device["info"]: if ADVANTAGE_AIR_AUTOFAN_ENABLED in ac_device["info"]:
entities.append(AdvantageAirMyFan(instance, ac_key)) entities.append(AdvantageAirMyFan(instance, ac_key))
if ADVANTAGE_AIR_NIGHT_MODE_ENABLED in ac_device["info"]:
entities.append(AdvantageAirNightMode(instance, ac_key))
if things := instance.coordinator.data.get("myThings"): if things := instance.coordinator.data.get("myThings"):
entities.extend( entities.extend(
AdvantageAirRelay(instance, thing) AdvantageAirRelay(instance, thing)
@ -93,6 +96,32 @@ class AdvantageAirMyFan(AdvantageAirAcEntity, SwitchEntity):
await self.async_update_ac({ADVANTAGE_AIR_AUTOFAN_ENABLED: False}) await self.async_update_ac({ADVANTAGE_AIR_AUTOFAN_ENABLED: False})
class AdvantageAirNightMode(AdvantageAirAcEntity, SwitchEntity):
"""Representation of Advantage 'MySleep$aver' Mode control."""
_attr_icon = "mdi:weather-night"
_attr_name = "MySleep$aver"
_attr_device_class = SwitchDeviceClass.SWITCH
def __init__(self, instance: AdvantageAirData, ac_key: str) -> None:
"""Initialize an Advantage Air Night Mode control."""
super().__init__(instance, ac_key)
self._attr_unique_id += "-nightmode"
@property
def is_on(self) -> bool:
"""Return the Night Mode status."""
return self._ac[ADVANTAGE_AIR_NIGHT_MODE_ENABLED]
async def async_turn_on(self, **kwargs: Any) -> None:
"""Turn Night Mode on."""
await self.async_update_ac({ADVANTAGE_AIR_NIGHT_MODE_ENABLED: True})
async def async_turn_off(self, **kwargs: Any) -> None:
"""Turn Night Mode off."""
await self.async_update_ac({ADVANTAGE_AIR_NIGHT_MODE_ENABLED: False})
class AdvantageAirRelay(AdvantageAirThingEntity, SwitchEntity): class AdvantageAirRelay(AdvantageAirThingEntity, SwitchEntity):
"""Representation of Advantage Air Thing.""" """Representation of Advantage Air Thing."""

View File

@ -51,7 +51,7 @@
"issues": { "issues": {
"deprecated_yaml_import_issue_cannot_connect": { "deprecated_yaml_import_issue_cannot_connect": {
"title": "The {integration_title} YAML configuration import failed", "title": "The {integration_title} YAML configuration import failed",
"description": "Configuring {integration_title} using YAML is being removed but there was an connection error importing your YAML configuration.\n\nEnsure connection to {integration_title} works and restart Home Assistant to try again or remove the {integration_title} YAML configuration from your configuration.yaml file and continue to [set up the integration]({url}) manually." "description": "Configuring {integration_title} using YAML is being removed but there was a connection error importing your YAML configuration.\n\nEnsure connection to {integration_title} works and restart Home Assistant to try again or remove the {integration_title} YAML configuration from your configuration.yaml file and continue to [set up the integration]({url}) manually."
} }
} }
} }

View File

@ -6,6 +6,6 @@
"documentation": "https://www.home-assistant.io/integrations/airgradient", "documentation": "https://www.home-assistant.io/integrations/airgradient",
"integration_type": "device", "integration_type": "device",
"iot_class": "local_polling", "iot_class": "local_polling",
"requirements": ["airgradient==0.9.1"], "requirements": ["airgradient==0.9.2"],
"zeroconf": ["_airgradient._tcp.local."] "zeroconf": ["_airgradient._tcp.local."]
} }

View File

@ -11,7 +11,7 @@
} }
}, },
"discovery_confirm": { "discovery_confirm": {
"description": "Do you want to setup {model}?" "description": "Do you want to set up {model}?"
} }
}, },
"abort": { "abort": {
@ -68,8 +68,8 @@
"led_bar_mode": { "led_bar_mode": {
"name": "LED bar mode", "name": "LED bar mode",
"state": { "state": {
"off": "Off", "off": "[%key:common::state::off%]",
"co2": "Carbon dioxide", "co2": "[%key:component::sensor::entity_component::carbon_dioxide::name%]",
"pm": "Particulate matter" "pm": "Particulate matter"
} }
}, },
@ -143,8 +143,8 @@
"led_bar_mode": { "led_bar_mode": {
"name": "[%key:component::airgradient::entity::select::led_bar_mode::name%]", "name": "[%key:component::airgradient::entity::select::led_bar_mode::name%]",
"state": { "state": {
"off": "[%key:component::airgradient::entity::select::led_bar_mode::state::off%]", "off": "[%key:common::state::off%]",
"co2": "[%key:component::airgradient::entity::select::led_bar_mode::state::co2%]", "co2": "[%key:component::sensor::entity_component::carbon_dioxide::name%]",
"pm": "[%key:component::airgradient::entity::select::led_bar_mode::state::pm%]" "pm": "[%key:component::airgradient::entity::select::led_bar_mode::state::pm%]"
} }
}, },

View File

@ -105,7 +105,14 @@ class AirlyDataUpdateCoordinator(DataUpdateCoordinator[dict[str, str | float | i
try: try:
await measurements.update() await measurements.update()
except (AirlyError, ClientConnectorError) as error: except (AirlyError, ClientConnectorError) as error:
raise UpdateFailed(error) from error raise UpdateFailed(
translation_domain=DOMAIN,
translation_key="update_error",
translation_placeholders={
"entry": self.config_entry.title,
"error": repr(error),
},
) from error
_LOGGER.debug( _LOGGER.debug(
"Requests remaining: %s/%s", "Requests remaining: %s/%s",
@ -126,7 +133,11 @@ class AirlyDataUpdateCoordinator(DataUpdateCoordinator[dict[str, str | float | i
standards = measurements.current["standards"] standards = measurements.current["standards"]
if index["description"] == NO_AIRLY_SENSORS: if index["description"] == NO_AIRLY_SENSORS:
raise UpdateFailed("Can't retrieve data: no Airly sensors in this area") raise UpdateFailed(
translation_domain=DOMAIN,
translation_key="no_station",
translation_placeholders={"entry": self.config_entry.title},
)
for value in values: for value in values:
data[value["name"]] = value["value"] data[value["name"]] = value["value"]
for standard in standards: for standard in standards:
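Editor's note: the Airly coordinator hunk above replaces bare `UpdateFailed(error)` messages with translation metadata that the frontend resolves against the new `exceptions` keys in `strings.json`. A self-contained sketch of the pattern, with a simplified stand-in for `homeassistant.helpers.update_coordinator.UpdateFailed`:

```python
class UpdateFailed(Exception):
    """Simplified stand-in that carries translation metadata."""

    def __init__(self, *, translation_domain=None, translation_key=None,
                 translation_placeholders=None):
        super().__init__(translation_key)
        self.translation_domain = translation_domain
        self.translation_key = translation_key
        self.translation_placeholders = translation_placeholders or {}


try:
    raise UpdateFailed(
        translation_domain="airly",
        translation_key="update_error",
        translation_placeholders={"entry": "Home", "error": "timeout"},
    )
except UpdateFailed as err:
    # The frontend would look up "update_error" under "exceptions" and
    # substitute the placeholders.
    print(err.translation_key, err.translation_placeholders)
```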

View File

@ -36,5 +36,13 @@
"name": "[%key:component::sensor::entity_component::carbon_monoxide::name%]" "name": "[%key:component::sensor::entity_component::carbon_monoxide::name%]"
} }
} }
},
"exceptions": {
"update_error": {
"message": "An error occurred while retrieving data from the Airly API for {entry}: {error}"
},
"no_station": {
"message": "An error occurred while retrieving data from the Airly API for {entry}: no measuring stations in this area"
}
} }
} }

View File

@ -8,7 +8,7 @@ from aiohttp import ClientSession
from aiohttp.client_exceptions import ClientConnectorError from aiohttp.client_exceptions import ClientConnectorError
from pyairnow import WebServiceAPI from pyairnow import WebServiceAPI
from pyairnow.conv import aqi_to_concentration from pyairnow.conv import aqi_to_concentration
from pyairnow.errors import AirNowError from pyairnow.errors import AirNowError, InvalidJsonError
from homeassistant.config_entries import ConfigEntry from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant from homeassistant.core import HomeAssistant
@ -79,7 +79,7 @@ class AirNowDataUpdateCoordinator(DataUpdateCoordinator[dict[str, Any]]):
distance=self.distance, distance=self.distance,
) )
except (AirNowError, ClientConnectorError) as error: except (AirNowError, ClientConnectorError, InvalidJsonError) as error:
raise UpdateFailed(error) from error raise UpdateFailed(error) from error
if not obs: if not obs:

View File

@ -7,7 +7,7 @@
"api_key": "[%key:common::config_flow::data::api_key%]", "api_key": "[%key:common::config_flow::data::api_key%]",
"latitude": "[%key:common::config_flow::data::latitude%]", "latitude": "[%key:common::config_flow::data::latitude%]",
"longitude": "[%key:common::config_flow::data::longitude%]", "longitude": "[%key:common::config_flow::data::longitude%]",
"radius": "Station Radius (miles; optional)" "radius": "Station radius (miles; optional)"
} }
} }
}, },
@ -25,7 +25,7 @@
"step": { "step": {
"init": { "init": {
"data": { "data": {
"radius": "Station Radius (miles)" "radius": "Station radius (miles)"
} }
} }
} }

View File

@ -83,6 +83,7 @@ class AirQConfigFlow(ConfigFlow, domain=DOMAIN):
await self.async_set_unique_id(device_info["id"]) await self.async_set_unique_id(device_info["id"])
self._abort_if_unique_id_configured() self._abort_if_unique_id_configured()
_LOGGER.debug("Creating an entry for %s", device_info["name"])
return self.async_create_entry(title=device_info["name"], data=user_input) return self.async_create_entry(title=device_info["name"], data=user_input)
return self.async_show_form( return self.async_show_form(

View File

@ -5,7 +5,7 @@ from __future__ import annotations
from datetime import timedelta from datetime import timedelta
import logging import logging
from aioairq import AirQ from aioairq.core import AirQ, identify_warming_up_sensors
from homeassistant.config_entries import ConfigEntry from homeassistant.config_entries import ConfigEntry
from homeassistant.const import CONF_IP_ADDRESS, CONF_PASSWORD from homeassistant.const import CONF_IP_ADDRESS, CONF_PASSWORD
@ -55,6 +55,9 @@ class AirQCoordinator(DataUpdateCoordinator):
async def _async_update_data(self) -> dict: async def _async_update_data(self) -> dict:
"""Fetch the data from the device.""" """Fetch the data from the device."""
if "name" not in self.device_info: if "name" not in self.device_info:
_LOGGER.debug(
"'name' not found in AirQCoordinator.device_info, fetching from the device"
)
info = await self.airq.fetch_device_info() info = await self.airq.fetch_device_info()
self.device_info.update( self.device_info.update(
DeviceInfo( DeviceInfo(
@ -64,7 +67,16 @@ class AirQCoordinator(DataUpdateCoordinator):
hw_version=info["hw_version"], hw_version=info["hw_version"],
) )
) )
return await self.airq.get_latest_data( # type: ignore[no-any-return] _LOGGER.debug(
"Updated AirQCoordinator.device_info for 'name' %s",
self.device_info.get("name"),
)
data: dict = await self.airq.get_latest_data(
return_average=self.return_average, return_average=self.return_average,
clip_negative_values=self.clip_negative, clip_negative_values=self.clip_negative,
) )
if warming_up_sensors := identify_warming_up_sensors(data):
_LOGGER.debug(
"Following sensors are still warming up: %s", warming_up_sensors
)
return data
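Editor's note: per the hunk above, `identify_warming_up_sensors` (now imported from `aioairq.core`) reports which sensors have not finished warming up, and the coordinator only mentions them at debug level. A stand-alone sketch with an illustrative stand-in for that helper:

```python
import logging

_LOGGER = logging.getLogger(__name__)
logging.basicConfig(level=logging.DEBUG)


def identify_warming_up_sensors(data: dict) -> set[str]:
    """Illustrative stand-in: sensors whose reading is still missing."""
    return {key for key, value in data.items() if value is None}


data = {"co2": 512, "tvoc": None}
if warming_up_sensors := identify_warming_up_sensors(data):
    _LOGGER.debug("Following sensors are still warming up: %s", warming_up_sensors)
```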

View File

@ -91,7 +91,7 @@
"name": "Hydrogen fluoride" "name": "Hydrogen fluoride"
}, },
"health_index": { "health_index": {
"name": "Health Index" "name": "Health index"
}, },
"absolute_humidity": { "absolute_humidity": {
"name": "Absolute humidity" "name": "Absolute humidity"
@ -112,10 +112,10 @@
"name": "Oxygen" "name": "Oxygen"
}, },
"performance_index": { "performance_index": {
"name": "Performance Index" "name": "Performance index"
}, },
"hydrogen_phosphide": { "hydrogen_phosphide": {
"name": "Hydrogen Phosphide" "name": "Hydrogen phosphide"
}, },
"relative_pressure": { "relative_pressure": {
"name": "Relative pressure" "name": "Relative pressure"
@ -127,22 +127,22 @@
"name": "Refrigerant" "name": "Refrigerant"
}, },
"silicon_hydride": { "silicon_hydride": {
"name": "Silicon Hydride" "name": "Silicon hydride"
}, },
"noise": { "noise": {
"name": "Noise" "name": "Noise"
}, },
"maximum_noise": { "maximum_noise": {
"name": "Noise (Maximum)" "name": "Noise (maximum)"
}, },
"radon": { "radon": {
"name": "Radon" "name": "Radon"
}, },
"industrial_volatile_organic_compounds": { "industrial_volatile_organic_compounds": {
"name": "VOCs (Industrial)" "name": "VOCs (industrial)"
}, },
"virus_index": { "virus_index": {
"name": "Virus Index" "name": "Virus index"
} }
} }
} }

View File

@ -102,7 +102,8 @@ class AirthingsConfigFlow(ConfigFlow, domain=DOMAIN):
device = await self._get_device_data(discovery_info) device = await self._get_device_data(discovery_info)
except AirthingsDeviceUpdateError: except AirthingsDeviceUpdateError:
return self.async_abort(reason="cannot_connect") return self.async_abort(reason="cannot_connect")
except Exception: # noqa: BLE001 except Exception:
_LOGGER.exception("Unknown error occurred")
return self.async_abort(reason="unknown") return self.async_abort(reason="unknown")
name = get_name(device) name = get_name(device)
@ -160,7 +161,8 @@ class AirthingsConfigFlow(ConfigFlow, domain=DOMAIN):
device = await self._get_device_data(discovery_info) device = await self._get_device_data(discovery_info)
except AirthingsDeviceUpdateError: except AirthingsDeviceUpdateError:
return self.async_abort(reason="cannot_connect") return self.async_abort(reason="cannot_connect")
except Exception: # noqa: BLE001 except Exception:
_LOGGER.exception("Unknown error occurred")
return self.async_abort(reason="unknown") return self.async_abort(reason="unknown")
name = get_name(device) name = get_name(device)
self._discovered_devices[address] = Discovery(name, discovery_info, device) self._discovered_devices[address] = Discovery(name, discovery_info, device)

View File

@ -32,7 +32,8 @@ class AirTouch5ConfigFlow(ConfigFlow, domain=DOMAIN):
client = Airtouch5SimpleClient(user_input[CONF_HOST]) client = Airtouch5SimpleClient(user_input[CONF_HOST])
try: try:
await client.test_connection() await client.test_connection()
except Exception: # noqa: BLE001 except Exception:
_LOGGER.exception("Unexpected exception")
errors = {"base": "cannot_connect"} errors = {"base": "cannot_connect"}
else: else:
await self.async_set_unique_id(user_input[CONF_HOST]) await self.async_set_unique_id(user_input[CONF_HOST])
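Editor's note: the Airthings, AirTouch 5, and (later) Anova config-flow hunks all make the same change: the `# noqa: BLE001` suppression on the broad catch is dropped, and the handler now logs a full traceback before aborting. A minimal stand-alone sketch of that pattern:

```python
import logging

_LOGGER = logging.getLogger(__name__)


def try_connect(connect) -> dict[str, str]:
    """Return config-flow style errors instead of letting exceptions escape."""
    errors: dict[str, str] = {}
    try:
        connect()
    except Exception:  # broad catch is deliberate in config flows
        _LOGGER.exception("Unexpected exception")
        errors["base"] = "cannot_connect"
    return errors


print(try_connect(lambda: 1 / 0))  # {'base': 'cannot_connect'}
```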

View File

@ -2,7 +2,7 @@
"config": { "config": {
"step": { "step": {
"geography_by_coords": { "geography_by_coords": {
"title": "Configure a Geography", "title": "Configure a geography",
"description": "Use the AirVisual cloud API to monitor a latitude/longitude.", "description": "Use the AirVisual cloud API to monitor a latitude/longitude.",
"data": { "data": {
"api_key": "[%key:common::config_flow::data::api_key%]", "api_key": "[%key:common::config_flow::data::api_key%]",
@ -16,8 +16,8 @@
"data": { "data": {
"api_key": "[%key:common::config_flow::data::api_key%]", "api_key": "[%key:common::config_flow::data::api_key%]",
"city": "City", "city": "City",
"country": "Country", "state": "State",
"state": "State" "country": "[%key:common::config_flow::data::country%]"
} }
}, },
"reauth_confirm": { "reauth_confirm": {
@ -56,12 +56,12 @@
"sensor": { "sensor": {
"pollutant_label": { "pollutant_label": {
"state": { "state": {
"co": "Carbon Monoxide", "co": "[%key:component::sensor::entity_component::carbon_monoxide::name%]",
"n2": "Nitrogen Dioxide", "n2": "[%key:component::sensor::entity_component::nitrogen_dioxide::name%]",
"o3": "Ozone", "o3": "[%key:component::sensor::entity_component::ozone::name%]",
"p1": "PM10", "p1": "[%key:component::sensor::entity_component::pm10::name%]",
"p2": "PM2.5", "p2": "[%key:component::sensor::entity_component::pm25::name%]",
"s2": "Sulfur Dioxide" "s2": "[%key:component::sensor::entity_component::sulphur_dioxide::name%]"
} }
}, },
"pollutant_level": { "pollutant_level": {

View File

@ -11,5 +11,5 @@
"documentation": "https://www.home-assistant.io/integrations/airzone", "documentation": "https://www.home-assistant.io/integrations/airzone",
"iot_class": "local_polling", "iot_class": "local_polling",
"loggers": ["aioairzone"], "loggers": ["aioairzone"],
"requirements": ["aioairzone==0.9.9"] "requirements": ["aioairzone==1.0.0"]
} }

View File

@ -9,6 +9,8 @@ from aioairzone.const import (
AZD_HUMIDITY, AZD_HUMIDITY,
AZD_TEMP, AZD_TEMP,
AZD_TEMP_UNIT, AZD_TEMP_UNIT,
AZD_THERMOSTAT_BATTERY,
AZD_THERMOSTAT_SIGNAL,
AZD_WEBSERVER, AZD_WEBSERVER,
AZD_WIFI_RSSI, AZD_WIFI_RSSI,
AZD_ZONES, AZD_ZONES,
@ -73,6 +75,20 @@ ZONE_SENSOR_TYPES: Final[tuple[SensorEntityDescription, ...]] = (
native_unit_of_measurement=PERCENTAGE, native_unit_of_measurement=PERCENTAGE,
state_class=SensorStateClass.MEASUREMENT, state_class=SensorStateClass.MEASUREMENT,
), ),
SensorEntityDescription(
device_class=SensorDeviceClass.BATTERY,
key=AZD_THERMOSTAT_BATTERY,
native_unit_of_measurement=PERCENTAGE,
state_class=SensorStateClass.MEASUREMENT,
),
SensorEntityDescription(
entity_category=EntityCategory.DIAGNOSTIC,
entity_registry_enabled_default=False,
key=AZD_THERMOSTAT_SIGNAL,
native_unit_of_measurement=PERCENTAGE,
state_class=SensorStateClass.MEASUREMENT,
translation_key="thermostat_signal",
),
) )

View File

@ -76,6 +76,9 @@
"sensor": { "sensor": {
"rssi": { "rssi": {
"name": "RSSI" "name": "RSSI"
},
"thermostat_signal": {
"name": "Signal strength"
} }
} }
} }

View File

@ -6,5 +6,5 @@
"documentation": "https://www.home-assistant.io/integrations/airzone_cloud", "documentation": "https://www.home-assistant.io/integrations/airzone_cloud",
"iot_class": "cloud_push", "iot_class": "cloud_push",
"loggers": ["aioairzone_cloud"], "loggers": ["aioairzone_cloud"],
"requirements": ["aioairzone-cloud==0.6.10"] "requirements": ["aioairzone-cloud==0.6.11"]
} }

View File

@ -32,9 +32,9 @@
"air_quality": { "air_quality": {
"name": "Air Quality mode", "name": "Air Quality mode",
"state": { "state": {
"off": "Off", "off": "[%key:common::state::off%]",
"on": "On", "on": "[%key:common::state::on%]",
"auto": "Auto" "auto": "[%key:common::state::auto%]"
} }
}, },
"modes": { "modes": {

View File

@ -2,7 +2,7 @@
from __future__ import annotations from __future__ import annotations
from homeassistant.config_entries import ConfigEntry, ConfigEntryState from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant from homeassistant.core import HomeAssistant
from homeassistant.helpers import issue_registry as ir from homeassistant.helpers import issue_registry as ir
@ -28,11 +28,13 @@ async def async_setup_entry(hass: HomeAssistant, _: ConfigEntry) -> bool:
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool: async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Unload a config entry.""" """Unload a config entry."""
if all(
config_entry.state is ConfigEntryState.NOT_LOADED
for config_entry in hass.config_entries.async_entries(DOMAIN)
if config_entry.entry_id != entry.entry_id
):
ir.async_delete_issue(hass, DOMAIN, DOMAIN)
return True return True
async def async_remove_entry(hass: HomeAssistant, entry: ConfigEntry) -> None:
"""Remove a config entry."""
if not hass.config_entries.async_loaded_entries(DOMAIN):
ir.async_delete_issue(hass, DOMAIN, DOMAIN)
# Remove any remaining disabled or ignored entries
for _entry in hass.config_entries.async_entries(DOMAIN):
hass.async_create_task(hass.config_entries.async_remove(_entry.entry_id))

View File

@ -7,5 +7,5 @@
"integration_type": "device", "integration_type": "device",
"iot_class": "local_push", "iot_class": "local_push",
"loggers": ["adext", "alarmdecoder"], "loggers": ["adext", "alarmdecoder"],
"requirements": ["adext==0.4.3"] "requirements": ["adext==0.4.4"]
} }

View File

@ -14,7 +14,7 @@ from homeassistant.components.notify import (
) )
from homeassistant.const import STATE_IDLE, STATE_OFF, STATE_ON from homeassistant.const import STATE_IDLE, STATE_OFF, STATE_ON
from homeassistant.core import Event, EventStateChangedData, HassJob, HomeAssistant from homeassistant.core import Event, EventStateChangedData, HassJob, HomeAssistant
from homeassistant.exceptions import ServiceNotFound from homeassistant.exceptions import ServiceNotFound, ServiceValidationError
from homeassistant.helpers.entity import Entity from homeassistant.helpers.entity import Entity
from homeassistant.helpers.event import ( from homeassistant.helpers.event import (
async_track_point_in_time, async_track_point_in_time,
@ -195,7 +195,8 @@ class AlertEntity(Entity):
async def async_turn_off(self, **kwargs: Any) -> None: async def async_turn_off(self, **kwargs: Any) -> None:
"""Async Acknowledge alert.""" """Async Acknowledge alert."""
LOGGER.debug("Acknowledged Alert: %s", self._attr_name) if not self._can_ack:
raise ServiceValidationError("This alert cannot be acknowledged")
self._ack = True self._ack = True
self.async_write_ha_state() self.async_write_ha_state()
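Editor's note: the alert hunk above turns a silent no-op into an explicit failure: acknowledging a non-acknowledgeable alert now raises `ServiceValidationError`. A minimal sketch, with the exception stubbed in place of `homeassistant.exceptions.ServiceValidationError`:

```python
class ServiceValidationError(Exception):
    """Raised when a service call fails user-input validation."""


class Alert:
    def __init__(self, can_ack: bool) -> None:
        self._can_ack = can_ack
        self._ack = False

    def turn_off(self) -> None:
        if not self._can_ack:
            raise ServiceValidationError("This alert cannot be acknowledged")
        self._ack = True


try:
    Alert(can_ack=False).turn_off()
except ServiceValidationError as err:
    print(err)
```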

View File

@ -1438,7 +1438,7 @@ class AlexaModeController(AlexaCapability):
# Fan preset_mode # Fan preset_mode
if self.instance == f"{fan.DOMAIN}.{fan.ATTR_PRESET_MODE}": if self.instance == f"{fan.DOMAIN}.{fan.ATTR_PRESET_MODE}":
mode = self.entity.attributes.get(fan.ATTR_PRESET_MODE, None) mode = self.entity.attributes.get(fan.ATTR_PRESET_MODE, None)
if mode in self.entity.attributes.get(fan.ATTR_PRESET_MODES, None): if mode in self.entity.attributes.get(fan.ATTR_PRESET_MODES, ()):
return f"{fan.ATTR_PRESET_MODE}.{mode}" return f"{fan.ATTR_PRESET_MODE}.{mode}"
# Humidifier mode # Humidifier mode
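Editor's note: the one-character Alexa fix above matters because a membership test against `None` raises `TypeError`, while an empty tuple simply yields `False`. A quick demonstration:

```python
attributes: dict = {}  # entity exposes no preset modes at all

mode = attributes.get("preset_mode")
try:
    print(mode in attributes.get("preset_modes", None))
except TypeError as err:
    print(f"old fallback crashed: {err}")

print(mode in attributes.get("preset_modes", ()))  # False, no crash
```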

View File

@ -6,5 +6,5 @@
"iot_class": "cloud_push", "iot_class": "cloud_push",
"loggers": ["boto3", "botocore", "s3transfer"], "loggers": ["boto3", "botocore", "s3transfer"],
"quality_scale": "legacy", "quality_scale": "legacy",
"requirements": ["boto3==1.34.131"] "requirements": ["boto3==1.37.1"]
} }

View File

@ -239,6 +239,8 @@ SENSOR_DESCRIPTIONS = (
native_unit_of_measurement=DEGREE, native_unit_of_measurement=DEGREE,
suggested_display_precision=0, suggested_display_precision=0,
entity_registry_enabled_default=False, entity_registry_enabled_default=False,
device_class=SensorDeviceClass.WIND_DIRECTION,
state_class=SensorStateClass.MEASUREMENT_ANGLE,
), ),
SensorEntityDescription( SensorEntityDescription(
key=TYPE_WINDGUSTMPH, key=TYPE_WINDGUSTMPH,

View File

@ -608,21 +608,26 @@ SENSOR_DESCRIPTIONS = (
key=TYPE_WINDDIR, key=TYPE_WINDDIR,
translation_key="wind_direction", translation_key="wind_direction",
native_unit_of_measurement=DEGREE, native_unit_of_measurement=DEGREE,
device_class=SensorDeviceClass.WIND_DIRECTION,
state_class=SensorStateClass.MEASUREMENT_ANGLE,
), ),
SensorEntityDescription( SensorEntityDescription(
key=TYPE_WINDDIR_AVG10M, key=TYPE_WINDDIR_AVG10M,
translation_key="wind_direction_average_10m", translation_key="wind_direction_average_10m",
native_unit_of_measurement=DEGREE, native_unit_of_measurement=DEGREE,
device_class=SensorDeviceClass.WIND_DIRECTION,
), ),
SensorEntityDescription( SensorEntityDescription(
key=TYPE_WINDDIR_AVG2M, key=TYPE_WINDDIR_AVG2M,
translation_key="wind_direction_average_2m", translation_key="wind_direction_average_2m",
native_unit_of_measurement=DEGREE, native_unit_of_measurement=DEGREE,
device_class=SensorDeviceClass.WIND_DIRECTION,
), ),
SensorEntityDescription( SensorEntityDescription(
key=TYPE_WINDGUSTDIR, key=TYPE_WINDGUSTDIR,
translation_key="wind_gust_direction", translation_key="wind_gust_direction",
native_unit_of_measurement=DEGREE, native_unit_of_measurement=DEGREE,
device_class=SensorDeviceClass.WIND_DIRECTION,
), ),
SensorEntityDescription( SensorEntityDescription(
key=TYPE_WINDGUSTMPH, key=TYPE_WINDGUSTMPH,

View File

@ -8,7 +8,7 @@ from python_homeassistant_analytics import (
HomeassistantAnalyticsClient, HomeassistantAnalyticsClient,
HomeassistantAnalyticsConnectionError, HomeassistantAnalyticsConnectionError,
) )
from python_homeassistant_analytics.models import IntegrationType from python_homeassistant_analytics.models import Environment, IntegrationType
import voluptuous as vol import voluptuous as vol
from homeassistant.config_entries import ConfigFlow, ConfigFlowResult, OptionsFlow from homeassistant.config_entries import ConfigFlow, ConfigFlowResult, OptionsFlow
@ -81,7 +81,7 @@ class HomeassistantAnalyticsConfigFlow(ConfigFlow, domain=DOMAIN):
) )
try: try:
addons = await client.get_addons() addons = await client.get_addons()
integrations = await client.get_integrations() integrations = await client.get_integrations(Environment.NEXT)
custom_integrations = await client.get_custom_integrations() custom_integrations = await client.get_custom_integrations()
except HomeassistantAnalyticsConnectionError: except HomeassistantAnalyticsConnectionError:
LOGGER.exception("Error connecting to Home Assistant analytics") LOGGER.exception("Error connecting to Home Assistant analytics")
@ -165,7 +165,7 @@ class HomeassistantAnalyticsOptionsFlowHandler(OptionsFlow):
) )
try: try:
addons = await client.get_addons() addons = await client.get_addons()
integrations = await client.get_integrations() integrations = await client.get_integrations(Environment.NEXT)
custom_integrations = await client.get_custom_integrations() custom_integrations = await client.get_custom_integrations()
except HomeassistantAnalyticsConnectionError: except HomeassistantAnalyticsConnectionError:
LOGGER.exception("Error connecting to Home Assistant analytics") LOGGER.exception("Error connecting to Home Assistant analytics")

View File

@ -5,5 +5,5 @@
"config_flow": true, "config_flow": true,
"documentation": "https://www.home-assistant.io/integrations/android_ip_webcam", "documentation": "https://www.home-assistant.io/integrations/android_ip_webcam",
"iot_class": "local_polling", "iot_class": "local_polling",
"requirements": ["pydroid-ipcam==2.0.0"] "requirements": ["pydroid-ipcam==3.0.0"]
} }

View File

@ -73,7 +73,7 @@ class AndroidTVRemoteBaseEntity(Entity):
self._api.send_key_command(key_code, direction) self._api.send_key_command(key_code, direction)
except ConnectionClosed as exc: except ConnectionClosed as exc:
raise HomeAssistantError( raise HomeAssistantError(
"Connection to Android TV device is closed" translation_domain=DOMAIN, translation_key="connection_closed"
) from exc ) from exc
def _send_launch_app_command(self, app_link: str) -> None: def _send_launch_app_command(self, app_link: str) -> None:
@ -85,5 +85,5 @@ class AndroidTVRemoteBaseEntity(Entity):
self._api.send_launch_app_command(app_link) self._api.send_launch_app_command(app_link)
except ConnectionClosed as exc: except ConnectionClosed as exc:
raise HomeAssistantError( raise HomeAssistantError(
"Connection to Android TV device is closed" translation_domain=DOMAIN, translation_key="connection_closed"
) from exc ) from exc

View File

@ -7,6 +7,6 @@
"integration_type": "device", "integration_type": "device",
"iot_class": "local_push", "iot_class": "local_push",
"loggers": ["androidtvremote2"], "loggers": ["androidtvremote2"],
"requirements": ["androidtvremote2==0.1.2"], "requirements": ["androidtvremote2==0.2.1"],
"zeroconf": ["_androidtvremote2._tcp.local."] "zeroconf": ["_androidtvremote2._tcp.local."]
} }

View File

@ -21,7 +21,7 @@ from homeassistant.exceptions import HomeAssistantError
from homeassistant.helpers.entity_platform import AddConfigEntryEntitiesCallback from homeassistant.helpers.entity_platform import AddConfigEntryEntitiesCallback
from . import AndroidTVRemoteConfigEntry from . import AndroidTVRemoteConfigEntry
from .const import CONF_APP_ICON, CONF_APP_NAME from .const import CONF_APP_ICON, CONF_APP_NAME, DOMAIN
from .entity import AndroidTVRemoteBaseEntity from .entity import AndroidTVRemoteBaseEntity
PARALLEL_UPDATES = 0 PARALLEL_UPDATES = 0
@ -233,5 +233,5 @@ class AndroidTVRemoteMediaPlayerEntity(AndroidTVRemoteBaseEntity, MediaPlayerEnt
await asyncio.sleep(delay_secs) await asyncio.sleep(delay_secs)
except ConnectionClosed as exc: except ConnectionClosed as exc:
raise HomeAssistantError( raise HomeAssistantError(
"Connection to Android TV device is closed" translation_domain=DOMAIN, translation_key="connection_closed"
) from exc ) from exc

View File

@ -54,5 +54,10 @@
} }
} }
} }
},
"exceptions": {
"connection_closed": {
"message": "Connection to the Android TV device is closed"
}
} }
} }

View File

@ -2,6 +2,8 @@
from __future__ import annotations from __future__ import annotations
import logging
from anova_wifi import AnovaApi, InvalidLogin from anova_wifi import AnovaApi, InvalidLogin
import voluptuous as vol import voluptuous as vol
@ -11,8 +13,10 @@ from homeassistant.helpers.aiohttp_client import async_get_clientsession
from .const import DOMAIN from .const import DOMAIN
_LOGGER = logging.getLogger(__name__)
class AnovaConfligFlow(ConfigFlow, domain=DOMAIN):
class AnovaConfigFlow(ConfigFlow, domain=DOMAIN):
"""Sets up a config flow for Anova.""" """Sets up a config flow for Anova."""
VERSION = 1 VERSION = 1
@ -35,7 +39,8 @@ class AnovaConfligFlow(ConfigFlow, domain=DOMAIN):
await api.authenticate() await api.authenticate()
except InvalidLogin: except InvalidLogin:
errors["base"] = "invalid_auth" errors["base"] = "invalid_auth"
except Exception: # noqa: BLE001 except Exception:
_LOGGER.exception("Unexpected exception")
errors["base"] = "unknown" errors["base"] = "unknown"
else: else:
return self.async_create_entry( return self.async_create_entry(

View File

@ -22,6 +22,7 @@ from . import AnthemavConfigEntry
from .const import ANTHEMAV_UPDATE_SIGNAL, DOMAIN, MANUFACTURER from .const import ANTHEMAV_UPDATE_SIGNAL, DOMAIN, MANUFACTURER
_LOGGER = logging.getLogger(__name__) _LOGGER = logging.getLogger(__name__)
VOLUME_STEP = 0.01
async def async_setup_entry( async def async_setup_entry(
@ -60,6 +61,7 @@ class AnthemAVR(MediaPlayerEntity):
| MediaPlayerEntityFeature.TURN_OFF | MediaPlayerEntityFeature.TURN_OFF
| MediaPlayerEntityFeature.SELECT_SOURCE | MediaPlayerEntityFeature.SELECT_SOURCE
) )
_attr_volume_step = VOLUME_STEP
def __init__( def __init__(
self, self,

View File

@ -10,7 +10,7 @@
}, },
"error": { "error": {
"cannot_connect": "[%key:common::config_flow::error::cannot_connect%]", "cannot_connect": "[%key:common::config_flow::error::cannot_connect%]",
"cannot_receive_deviceinfo": "Failed to retreive MAC Address. Make sure the device is turned on" "cannot_receive_deviceinfo": "Failed to retrieve MAC Address. Make sure the device is turned on"
}, },
"abort": { "abort": {
"already_configured": "[%key:common::config_flow::abort::already_configured_device%]" "already_configured": "[%key:common::config_flow::abort::already_configured_device%]"

View File

@ -2,6 +2,8 @@
from __future__ import annotations from __future__ import annotations
from functools import partial
import anthropic import anthropic
from homeassistant.config_entries import ConfigEntry from homeassistant.config_entries import ConfigEntry
@ -10,7 +12,7 @@ from homeassistant.core import HomeAssistant
from homeassistant.exceptions import ConfigEntryNotReady from homeassistant.exceptions import ConfigEntryNotReady
from homeassistant.helpers import config_validation as cv from homeassistant.helpers import config_validation as cv
from .const import DOMAIN, LOGGER from .const import CONF_CHAT_MODEL, DOMAIN, LOGGER, RECOMMENDED_CHAT_MODEL
PLATFORMS = (Platform.CONVERSATION,) PLATFORMS = (Platform.CONVERSATION,)
CONFIG_SCHEMA = cv.config_entry_only_config_schema(DOMAIN) CONFIG_SCHEMA = cv.config_entry_only_config_schema(DOMAIN)
@ -20,14 +22,13 @@ type AnthropicConfigEntry = ConfigEntry[anthropic.AsyncClient]
async def async_setup_entry(hass: HomeAssistant, entry: AnthropicConfigEntry) -> bool: async def async_setup_entry(hass: HomeAssistant, entry: AnthropicConfigEntry) -> bool:
"""Set up Anthropic from a config entry.""" """Set up Anthropic from a config entry."""
client = anthropic.AsyncAnthropic(api_key=entry.data[CONF_API_KEY]) client = await hass.async_add_executor_job(
partial(anthropic.AsyncAnthropic, api_key=entry.data[CONF_API_KEY])
)
try: try:
await client.messages.create( model_id = entry.options.get(CONF_CHAT_MODEL, RECOMMENDED_CHAT_MODEL)
model="claude-3-haiku-20240307", model = await client.models.retrieve(model_id=model_id, timeout=10.0)
max_tokens=1, LOGGER.debug("Anthropic model: %s", model.display_name)
messages=[{"role": "user", "content": "Hi"}],
timeout=10.0,
)
except anthropic.AuthenticationError as err: except anthropic.AuthenticationError as err:
LOGGER.error("Invalid API key: %s", err) LOGGER.error("Invalid API key: %s", err)
return False return False
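Editor's note: two changes land together in the Anthropic setup hunk above: client construction moves into an executor job (it can block on I/O), and key validation switches from sending a throwaway one-token message to fetching model metadata. A condensed sketch outside Home Assistant, assuming the `anthropic` SDK's Models API exactly as the diff uses it:

```python
import asyncio
from functools import partial

import anthropic


async def validate(api_key: str, model_id: str) -> str:
    loop = asyncio.get_running_loop()
    # Building the client off the event loop mirrors async_add_executor_job.
    client = await loop.run_in_executor(
        None, partial(anthropic.AsyncAnthropic, api_key=api_key)
    )
    model = await client.models.retrieve(model_id=model_id, timeout=10.0)
    return model.display_name  # raises anthropic.AuthenticationError on bad keys


# asyncio.run(validate("sk-...", "claude-3-5-haiku-latest"))  # needs a real key
```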

View File

@ -2,6 +2,7 @@
from __future__ import annotations from __future__ import annotations
from functools import partial
import logging import logging
from types import MappingProxyType from types import MappingProxyType
from typing import Any from typing import Any
@ -33,10 +34,12 @@ from .const import (
CONF_PROMPT, CONF_PROMPT,
CONF_RECOMMENDED, CONF_RECOMMENDED,
CONF_TEMPERATURE, CONF_TEMPERATURE,
CONF_THINKING_BUDGET,
DOMAIN, DOMAIN,
RECOMMENDED_CHAT_MODEL, RECOMMENDED_CHAT_MODEL,
RECOMMENDED_MAX_TOKENS, RECOMMENDED_MAX_TOKENS,
RECOMMENDED_TEMPERATURE, RECOMMENDED_TEMPERATURE,
RECOMMENDED_THINKING_BUDGET,
) )
_LOGGER = logging.getLogger(__name__) _LOGGER = logging.getLogger(__name__)
@ -59,13 +62,10 @@ async def validate_input(hass: HomeAssistant, data: dict[str, Any]) -> None:
Data has the keys from STEP_USER_DATA_SCHEMA with values provided by the user. Data has the keys from STEP_USER_DATA_SCHEMA with values provided by the user.
""" """
client = anthropic.AsyncAnthropic(api_key=data[CONF_API_KEY]) client = await hass.async_add_executor_job(
await client.messages.create( partial(anthropic.AsyncAnthropic, api_key=data[CONF_API_KEY])
model="claude-3-haiku-20240307",
max_tokens=1,
messages=[{"role": "user", "content": "Hi"}],
timeout=10.0,
) )
await client.models.list(timeout=10.0)
class AnthropicConfigFlow(ConfigFlow, domain=DOMAIN): class AnthropicConfigFlow(ConfigFlow, domain=DOMAIN):
@ -130,21 +130,29 @@ class AnthropicOptionsFlow(OptionsFlow):
) -> ConfigFlowResult: ) -> ConfigFlowResult:
"""Manage the options.""" """Manage the options."""
options: dict[str, Any] | MappingProxyType[str, Any] = self.config_entry.options options: dict[str, Any] | MappingProxyType[str, Any] = self.config_entry.options
errors: dict[str, str] = {}
if user_input is not None: if user_input is not None:
if user_input[CONF_RECOMMENDED] == self.last_rendered_recommended: if user_input[CONF_RECOMMENDED] == self.last_rendered_recommended:
if user_input[CONF_LLM_HASS_API] == "none": if user_input[CONF_LLM_HASS_API] == "none":
user_input.pop(CONF_LLM_HASS_API) user_input.pop(CONF_LLM_HASS_API)
return self.async_create_entry(title="", data=user_input)
# Re-render the options again, now with the recommended options shown/hidden if user_input.get(
self.last_rendered_recommended = user_input[CONF_RECOMMENDED] CONF_THINKING_BUDGET, RECOMMENDED_THINKING_BUDGET
) >= user_input.get(CONF_MAX_TOKENS, RECOMMENDED_MAX_TOKENS):
errors[CONF_THINKING_BUDGET] = "thinking_budget_too_large"
options = { if not errors:
CONF_RECOMMENDED: user_input[CONF_RECOMMENDED], return self.async_create_entry(title="", data=user_input)
CONF_PROMPT: user_input[CONF_PROMPT], else:
CONF_LLM_HASS_API: user_input[CONF_LLM_HASS_API], # Re-render the options again, now with the recommended options shown/hidden
} self.last_rendered_recommended = user_input[CONF_RECOMMENDED]
options = {
CONF_RECOMMENDED: user_input[CONF_RECOMMENDED],
CONF_PROMPT: user_input[CONF_PROMPT],
CONF_LLM_HASS_API: user_input[CONF_LLM_HASS_API],
}
suggested_values = options.copy() suggested_values = options.copy()
if not suggested_values.get(CONF_PROMPT): if not suggested_values.get(CONF_PROMPT):
@ -158,6 +166,7 @@ class AnthropicOptionsFlow(OptionsFlow):
return self.async_show_form( return self.async_show_form(
step_id="init", step_id="init",
data_schema=schema, data_schema=schema,
errors=errors or None,
) )
@ -207,6 +216,10 @@ def anthropic_config_option_schema(
CONF_TEMPERATURE, CONF_TEMPERATURE,
default=RECOMMENDED_TEMPERATURE, default=RECOMMENDED_TEMPERATURE,
): NumberSelector(NumberSelectorConfig(min=0, max=1, step=0.05)), ): NumberSelector(NumberSelectorConfig(min=0, max=1, step=0.05)),
vol.Optional(
CONF_THINKING_BUDGET,
default=RECOMMENDED_THINKING_BUDGET,
): int,
} }
) )
return schema return schema
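Editor's note: the options-flow hunk above adds a cross-field check: the thinking budget must stay below `max_tokens`, otherwise the form re-renders with a `thinking_budget_too_large` field error. A sketch of just that validation, using plain dicts in place of the voluptuous schema:

```python
RECOMMENDED_MAX_TOKENS = 1024
RECOMMENDED_THINKING_BUDGET = 0


def validate_options(user_input: dict) -> dict[str, str]:
    """Return config-flow style field errors for bad option combinations."""
    errors: dict[str, str] = {}
    if user_input.get("thinking_budget", RECOMMENDED_THINKING_BUDGET) >= user_input.get(
        "max_tokens", RECOMMENDED_MAX_TOKENS
    ):
        errors["thinking_budget"] = "thinking_budget_too_large"
    return errors


print(validate_options({"thinking_budget": 2048, "max_tokens": 1024}))
```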

View File

@ -13,3 +13,8 @@ CONF_MAX_TOKENS = "max_tokens"
RECOMMENDED_MAX_TOKENS = 1024 RECOMMENDED_MAX_TOKENS = 1024
CONF_TEMPERATURE = "temperature" CONF_TEMPERATURE = "temperature"
RECOMMENDED_TEMPERATURE = 1.0 RECOMMENDED_TEMPERATURE = 1.0
CONF_THINKING_BUDGET = "thinking_budget"
RECOMMENDED_THINKING_BUDGET = 0
MIN_THINKING_BUDGET = 1024
THINKING_MODELS = ["claude-3-7-sonnet-20250219", "claude-3-7-sonnet-latest"]

View File

@ -1,23 +1,32 @@
"""Conversation support for Anthropic.""" """Conversation support for Anthropic."""
from collections.abc import AsyncGenerator, Callable from collections.abc import AsyncGenerator, Callable, Iterable
import json import json
from typing import Any, Literal from typing import Any, Literal, cast
import anthropic import anthropic
from anthropic import AsyncStream from anthropic import AsyncStream
from anthropic._types import NOT_GIVEN from anthropic._types import NOT_GIVEN
from anthropic.types import ( from anthropic.types import (
InputJSONDelta, InputJSONDelta,
Message,
MessageParam, MessageParam,
MessageStreamEvent, MessageStreamEvent,
RawContentBlockDeltaEvent, RawContentBlockDeltaEvent,
RawContentBlockStartEvent, RawContentBlockStartEvent,
RawContentBlockStopEvent, RawContentBlockStopEvent,
RawMessageStartEvent,
RawMessageStopEvent,
RedactedThinkingBlock,
RedactedThinkingBlockParam,
SignatureDelta,
TextBlock, TextBlock,
TextBlockParam, TextBlockParam,
TextDelta, TextDelta,
ThinkingBlock,
ThinkingBlockParam,
ThinkingConfigDisabledParam,
ThinkingConfigEnabledParam,
ThinkingDelta,
ToolParam, ToolParam,
ToolResultBlockParam, ToolResultBlockParam,
ToolUseBlock, ToolUseBlock,
@ -30,7 +39,7 @@ from homeassistant.config_entries import ConfigEntry
from homeassistant.const import CONF_LLM_HASS_API, MATCH_ALL from homeassistant.const import CONF_LLM_HASS_API, MATCH_ALL
from homeassistant.core import HomeAssistant from homeassistant.core import HomeAssistant
from homeassistant.exceptions import HomeAssistantError from homeassistant.exceptions import HomeAssistantError
from homeassistant.helpers import chat_session, device_registry as dr, intent, llm from homeassistant.helpers import device_registry as dr, intent, llm
from homeassistant.helpers.entity_platform import AddConfigEntryEntitiesCallback from homeassistant.helpers.entity_platform import AddConfigEntryEntitiesCallback
from . import AnthropicConfigEntry from . import AnthropicConfigEntry
@ -39,11 +48,15 @@ from .const import (
CONF_MAX_TOKENS, CONF_MAX_TOKENS,
CONF_PROMPT, CONF_PROMPT,
CONF_TEMPERATURE, CONF_TEMPERATURE,
CONF_THINKING_BUDGET,
DOMAIN, DOMAIN,
LOGGER, LOGGER,
MIN_THINKING_BUDGET,
RECOMMENDED_CHAT_MODEL, RECOMMENDED_CHAT_MODEL,
RECOMMENDED_MAX_TOKENS, RECOMMENDED_MAX_TOKENS,
RECOMMENDED_TEMPERATURE, RECOMMENDED_TEMPERATURE,
RECOMMENDED_THINKING_BUDGET,
THINKING_MODELS,
) )
# Max number of back and forth with the LLM to generate a response # Max number of back and forth with the LLM to generate a response
@ -71,73 +84,101 @@ def _format_tool(
) )
def _message_convert( def _convert_content(
message: Message, chat_content: Iterable[conversation.Content],
) -> MessageParam: ) -> list[MessageParam]:
"""Convert from class to TypedDict.""" """Transform HA chat_log content into Anthropic API format."""
param_content: list[TextBlockParam | ToolUseBlockParam] = [] messages: list[MessageParam] = []
for message_content in message.content: for content in chat_content:
if isinstance(message_content, TextBlock): if isinstance(content, conversation.ToolResultContent):
param_content.append(TextBlockParam(type="text", text=message_content.text)) tool_result_block = ToolResultBlockParam(
elif isinstance(message_content, ToolUseBlock): type="tool_result",
param_content.append( tool_use_id=content.tool_call_id,
ToolUseBlockParam( content=json.dumps(content.tool_result),
type="tool_use",
id=message_content.id,
name=message_content.name,
input=message_content.input,
)
) )
if not messages or messages[-1]["role"] != "user":
return MessageParam(role=message.role, content=param_content) messages.append(
MessageParam(
role="user",
def _convert_content(chat_content: conversation.Content) -> MessageParam: content=[tool_result_block],
"""Create tool response content."""
if isinstance(chat_content, conversation.ToolResultContent):
return MessageParam(
role="user",
content=[
ToolResultBlockParam(
type="tool_result",
tool_use_id=chat_content.tool_call_id,
content=json.dumps(chat_content.tool_result),
)
],
)
if isinstance(chat_content, conversation.AssistantContent):
return MessageParam(
role="assistant",
content=[
TextBlockParam(type="text", text=chat_content.content or ""),
*[
ToolUseBlockParam(
type="tool_use",
id=tool_call.id,
name=tool_call.tool_name,
input=tool_call.tool_args,
) )
for tool_call in chat_content.tool_calls or () )
], elif isinstance(messages[-1]["content"], str):
], messages[-1]["content"] = [
) TextBlockParam(type="text", text=messages[-1]["content"]),
if isinstance(chat_content, conversation.UserContent): tool_result_block,
return MessageParam( ]
role="user", else:
content=chat_content.content, messages[-1]["content"].append(tool_result_block) # type: ignore[attr-defined]
) elif isinstance(content, conversation.UserContent):
# Note: We don't pass SystemContent here as its passed to the API as the prompt # Combine consequent user messages
raise ValueError(f"Unexpected content type: {type(chat_content)}") if not messages or messages[-1]["role"] != "user":
messages.append(
MessageParam(
role="user",
content=content.content,
)
)
elif isinstance(messages[-1]["content"], str):
messages[-1]["content"] = [
TextBlockParam(type="text", text=messages[-1]["content"]),
TextBlockParam(type="text", text=content.content),
]
else:
messages[-1]["content"].append( # type: ignore[attr-defined]
TextBlockParam(type="text", text=content.content)
)
elif isinstance(content, conversation.AssistantContent):
# Combine consequent assistant messages
if not messages or messages[-1]["role"] != "assistant":
messages.append(
MessageParam(
role="assistant",
content=[],
)
)
if content.content:
messages[-1]["content"].append( # type: ignore[union-attr]
TextBlockParam(type="text", text=content.content)
)
if content.tool_calls:
messages[-1]["content"].extend( # type: ignore[union-attr]
[
ToolUseBlockParam(
type="tool_use",
id=tool_call.id,
name=tool_call.tool_name,
input=tool_call.tool_args,
)
for tool_call in content.tool_calls
]
)
else:
# Note: We don't pass SystemContent here as its passed to the API as the prompt
raise TypeError(f"Unexpected content type: {type(content)}")
return messages
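Editor's note: the core behavior of the rewritten `_convert_content` above is merging consecutive content of the same role into a single Anthropic message, upgrading a plain-string `content` to a list of text blocks on the first merge. A condensed sketch of that merging step alone:

```python
def merge(messages: list[dict], role: str, text: str) -> None:
    """Append text, combining it with a preceding message of the same role."""
    if not messages or messages[-1]["role"] != role:
        messages.append({"role": role, "content": text})
    elif isinstance(messages[-1]["content"], str):
        messages[-1]["content"] = [
            {"type": "text", "text": messages[-1]["content"]},
            {"type": "text", "text": text},
        ]
    else:
        messages[-1]["content"].append({"type": "text", "text": text})


messages: list[dict] = []
merge(messages, "user", "turn on the lights")
merge(messages, "user", "and the fan")
print(messages)  # one user message with two text blocks
```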
async def _transform_stream( async def _transform_stream(
result: AsyncStream[MessageStreamEvent], result: AsyncStream[MessageStreamEvent],
messages: list[MessageParam],
) -> AsyncGenerator[conversation.AssistantContentDeltaDict]: ) -> AsyncGenerator[conversation.AssistantContentDeltaDict]:
"""Transform the response stream into HA format. """Transform the response stream into HA format.
A typical stream of responses might look something like the following: A typical stream of responses might look something like the following:
- RawMessageStartEvent with no content - RawMessageStartEvent with no content
- RawContentBlockStartEvent with an empty ThinkingBlock (if extended thinking is enabled)
- RawContentBlockDeltaEvent with a ThinkingDelta
- RawContentBlockDeltaEvent with a ThinkingDelta
- RawContentBlockDeltaEvent with a ThinkingDelta
- ...
- RawContentBlockDeltaEvent with a SignatureDelta
- RawContentBlockStopEvent
- RawContentBlockStartEvent with a RedactedThinkingBlock (occasionally)
- RawContentBlockStopEvent (RedactedThinkingBlock does not have a delta)
- RawContentBlockStartEvent with an empty TextBlock
- RawContentBlockDeltaEvent with a TextDelta
- RawContentBlockDeltaEvent with a TextDelta
@ -151,44 +192,103 @@ async def _transform_stream(
- RawContentBlockStopEvent
- RawMessageDeltaEvent with a stop_reason='tool_use'
- RawMessageStopEvent(type='message_stop')

Each message could contain multiple blocks of the same type.
"""
if result is None:
    raise TypeError("Expected a stream of messages")

current_message: MessageParam | None = None
current_block: (
    TextBlockParam
    | ToolUseBlockParam
    | ThinkingBlockParam
    | RedactedThinkingBlockParam
    | None
) = None
current_tool_args: str

async for response in result:
    LOGGER.debug("Received response: %s", response)

    if isinstance(response, RawMessageStartEvent):
        if response.message.role != "assistant":
            raise ValueError("Unexpected message role")
        current_message = MessageParam(role=response.message.role, content=[])
    elif isinstance(response, RawContentBlockStartEvent):
        if isinstance(response.content_block, ToolUseBlock):
            current_block = ToolUseBlockParam(
                type="tool_use",
                id=response.content_block.id,
                name=response.content_block.name,
                input="",
            )
            current_tool_args = ""
        elif isinstance(response.content_block, TextBlock):
            current_block = TextBlockParam(
                type="text", text=response.content_block.text
            )
            yield {"role": "assistant"}
            if response.content_block.text:
                yield {"content": response.content_block.text}
        elif isinstance(response.content_block, ThinkingBlock):
            current_block = ThinkingBlockParam(
                type="thinking",
                thinking=response.content_block.thinking,
                signature=response.content_block.signature,
            )
        elif isinstance(response.content_block, RedactedThinkingBlock):
            current_block = RedactedThinkingBlockParam(
                type="redacted_thinking", data=response.content_block.data
            )
            LOGGER.debug(
                "Some of Claude’s internal reasoning has been automatically "
                "encrypted for safety reasons. This doesn’t affect the quality of "
                "responses"
            )
    elif isinstance(response, RawContentBlockDeltaEvent):
        if current_block is None:
            raise ValueError("Unexpected delta without a block")
        if isinstance(response.delta, InputJSONDelta):
            current_tool_args += response.delta.partial_json
        elif isinstance(response.delta, TextDelta):
            text_block = cast(TextBlockParam, current_block)
            text_block["text"] += response.delta.text
            yield {"content": response.delta.text}
        elif isinstance(response.delta, ThinkingDelta):
            thinking_block = cast(ThinkingBlockParam, current_block)
            thinking_block["thinking"] += response.delta.thinking
        elif isinstance(response.delta, SignatureDelta):
            thinking_block = cast(ThinkingBlockParam, current_block)
            thinking_block["signature"] += response.delta.signature
    elif isinstance(response, RawContentBlockStopEvent):
        if current_block is None:
            raise ValueError("Unexpected stop event without a current block")
        if current_block["type"] == "tool_use":
            tool_block = cast(ToolUseBlockParam, current_block)
            tool_args = json.loads(current_tool_args) if current_tool_args else {}
            tool_block["input"] = tool_args
            yield {
                "tool_calls": [
                    llm.ToolInput(
                        id=tool_block["id"],
                        tool_name=tool_block["name"],
                        tool_args=tool_args,
                    )
                ]
            }
        elif current_block["type"] == "thinking":
            thinking_block = cast(ThinkingBlockParam, current_block)
            LOGGER.debug("Thinking: %s", thinking_block["thinking"])

        if current_message is None:
            raise ValueError("Unexpected stop event without a current message")
        current_message["content"].append(current_block)  # type: ignore[union-attr]
        current_block = None
    elif isinstance(response, RawMessageStopEvent):
        if current_message is not None:
            messages.append(current_message)
            current_message = None
class AnthropicConversationEntity(
@ -226,18 +326,6 @@ class AnthropicConversationEntity(
            self.entry.add_update_listener(self._async_entry_update_listener)
        )

    async def async_process(
        self, user_input: conversation.ConversationInput
    ) -> conversation.ConversationResult:
        """Process a sentence."""
        with (
            chat_session.async_get_chat_session(
                self.hass, user_input.conversation_id
            ) as session,
            conversation.async_get_chat_log(self.hass, session, user_input) as chat_log,
        ):
            return await self._async_handle_message(user_input, chat_log)

    async def _async_handle_message(
        self,
        user_input: conversation.ConversationInput,
@ -266,34 +354,50 @@ class AnthropicConversationEntity(
        system = chat_log.content[0]
        if not isinstance(system, conversation.SystemContent):
            raise TypeError("First message must be a system message")
        messages = _convert_content(chat_log.content[1:])

        client = self.entry.runtime_data

        thinking_budget = options.get(CONF_THINKING_BUDGET, RECOMMENDED_THINKING_BUDGET)
        model = options.get(CONF_CHAT_MODEL, RECOMMENDED_CHAT_MODEL)

        # To prevent infinite loops, we limit the number of iterations
        for _iteration in range(MAX_TOOL_ITERATIONS):
            model_args = {
                "model": model,
                "messages": messages,
                "tools": tools or NOT_GIVEN,
                "max_tokens": options.get(CONF_MAX_TOKENS, RECOMMENDED_MAX_TOKENS),
                "system": system.content,
                "stream": True,
            }
            if model in THINKING_MODELS and thinking_budget >= MIN_THINKING_BUDGET:
                model_args["thinking"] = ThinkingConfigEnabledParam(
                    type="enabled", budget_tokens=thinking_budget
                )
            else:
                model_args["thinking"] = ThinkingConfigDisabledParam(type="disabled")
                model_args["temperature"] = options.get(
                    CONF_TEMPERATURE, RECOMMENDED_TEMPERATURE
                )

            try:
                stream = await client.messages.create(**model_args)
            except anthropic.AnthropicError as err:
                raise HomeAssistantError(
                    f"Sorry, I had a problem talking to Anthropic: {err}"
                ) from err

            messages.extend(
                _convert_content(
                    [
                        content
                        async for content in chat_log.async_add_delta_content_stream(
                            user_input.agent_id, _transform_stream(stream, messages)
                        )
                        if not isinstance(content, conversation.AssistantContent)
                    ]
                )
            )

            if not chat_log.unresponded_tool_results:
@ -305,7 +409,9 @@ class AnthropicConversationEntity(
        intent_response = intent.IntentResponse(language=user_input.language)
        intent_response.async_set_speech(response_content.content or "")
        return conversation.ConversationResult(
            response=intent_response,
            conversation_id=chat_log.conversation_id,
            continue_conversation=chat_log.continue_conversation,
        )
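The kwargs-dict assembly makes the thinking option switchable per model; note that `temperature` is only sent when extended thinking is disabled, matching Anthropic's constraint that the two cannot be combined. A simplified sketch with stand-in constants:

```python
THINKING_MODELS = ["claude-3-7-sonnet-latest"]  # stand-in for the integration's list
MIN_THINKING_BUDGET = 1024


def build_model_args(model: str, thinking_budget: int, temperature: float) -> dict:
    """Return messages.create kwargs; thinking and temperature are exclusive."""
    args: dict = {"model": model, "stream": True}
    if model in THINKING_MODELS and thinking_budget >= MIN_THINKING_BUDGET:
        args["thinking"] = {"type": "enabled", "budget_tokens": thinking_budget}
    else:
        args["thinking"] = {"type": "disabled"}
        args["temperature"] = temperature
    return args


print(build_model_args("claude-3-7-sonnet-latest", 2048, 1.0))
```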
    async def _async_entry_update_listener(
View File
@ -8,5 +8,5 @@
"documentation": "https://www.home-assistant.io/integrations/anthropic", "documentation": "https://www.home-assistant.io/integrations/anthropic",
"integration_type": "service", "integration_type": "service",
"iot_class": "cloud_polling", "iot_class": "cloud_polling",
"requirements": ["anthropic==0.44.0"] "requirements": ["anthropic==0.47.2"]
} }

View File

@ -23,12 +23,17 @@
"max_tokens": "Maximum tokens to return in response", "max_tokens": "Maximum tokens to return in response",
"temperature": "Temperature", "temperature": "Temperature",
"llm_hass_api": "[%key:common::config_flow::data::llm_hass_api%]", "llm_hass_api": "[%key:common::config_flow::data::llm_hass_api%]",
"recommended": "Recommended model settings" "recommended": "Recommended model settings",
"thinking_budget_tokens": "Thinking budget"
}, },
"data_description": { "data_description": {
"prompt": "Instruct how the LLM should respond. This can be a template." "prompt": "Instruct how the LLM should respond. This can be a template.",
"thinking_budget_tokens": "The number of tokens the model can use to think about the response out of the total maximum number of tokens. Set to 1024 or greater to enable extended thinking."
} }
} }
},
"error": {
"thinking_budget_too_large": "Maximum tokens must be greater than the thinking budget."
} }
} }
} }
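The new `thinking_budget_too_large` error string implies a config-flow check along these lines (a sketch under assumed option keys, not the integration's exact code):

```python
def validate_options(user_input: dict) -> dict[str, str]:
    """Return config flow errors; the budget must stay below max_tokens."""
    errors: dict[str, str] = {}
    # "thinking_budget_tokens" / "max_tokens" are hypothetical keys mirroring the labels above
    if user_input.get("thinking_budget_tokens", 0) >= user_input.get("max_tokens", 0):
        errors["base"] = "thinking_budget_too_large"
    return errors


print(validate_options({"max_tokens": 1024, "thinking_budget_tokens": 2048}))
```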
View File
@ -53,10 +53,8 @@ class OnlineStatus(CoordinatorEntity[APCUPSdCoordinator], BinarySensorEntity):
"""Initialize the APCUPSd binary device.""" """Initialize the APCUPSd binary device."""
super().__init__(coordinator, context=description.key.upper()) super().__init__(coordinator, context=description.key.upper())
# Set up unique id and device info if serial number is available.
if (serial_no := coordinator.data.serial_no) is not None:
self._attr_unique_id = f"{serial_no}_{description.key}"
self.entity_description = description self.entity_description = description
self._attr_unique_id = f"{coordinator.unique_device_id}_{description.key}"
self._attr_device_info = coordinator.device_info self._attr_device_info = coordinator.device_info
@property @property
View File
@ -85,11 +85,16 @@ class APCUPSdCoordinator(DataUpdateCoordinator[APCUPSdData]):
        self._host = host
        self._port = port

    @property
    def unique_device_id(self) -> str:
        """Return a unique ID of the device, which is the serial number (if available) or the config entry ID."""
        return self.data.serial_no or self.config_entry.entry_id

    @property
    def device_info(self) -> DeviceInfo:
        """Return the DeviceInfo of this APC UPS, if serial number is available."""
        return DeviceInfo(
            identifiers={(DOMAIN, self.unique_device_id)},
            model=self.data.model,
            manufacturer="APC",
            name=self.data.name or "APC UPS",
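The `unique_device_id` property centralizes the identifier rule used by the entities in this integration: prefer the hardware serial number and fall back to the config entry ID, so unique IDs stay stable even when the UPS does not report a serial. The rule reduces to:

```python
def unique_device_id(serial_no: str | None, entry_id: str) -> str:
    """Prefer the hardware serial number; fall back to the config entry ID."""
    return serial_no or entry_id


assert unique_device_id(None, "0123456789abcdef") == "0123456789abcdef"
assert unique_device_id("5B2145X00000", "0123456789abcdef") == "5B2145X00000"
```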
View File
@ -458,11 +458,8 @@ class APCUPSdSensor(CoordinatorEntity[APCUPSdCoordinator], SensorEntity):
"""Initialize the sensor.""" """Initialize the sensor."""
super().__init__(coordinator=coordinator, context=description.key.upper()) super().__init__(coordinator=coordinator, context=description.key.upper())
# Set up unique id and device info if serial number is available.
if (serial_no := coordinator.data.serial_no) is not None:
self._attr_unique_id = f"{serial_no}_{description.key}"
self.entity_description = description self.entity_description = description
self._attr_unique_id = f"{coordinator.unique_device_id}_{description.key}"
self._attr_device_info = coordinator.device_info self._attr_device_info = coordinator.device_info
# Initial update of attributes. # Initial update of attributes.
View File
@ -57,7 +57,7 @@
"name": "Status date" "name": "Status date"
}, },
"dip_switch_settings": { "dip_switch_settings": {
"name": "Dip switch settings" "name": "DIP switch settings"
}, },
"low_battery_signal": { "low_battery_signal": {
"name": "Low battery signal" "name": "Low battery signal"
View File
@ -0,0 +1 @@
"""Virtual integration: Apollo Automation."""
View File
@ -0,0 +1,6 @@
{
"domain": "apollo_automation",
"name": "Apollo Automation",
"integration_type": "virtual",
"supported_by": "esphome"
}
View File
@ -233,7 +233,6 @@ class AppleTVManager(DeviceListener):
            pass
        except Exception:
            _LOGGER.exception("Failed to connect")
            await self.disconnect()

    async def _connect_loop(self) -> None:
        """Connect loop background task function."""
View File
@ -20,6 +20,7 @@ import voluptuous as vol
from homeassistant.components import zeroconf
from homeassistant.config_entries import (
    SOURCE_IGNORE,
    SOURCE_REAUTH,
    SOURCE_ZEROCONF,
    ConfigEntry,
    ConfigFlow,
@ -381,7 +382,9 @@ class AppleTVConfigFlow(ConfigFlow, domain=DOMAIN):
                CONF_IDENTIFIERS: list(combined_identifiers),
            },
        )
        # Don't reload ignored entries or in the middle of reauth,
        # e.g. if the user is entering a new PIN
        if entry.source != SOURCE_IGNORE and self.source != SOURCE_REAUTH:
            self.hass.config_entries.async_schedule_reload(entry.entry_id)
    if not allow_exist:
        raise DeviceAlreadyConfigured
View File
@ -120,6 +120,7 @@ class AppleTvMediaPlayer(
"""Initialize the Apple TV media player.""" """Initialize the Apple TV media player."""
super().__init__(name, identifier, manager) super().__init__(name, identifier, manager)
self._playing: Playing | None = None self._playing: Playing | None = None
self._playing_last_updated: datetime | None = None
self._app_list: dict[str, str] = {} self._app_list: dict[str, str] = {}
@callback @callback
@ -209,6 +210,7 @@ class AppleTvMediaPlayer(
This is a callback function from pyatv.interface.PushListener. This is a callback function from pyatv.interface.PushListener.
""" """
self._playing = playstatus self._playing = playstatus
self._playing_last_updated = dt_util.utcnow()
self.async_write_ha_state() self.async_write_ha_state()
@callback @callback
@ -316,7 +318,7 @@ class AppleTvMediaPlayer(
def media_position_updated_at(self) -> datetime | None: def media_position_updated_at(self) -> datetime | None:
"""Last valid time of media position.""" """Last valid time of media position."""
if self.state in {MediaPlayerState.PLAYING, MediaPlayerState.PAUSED}: if self.state in {MediaPlayerState.PLAYING, MediaPlayerState.PAUSED}:
return dt_util.utcnow() return self._playing_last_updated
return None return None
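Returning the cached `_playing_last_updated` instead of `utcnow()` matters because media player frontends extrapolate the position from this timestamp; reporting "now" on every state read froze the computed position. A simplified sketch of that extrapolation, assuming a seconds-based position:

```python
from datetime import datetime, timedelta, timezone


def extrapolated_position(position: float, updated_at: datetime) -> float:
    """Extrapolate playback position from the last push update."""
    return position + (datetime.now(timezone.utc) - updated_at).total_seconds()


# A track that was at 12s when the push update arrived 3 seconds ago
three_s_ago = datetime.now(timezone.utc) - timedelta(seconds=3)
print(round(extrapolated_position(12.0, three_s_ago)))  # 15
```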
    async def async_play_media(
View File
@ -7,5 +7,5 @@
"integration_type": "device", "integration_type": "device",
"iot_class": "local_push", "iot_class": "local_push",
"loggers": ["pyaprilaire"], "loggers": ["pyaprilaire"],
"requirements": ["pyaprilaire==0.7.7"] "requirements": ["pyaprilaire==0.8.1"]
} }
View File
@ -43,6 +43,7 @@ class ApSystemsDataCoordinator(DataUpdateCoordinator[ApSystemsSensorData]):
    config_entry: ApSystemsConfigEntry
    device_version: str
    battery_system: bool

    def __init__(
        self,
@ -68,6 +69,7 @@ class ApSystemsDataCoordinator(DataUpdateCoordinator[ApSystemsSensorData]):
        self.api.max_power = device_info.maxPower
        self.api.min_power = device_info.minPower
        self.device_version = device_info.devVer
        self.battery_system = device_info.isBatterySystem

    async def _async_update_data(self) -> ApSystemsSensorData:
        try:
View File
@ -6,5 +6,5 @@
"documentation": "https://www.home-assistant.io/integrations/apsystems", "documentation": "https://www.home-assistant.io/integrations/apsystems",
"integration_type": "device", "integration_type": "device",
"iot_class": "local_polling", "iot_class": "local_polling",
"requirements": ["apsystems-ez1==2.4.0"] "requirements": ["apsystems-ez1==2.5.0"]
} }
View File
@ -36,6 +36,8 @@ class ApSystemsInverterSwitch(ApSystemsEntity, SwitchEntity):
        super().__init__(data)
        self._api = data.coordinator.api
        self._attr_unique_id = f"{data.device_id}_inverter_status"
        if data.coordinator.battery_system:
            self._attr_available = False

    async def async_update(self) -> None:
        """Update switch status and availability."""
View File
@ -60,7 +60,7 @@ class AquaCellConfigFlow(ConfigFlow, domain=DOMAIN):
errors["base"] = "cannot_connect" errors["base"] = "cannot_connect"
except AuthenticationFailed: except AuthenticationFailed:
errors["base"] = "invalid_auth" errors["base"] = "invalid_auth"
except Exception: # pylint: disable=broad-except except Exception:
_LOGGER.exception("Unexpected exception") _LOGGER.exception("Unexpected exception")
errors["base"] = "unknown" errors["base"] = "unknown"
else: else:
View File
@ -36,9 +36,9 @@
"wi_fi_strength": { "wi_fi_strength": {
"name": "Wi-Fi strength", "name": "Wi-Fi strength",
"state": { "state": {
"low": "Low", "low": "[%key:common::state::low%]",
"medium": "Medium", "medium": "[%key:common::state::medium%]",
"high": "High" "high": "[%key:common::state::high%]"
} }
} }
} }
View File
@ -5,7 +5,7 @@ from __future__ import annotations
from dataclasses import dataclass
from typing import Any

from aranet4.client import Aranet4Advertisement, Color
from bleak.backends.device import BLEDevice

from homeassistant.components.bluetooth.passive_update_processor import (
@ -74,6 +74,13 @@ SENSOR_DESCRIPTIONS = {
        native_unit_of_measurement=UnitOfPressure.HPA,
        state_class=SensorStateClass.MEASUREMENT,
    ),
    "status": AranetSensorEntityDescription(
        key="threshold",
        translation_key="threshold",
        name="Threshold",
        device_class=SensorDeviceClass.ENUM,
        options=[status.name.lower() for status in Color],
    ),
    "co2": AranetSensorEntityDescription(
        key="co2",
        name="Carbon Dioxide",
@ -161,7 +168,10 @@ def sensor_update_to_bluetooth_data_update(
        val = getattr(adv.readings, key)
        if val == -1:
            continue
        if key == "status":
            val = val.name.lower()
        else:
            val *= desc.scale
        data[tag] = val
        names[tag] = desc.name
        descs[tag] = desc
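The status reading is an enum, so it maps to a lowercase option name instead of going through the numeric scaling path. A self-contained sketch (with a stand-in for aranet4's `Color` enum):

```python
from enum import Enum


class Color(Enum):
    """Stand-in for aranet4.client.Color."""

    GREEN = 1
    YELLOW = 2
    RED = 3


def normalize(key: str, val, scale: float = 1.0):
    """Map enum statuses to option names; scale numeric readings."""
    if key == "status":
        return val.name.lower()
    return val * scale


print(normalize("status", Color.RED))  # red
print(normalize("temperature", 215, 0.1))  # 21.5 (approximately)
```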
View File
@ -21,5 +21,17 @@
"no_devices_found": "No unconfigured Aranet devices found.", "no_devices_found": "No unconfigured Aranet devices found.",
"outdated_version": "This device is using outdated firmware. Please update it to at least v1.2.0 and try again." "outdated_version": "This device is using outdated firmware. Please update it to at least v1.2.0 and try again."
} }
},
"entity": {
"sensor": {
"threshold": {
"state": {
"error": "Error",
"green": "Green",
"yellow": "Yellow",
"red": "Red"
}
}
}
} }
} }
View File
@ -6,7 +6,7 @@
"documentation": "https://www.home-assistant.io/integrations/arcam_fmj", "documentation": "https://www.home-assistant.io/integrations/arcam_fmj",
"iot_class": "local_polling", "iot_class": "local_polling",
"loggers": ["arcam"], "loggers": ["arcam"],
"requirements": ["arcam-fmj==1.8.0"], "requirements": ["arcam-fmj==1.8.1"],
"ssdp": [ "ssdp": [
{ {
"deviceType": "urn:schemas-upnp-org:device:MediaRenderer:1", "deviceType": "urn:schemas-upnp-org:device:MediaRenderer:1",
View File
@ -6,7 +6,11 @@ import logging
from typing import Any

from homeassistant.components import mqtt
from homeassistant.components.sensor import (
    SensorDeviceClass,
    SensorEntity,
    SensorStateClass,
)
from homeassistant.const import DEGREE, UnitOfPrecipitationDepth, UnitOfTemperature
from homeassistant.core import HomeAssistant, callback
from homeassistant.helpers.entity_platform import AddEntitiesCallback
@ -92,7 +96,13 @@ def discover_sensors(topic: str, payload: dict[str, Any]) -> list[ArwnSensor] |
            device_class=SensorDeviceClass.WIND_SPEED,
        ),
        ArwnSensor(
            topic + "/dir",
            "Wind Direction",
            "direction",
            DEGREE,
            "mdi:compass",
            device_class=SensorDeviceClass.WIND_DIRECTION,
            state_class=SensorStateClass.MEASUREMENT_ANGLE,
        ),
    ]
    return None
@ -173,6 +183,7 @@ class ArwnSensor(SensorEntity):
        units: str,
        icon: str | None = None,
        device_class: SensorDeviceClass | None = None,
        state_class: SensorStateClass | None = None,
    ) -> None:
        """Initialize the sensor."""
        self.entity_id = _slug(name)
@ -183,6 +194,7 @@ class ArwnSensor(SensorEntity):
        self._attr_native_unit_of_measurement = units
        self._attr_icon = icon
        self._attr_device_class = device_class
        self._attr_state_class = state_class

    def set_event(self, event: dict[str, Any]) -> None:
        """Update the sensor with the most recent event."""
View File
@ -117,7 +117,7 @@ async def async_pipeline_from_audio_stream(
""" """
with chat_session.async_get_chat_session(hass, conversation_id) as session: with chat_session.async_get_chat_session(hass, conversation_id) as session:
pipeline_input = PipelineInput( pipeline_input = PipelineInput(
conversation_id=session.conversation_id, session=session,
device_id=device_id, device_id=device_id,
stt_metadata=stt_metadata, stt_metadata=stt_metadata,
stt_stream=stt_stream, stt_stream=stt_stream,
View File
@ -13,24 +13,17 @@ from pathlib import Path
from queue import Empty, Queue
from threading import Thread
import time
from typing import TYPE_CHECKING, Any, Literal, cast
import wave

import hass_nabucasa
import voluptuous as vol

from homeassistant.components import conversation, stt, tts, wake_word, websocket_api
from homeassistant.components.tts import (
    generate_media_source_id as tts_generate_media_source_id,
)
from homeassistant.const import ATTR_SUPPORTED_FEATURES, MATCH_ALL
from homeassistant.core import Context, HomeAssistant, callback
from homeassistant.exceptions import HomeAssistantError
from homeassistant.helpers import chat_session, intent
@ -81,6 +74,9 @@ from .error import (
)
from .vad import AudioBuffer, VoiceActivityTimeout, VoiceCommandSegmenter, chunk_samples

if TYPE_CHECKING:
    from hassil.recognize import RecognizeResult

_LOGGER = logging.getLogger(__name__)

STORAGE_KEY = f"{DOMAIN}.pipelines"
@ -93,6 +89,9 @@ ENGINE_LANGUAGE_PAIRS = (
)

KEY_ASSIST_PIPELINE: HassKey[PipelineData] = HassKey(DOMAIN)
KEY_PIPELINE_CONVERSATION_DATA: HassKey[dict[str, PipelineConversationData]] = HassKey(
    "pipeline_conversation_data"
)


def validate_language(data: dict[str, Any]) -> Any:
@ -123,6 +122,12 @@ STORED_PIPELINE_RUNS = 10
SAVE_DELAY = 10


@callback
def _async_local_fallback_intent_filter(result: RecognizeResult) -> bool:
    """Filter out intents that are not local fallback."""
    return result.intent.name in (intent.INTENT_GET_STATE)


@callback
def _async_resolve_default_pipeline_settings(
    hass: HomeAssistant,
@ -557,8 +562,7 @@ class PipelineRun:
    id: str = field(default_factory=ulid_util.ulid_now)
    stt_provider: stt.SpeechToTextEntity | stt.Provider = field(init=False, repr=False)
    tts_stream: tts.ResultStream | None = field(init=False, default=None)
    wake_word_entity_id: str | None = field(init=False, default=None, repr=False)
    wake_word_entity: wake_word.WakeWordDetectionEntity = field(init=False, repr=False)
@ -581,6 +585,12 @@ class PipelineRun:
    _device_id: str | None = None
    """Optional device id set during run start."""

    _conversation_data: PipelineConversationData | None = None
    """Data tied to the conversation ID."""

    _intent_agent_only = False
    """If request should only be handled by agent, ignoring sentence triggers and local processing."""

    def __post_init__(self) -> None:
        """Set language for pipeline."""
        self.language = self.pipeline.language or self.hass.config.language
@ -630,13 +640,19 @@ class PipelineRun:
        self._device_id = device_id
        self._start_debug_recording_thread()

        data: dict[str, Any] = {
            "pipeline": self.pipeline.id,
            "language": self.language,
            "conversation_id": conversation_id,
        }
        if self.runner_data is not None:
            data["runner_data"] = self.runner_data
        if self.tts_stream:
            data["tts_output"] = {
                "token": self.tts_stream.token,
                "url": self.tts_stream.url,
                "mime_type": self.tts_stream.content_type,
            }

        self.process_event(PipelineEvent(PipelineEventType.RUN_START, data))
@ -998,19 +1014,36 @@ class PipelineRun:
            yield chunk.audio

    async def prepare_recognize_intent(self, session: chat_session.ChatSession) -> None:
        """Prepare recognizing an intent."""
        self._conversation_data = async_get_pipeline_conversation_data(
            self.hass, session
        )

        if self._conversation_data.continue_conversation_agent is not None:
            agent_info = conversation.async_get_agent_info(
                self.hass, self._conversation_data.continue_conversation_agent
            )
            self._conversation_data.continue_conversation_agent = None
            if agent_info is None:
                raise IntentRecognitionError(
                    code="intent-agent-not-found",
                    message=f"Intent recognition engine {self._conversation_data.continue_conversation_agent} asked for follow-up but is no longer found",
                )
            self._intent_agent_only = True
        else:
            agent_info = conversation.async_get_agent_info(
                self.hass,
                self.pipeline.conversation_engine or conversation.HOME_ASSISTANT_AGENT,
            )
            if agent_info is None:
                engine = self.pipeline.conversation_engine or "default"
                raise IntentRecognitionError(
                    code="intent-not-supported",
                    message=f"Intent recognition engine {engine} is not found",
                )

        self.intent_agent = agent_info.id
@ -1022,7 +1055,7 @@ class PipelineRun:
        conversation_extra_system_prompt: str | None,
    ) -> str:
        """Run intent recognition portion of pipeline. Returns text to speak."""
        if self.intent_agent is None or self._conversation_data is None:
            raise RuntimeError("Recognize intent was not prepared")

        if self.pipeline.conversation_language == MATCH_ALL:
@ -1069,7 +1102,7 @@ class PipelineRun:
            agent_id = self.intent_agent
            processed_locally = agent_id == conversation.HOME_ASSISTANT_AGENT
            intent_response: intent.IntentResponse | None = None
            if not processed_locally and not self._intent_agent_only:
                # Sentence triggers override conversation agent
                if (
                    trigger_response_text
@ -1084,10 +1117,26 @@ class PipelineRun:
                )
                intent_response.async_set_speech(trigger_response_text)

            intent_filter: Callable[[RecognizeResult], bool] | None = None
            # If the LLM has API access, we filter out some sentences that are
            # interfering with LLM operation.
            if (
                intent_agent_state := self.hass.states.get(self.intent_agent)
            ) and intent_agent_state.attributes.get(
                ATTR_SUPPORTED_FEATURES, 0
            ) & conversation.ConversationEntityFeature.CONTROL:
                intent_filter = _async_local_fallback_intent_filter

            # Try local intents
            if (
                intent_response is None
                and self.pipeline.prefer_local_intents
                and (
                    intent_response := await conversation.async_handle_intents(
                        self.hass,
                        user_input,
                        intent_filter=intent_filter,
                    )
                )
            ):
                # Local intent matched
@ -1170,6 +1219,9 @@ class PipelineRun:
                )
            )

            if conversation_result.continue_conversation:
                self._conversation_data.continue_conversation_agent = agent_id

        return speech
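The CONTROL capability check above is a plain bitmask test on the agent entity's state attributes; in isolation it looks like this (constants are stand-ins for the real `homeassistant.const` / `conversation` values):

```python
ATTR_SUPPORTED_FEATURES = "supported_features"
CONTROL = 1  # stand-in for conversation.ConversationEntityFeature.CONTROL


def should_filter_local_intents(attributes: dict) -> bool:
    """Use the reduced local fallback set only for agents that can control HA."""
    return bool(attributes.get(ATTR_SUPPORTED_FEATURES, 0) & CONTROL)


print(should_filter_local_intents({ATTR_SUPPORTED_FEATURES: 1}))  # True
print(should_filter_local_intents({}))  # False
```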
    async def prepare_text_to_speech(self) -> None:
@ -1192,36 +1244,31 @@ class PipelineRun:
            tts_options[tts.ATTR_PREFERRED_SAMPLE_BYTES] = SAMPLE_WIDTH

        try:
            self.tts_stream = tts.async_create_stream(
                hass=self.hass,
                engine=engine,
                language=self.pipeline.tts_language,
                options=tts_options,
            )
        except HomeAssistantError as err:
            raise TextToSpeechError(
                code="tts-not-supported",
                message=(
                    f"Text-to-speech engine {engine} "
                    f"does not support language {self.pipeline.tts_language} or options {tts_options}:"
                    f" {err}"
                ),
            ) from err

    async def text_to_speech(self, tts_input: str) -> None:
        """Run text-to-speech portion of pipeline."""
        assert self.tts_stream is not None

        self.process_event(
            PipelineEvent(
                PipelineEventType.TTS_START,
                {
                    "engine": self.tts_stream.engine,
                    "language": self.pipeline.tts_language,
                    "voice": self.pipeline.tts_voice,
                    "tts_input": tts_input,
@ -1234,14 +1281,9 @@ class PipelineRun:
            tts_media_id = tts_generate_media_source_id(
                self.hass,
                tts_input,
                engine=self.tts_stream.engine,
                language=self.tts_stream.language,
                options=self.tts_stream.options,
            )
        except Exception as src_error:
            _LOGGER.exception("Unexpected error during text-to-speech")
@ -1250,10 +1292,13 @@ class PipelineRun:
                message="Unexpected error during text-to-speech",
            ) from src_error

        self.tts_stream.async_set_message(tts_input)

        tts_output = {
            "media_id": tts_media_id,
            "token": self.tts_stream.token,
            "url": self.tts_stream.url,
            "mime_type": self.tts_stream.content_type,
        }

        self.process_event(
@ -1433,8 +1478,8 @@ class PipelineInput:
    run: PipelineRun

    session: chat_session.ChatSession
    """Session for the conversation."""

    stt_metadata: stt.SpeechMetadata | None = None
    """Metadata of stt input audio. Required when start_stage = stt."""
@ -1459,7 +1504,9 @@ class PipelineInput:
    async def execute(self) -> None:
        """Run pipeline."""
        self.run.start(
            conversation_id=self.session.conversation_id, device_id=self.device_id
        )
        current_stage: PipelineStage | None = self.run.start_stage
        stt_audio_buffer: list[EnhancedAudioChunk] = []
        stt_processed_stream: AsyncIterable[EnhancedAudioChunk] | None = None
@ -1543,7 +1590,7 @@ class PipelineInput:
                assert intent_input is not None
                tts_input = await self.run.recognize_intent(
                    intent_input,
                    self.session.conversation_id,
                    self.device_id,
                    self.conversation_extra_system_prompt,
                )
@ -1627,7 +1674,7 @@ class PipelineInput:
            <= PIPELINE_STAGE_ORDER.index(PipelineStage.INTENT)
            <= end_stage_index
        ):
            prepare_tasks.append(self.run.prepare_recognize_intent(self.session))

        if (
            start_stage_index
@ -1906,7 +1953,7 @@ class PipelineRunDebug:
class PipelineStore(Store[SerializedPipelineStorageCollection]):
    """Store pipeline data."""

    async def _async_migrate_func(
        self,
@ -1988,3 +2035,37 @@ async def async_run_migrations(hass: HomeAssistant) -> None:
    for pipeline, attr_updates in updates:
        await async_update_pipeline(hass, pipeline, **attr_updates)
@dataclass
class PipelineConversationData:
    """Hold data for the duration of a conversation."""

    continue_conversation_agent: str | None = None
    """The agent that requested the conversation to be continued."""


@callback
def async_get_pipeline_conversation_data(
    hass: HomeAssistant, session: chat_session.ChatSession
) -> PipelineConversationData:
    """Get the pipeline data for a specific conversation."""
    all_conversation_data = hass.data.get(KEY_PIPELINE_CONVERSATION_DATA)
    if all_conversation_data is None:
        all_conversation_data = {}
        hass.data[KEY_PIPELINE_CONVERSATION_DATA] = all_conversation_data

    data = all_conversation_data.get(session.conversation_id)

    if data is not None:
        return data

    @callback
    def do_cleanup() -> None:
        """Handle cleanup."""
        all_conversation_data.pop(session.conversation_id)

    session.async_on_cleanup(do_cleanup)

    data = all_conversation_data[session.conversation_id] = PipelineConversationData()
    return data
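`async_get_pipeline_conversation_data` is a lazily created, session-scoped store that removes itself when the chat session ends. The same pattern, stripped of the Home Assistant types:

```python
from dataclasses import dataclass, field
from typing import Callable


@dataclass
class Session:
    """Stand-in for chat_session.ChatSession."""

    conversation_id: str
    _cleanups: list[Callable[[], None]] = field(default_factory=list)

    def async_on_cleanup(self, cb: Callable[[], None]) -> None:
        self._cleanups.append(cb)

    def end(self) -> None:
        for cb in self._cleanups:
            cb()


store: dict[str, dict] = {}


def get_conversation_data(session: Session) -> dict:
    """Create per-conversation data on first use; drop it with the session."""
    if (data := store.get(session.conversation_id)) is not None:
        return data
    session.async_on_cleanup(lambda: store.pop(session.conversation_id))
    data = store[session.conversation_id] = {"continue_conversation_agent": None}
    return data


s = Session("abc")
get_conversation_data(s)["continue_conversation_agent"] = "conversation.claude"
s.end()
print(store)  # {} - cleaned up with the session
```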
View File
@ -239,7 +239,7 @@ async def websocket_run(
    with chat_session.async_get_chat_session(
        hass, msg.get("conversation_id")
    ) as session:
        input_args["session"] = session
        pipeline_input = PipelineInput(**input_args)

        try:
View File
@ -1,9 +1,11 @@
"""Base class for assist satellite entities.""" """Base class for assist satellite entities."""
import logging import logging
from pathlib import Path
import voluptuous as vol import voluptuous as vol
from homeassistant.components.http import StaticPathConfig
from homeassistant.config_entries import ConfigEntry from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant from homeassistant.core import HomeAssistant
from homeassistant.helpers import config_validation as cv from homeassistant.helpers import config_validation as cv
@ -15,6 +17,8 @@ from .const import (
    CONNECTION_TEST_DATA,
    DATA_COMPONENT,
    DOMAIN,
    PREANNOUNCE_FILENAME,
    PREANNOUNCE_URL,
    AssistSatelliteEntityFeature,
)
from .entity import (
@ -56,6 +60,8 @@ async def async_setup(hass: HomeAssistant, config: ConfigType) -> bool:
            {
                vol.Optional("message"): str,
                vol.Optional("media_id"): str,
                vol.Optional("preannounce"): bool,
                vol.Optional("preannounce_media_id"): str,
            }
        ),
        cv.has_at_least_one_key("message", "media_id"),
@ -70,6 +76,8 @@ async def async_setup(hass: HomeAssistant, config: ConfigType) -> bool:
            {
                vol.Optional("start_message"): str,
                vol.Optional("start_media_id"): str,
                vol.Optional("preannounce"): bool,
                vol.Optional("preannounce_media_id"): str,
                vol.Optional("extra_system_prompt"): str,
            }
        ),
@ -82,6 +90,15 @@ async def async_setup(hass: HomeAssistant, config: ConfigType) -> bool:
    async_register_websocket_api(hass)
    hass.http.register_view(ConnectionTestView())

    # Default preannounce sound
    await hass.http.async_register_static_paths(
        [
            StaticPathConfig(
                PREANNOUNCE_URL, str(Path(__file__).parent / PREANNOUNCE_FILENAME)
            )
        ]
    )

    return True
View File
@ -20,6 +20,9 @@ CONNECTION_TEST_DATA: HassKey[dict[str, asyncio.Event]] = HassKey(
f"{DOMAIN}_connection_tests" f"{DOMAIN}_connection_tests"
) )
PREANNOUNCE_FILENAME = "preannounce.mp3"
PREANNOUNCE_URL = f"/api/assist_satellite/static/{PREANNOUNCE_FILENAME}"
class AssistSatelliteEntityFeature(IntFlag): class AssistSatelliteEntityFeature(IntFlag):
"""Supported features of Assist satellite entity.""" """Supported features of Assist satellite entity."""
View File
@ -23,15 +23,12 @@ from homeassistant.components.assist_pipeline import (
    vad,
)
from homeassistant.components.media_player import async_process_play_media_url
from homeassistant.components.tts import (
    generate_media_source_id as tts_generate_media_source_id,
)
from homeassistant.core import Context, callback
from homeassistant.exceptions import HomeAssistantError
from homeassistant.helpers import chat_session, entity
from homeassistant.helpers.entity import EntityDescription

from .const import PREANNOUNCE_URL, AssistSatelliteEntityFeature
from .errors import AssistSatelliteError, SatelliteBusyError

_LOGGER = logging.getLogger(__name__)
@ -98,9 +95,15 @@ class AssistSatelliteAnnouncement:
    original_media_id: str
    """The raw media ID before processing."""

    tts_token: str | None
    """The TTS token of the media."""

    media_id_source: Literal["url", "media_id", "tts"]
    """Source of the media ID."""

    preannounce_media_id: str | None = None
    """Media ID to be played before announcement."""


class AssistSatelliteEntity(entity.Entity):
    """Entity encapsulating the state and functionality of an Assist satellite."""
@ -177,6 +180,8 @@ class AssistSatelliteEntity(entity.Entity):
        self,
        message: str | None = None,
        media_id: str | None = None,
        preannounce: bool = True,
        preannounce_media_id: str = PREANNOUNCE_URL,
    ) -> None:
        """Play and show an announcement on the satellite.
@ -186,6 +191,9 @@ class AssistSatelliteEntity(entity.Entity):
        If media_id is provided, it is played directly. It is possible
        to omit the message and the satellite will not show any text.

        If preannounce is True, a sound is played before the announcement.
        If preannounce_media_id is provided, it overrides the default sound.

        Calls async_announce with message and media id.
        """
        await self._cancel_running_pipeline()
@ -193,7 +201,11 @@ class AssistSatelliteEntity(entity.Entity):
        if message is None:
            message = ""

        announcement = await self._resolve_announcement_media_id(
            message,
            media_id,
            preannounce_media_id=preannounce_media_id if preannounce else None,
        )

        if self._is_announcing:
            raise SatelliteBusyError
@ -220,6 +232,8 @@ class AssistSatelliteEntity(entity.Entity):
        start_message: str | None = None,
        start_media_id: str | None = None,
        extra_system_prompt: str | None = None,
        preannounce: bool = True,
        preannounce_media_id: str = PREANNOUNCE_URL,
    ) -> None:
        """Start a conversation from the satellite.
@ -229,6 +243,9 @@ class AssistSatelliteEntity(entity.Entity):
        If start_media_id is provided, it is played directly. It is possible
        to omit the message and the satellite will not show any text.

        If preannounce is True, a sound is played before the start message or media.
        If preannounce_media_id is provided, it overrides the default sound.

        Calls async_start_conversation.
        """
        await self._cancel_running_pipeline()
@ -244,13 +261,17 @@ class AssistSatelliteEntity(entity.Entity):
start_message = "" start_message = ""
announcement = await self._resolve_announcement_media_id( announcement = await self._resolve_announcement_media_id(
start_message, start_media_id start_message,
start_media_id,
preannounce_media_id=preannounce_media_id if preannounce else None,
) )
if self._is_announcing: if self._is_announcing:
raise SatelliteBusyError raise SatelliteBusyError
self._is_announcing = True self._is_announcing = True
self._set_state(AssistSatelliteState.RESPONDING)
# Provide our start info to the LLM so it understands context of incoming message # Provide our start info to the LLM so it understands context of incoming message
if extra_system_prompt is not None: if extra_system_prompt is not None:
self._extra_system_prompt = extra_system_prompt self._extra_system_prompt = extra_system_prompt
@ -280,6 +301,7 @@ class AssistSatelliteEntity(entity.Entity):
            raise
        finally:
            self._is_announcing = False
            self._set_state(AssistSatelliteState.IDLE)

    async def async_start_conversation(
        self, start_announcement: AssistSatelliteAnnouncement
@ -470,20 +492,27 @@ class AssistSatelliteEntity(entity.Entity):
        return vad.VadSensitivity.to_seconds(vad_sensitivity)

    async def _resolve_announcement_media_id(
        self,
        message: str,
        media_id: str | None,
        preannounce_media_id: str | None = None,
    ) -> AssistSatelliteAnnouncement:
        """Resolve the media ID."""
        media_id_source: Literal["url", "media_id", "tts"] | None = None
        tts_token: str | None = None

        if media_id:
            original_media_id = media_id
        else:
            media_id_source = "tts"
            # Synthesize audio and get URL
            pipeline_id = self._resolve_pipeline()
            pipeline = async_get_pipeline(self.hass, pipeline_id)

            engine = tts.async_resolve_engine(self.hass, pipeline.tts_engine)
            if engine is None:
                raise HomeAssistantError(f"TTS engine {pipeline.tts_engine} not found")

            tts_options: dict[str, Any] = {}
            if pipeline.tts_voice is not None:
                tts_options[tts.ATTR_VOICE] = pipeline.tts_voice
@ -491,14 +520,23 @@ class AssistSatelliteEntity(entity.Entity):
            if self.tts_options is not None:
                tts_options.update(self.tts_options)

            stream = tts.async_create_stream(
                self.hass,
                engine=engine,
                language=pipeline.tts_language,
                options=tts_options,
            )
            stream.async_set_message(message)

            tts_token = stream.token
            media_id = stream.url
            original_media_id = tts.generate_media_source_id(
                self.hass,
                message,
                engine=engine,
                language=pipeline.tts_language,
                options=tts_options,
            )

        if media_source.is_media_source_id(media_id):
            if not media_id_source:
@ -516,6 +554,26 @@ class AssistSatelliteEntity(entity.Entity):
        # Resolve to full URL
        media_id = async_process_play_media_url(self.hass, media_id)

        # Resolve preannounce media id
        if preannounce_media_id:
            if media_source.is_media_source_id(preannounce_media_id):
                preannounce_media = await media_source.async_resolve_media(
                    self.hass,
                    preannounce_media_id,
                    None,
                )
                preannounce_media_id = preannounce_media.url

            # Resolve to full URL
            preannounce_media_id = async_process_play_media_url(
                self.hass, preannounce_media_id
            )

        return AssistSatelliteAnnouncement(
            message=message,
            media_id=media_id,
            original_media_id=original_media_id,
            tts_token=tts_token,
            media_id_source=media_id_source,
            preannounce_media_id=preannounce_media_id,
        )
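Resolution order for the pre-announcement sound: `preannounce=False` disables it, an explicit `preannounce_media_id` wins, and otherwise the bundled default URL is used. Reduced to its core:

```python
DEFAULT_PREANNOUNCE_URL = "/api/assist_satellite/static/preannounce.mp3"


def resolve_preannounce(preannounce: bool, media_id: str | None) -> str | None:
    """Pick the sound played before an announcement, or None to skip it."""
    if not preannounce:
        return None
    return media_id or DEFAULT_PREANNOUNCE_URL


assert resolve_preannounce(False, "media-source://media/chime.mp3") is None
assert resolve_preannounce(True, None) == DEFAULT_PREANNOUNCE_URL
```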
View File
@ -8,12 +8,22 @@ announce:
    message:
      required: false
      example: "Time to wake up!"
      default: ""
      selector:
        text:
    media_id:
      required: false
      selector:
        text:
    preannounce:
      required: false
      default: true
      selector:
        boolean:
    preannounce_media_id:
      required: false
      selector:
        text:
start_conversation:
  target:
    entity:
@ -24,6 +34,7 @@ start_conversation:
    start_message:
      required: false
      example: "You left the lights on in the living room. Turn them off?"
      default: ""
      selector:
        text:
    start_media_id:
@ -34,3 +45,12 @@ start_conversation:
      required: false
      selector:
        text:
    preannounce:
      required: false
      default: true
      selector:
        boolean:
    preannounce_media_id:
      required: false
      selector:
        text:
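A usage sketch of the new fields: a service call that announces without the default chime (the entity ID is hypothetical):

```python
async def announce_quietly(hass) -> None:
    """Fire assist_satellite.announce without the pre-announcement sound."""
    await hass.services.async_call(
        "assist_satellite",
        "announce",
        {"message": "Dinner is ready", "preannounce": False},
        target={"entity_id": "assist_satellite.kitchen"},  # hypothetical entity
        blocking=True,
    )
```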
Some files were not shown because too many files have changed in this diff