Compare commits

No commits in common. "main" and "223" have entirely different histories.

main...223

4157 changed files with 117574 additions and 88135 deletions

.devcontainer/Dockerfile Normal file

@@ -0,0 +1,51 @@
FROM python:3.7

WORKDIR /workspaces

# Install Node/Yarn for frontend
RUN apt-get update && apt-get install -y --no-install-recommends \
        curl \
        git \
        apt-utils \
        apt-transport-https \
    && curl -sS https://dl.yarnpkg.com/debian/pubkey.gpg | apt-key add - \
    && echo "deb https://dl.yarnpkg.com/debian/ stable main" | tee /etc/apt/sources.list.d/yarn.list \
    && apt-get update && apt-get install -y --no-install-recommends \
        nodejs \
        yarn \
    && curl -o - https://raw.githubusercontent.com/creationix/nvm/v0.34.0/install.sh | bash \
    && rm -rf /var/lib/apt/lists/*
ENV NVM_DIR /root/.nvm

# Install docker
# https://docs.docker.com/engine/installation/linux/docker-ce/ubuntu/
RUN apt-get update && apt-get install -y --no-install-recommends \
        apt-transport-https \
        ca-certificates \
        curl \
        software-properties-common \
        gpg-agent \
    && curl -fsSL https://download.docker.com/linux/debian/gpg | apt-key add - \
    && add-apt-repository "deb https://download.docker.com/linux/debian $(lsb_release -cs) stable" \
    && apt-get update && apt-get install -y --no-install-recommends \
        docker-ce \
        docker-ce-cli \
        containerd.io \
    && rm -rf /var/lib/apt/lists/*

# Install tools
RUN apt-get update && apt-get install -y --no-install-recommends \
        jq \
        dbus \
        network-manager \
        libpulse0 \
    && rm -rf /var/lib/apt/lists/*

# Install Python dependencies from requirements.txt if it exists
COPY requirements.txt requirements_tests.txt ./
RUN pip3 install -r requirements.txt -r requirements_tests.txt \
    && pip3 install tox \
    && rm -f requirements.txt requirements_tests.txt

# Set the default shell to bash instead of sh
ENV SHELL /bin/bash

.devcontainer/devcontainer.json

@@ -1,51 +1,24 @@
 // See https://aka.ms/vscode-remote/devcontainer.json for format details.
 {
   "name": "Supervisor dev",
-  "image": "ghcr.io/home-assistant/devcontainer:2-supervisor",
-  "containerEnv": {
-    "WORKSPACE_DIRECTORY": "${containerWorkspaceFolder}"
-  },
-  "remoteEnv": {
-    "PATH": "${containerEnv:VIRTUAL_ENV}/bin:${containerEnv:PATH}"
-  },
-  "appPort": ["9123:8123", "7357:4357"],
-  "postCreateCommand": "bash devcontainer_setup",
-  "postStartCommand": "bash devcontainer_bootstrap",
+  "context": "..",
+  "dockerFile": "Dockerfile",
+  "appPort": "9123:8123",
   "runArgs": ["-e", "GIT_EDITOR=code --wait", "--privileged"],
-  "customizations": {
-    "vscode": {
-      "extensions": [
-        "charliermarsh.ruff",
-        "ms-python.pylint",
-        "ms-python.vscode-pylance",
-        "visualstudioexptteam.vscodeintellicode",
-        "redhat.vscode-yaml",
-        "esbenp.prettier-vscode",
-        "GitHub.vscode-pull-request-github"
-      ],
-      "settings": {
-        "python.defaultInterpreterPath": "/home/vscode/.local/ha-venv/bin/python",
-        "python.pythonPath": "/home/vscode/.local/ha-venv/bin/python",
-        "python.terminal.activateEnvInCurrentTerminal": true,
-        "python.testing.pytestArgs": ["--no-cov"],
-        "pylint.importStrategy": "fromEnvironment",
-        "editor.formatOnPaste": false,
-        "editor.formatOnSave": true,
-        "editor.formatOnType": true,
-        "files.trimTrailingWhitespace": true,
-        "terminal.integrated.profiles.linux": {
-          "zsh": {
-            "path": "/usr/bin/zsh"
-          }
-        },
-        "terminal.integrated.defaultProfile.linux": "zsh",
-        "[python]": {
-          "editor.defaultFormatter": "charliermarsh.ruff"
-        }
-      }
-    }
-  },
-  "mounts": [
-    "type=volume,target=/var/lib/docker",
-    "type=volume,target=/mnt/supervisor"
-  ]
+  "extensions": [
+    "ms-python.python",
+    "visualstudioexptteam.vscodeintellicode",
+    "esbenp.prettier-vscode"
+  ],
+  "settings": {
+    "python.pythonPath": "/usr/local/bin/python",
+    "python.linting.pylintEnabled": true,
+    "python.linting.enabled": true,
+    "python.formatting.provider": "black",
+    "python.formatting.blackArgs": ["--target-version", "py37"],
+    "editor.formatOnPaste": false,
+    "editor.formatOnSave": true,
+    "editor.formatOnType": true,
+    "files.trimTrailingWhitespace": true
+  }
 }

.github/ISSUE_TEMPLATE.md vendored Normal file

@@ -0,0 +1,29 @@
<!-- READ THIS FIRST:
- If you need additional help with this template please refer to https://www.home-assistant.io/help/reporting_issues/
- Make sure you are running the latest version of Home Assistant before reporting an issue: https://github.com/home-assistant/home-assistant/releases
- Do not report issues for components here, please refer to https://github.com/home-assistant/home-assistant/issues
- This is for bugs only. Feature and enhancement requests should go in our community forum: https://community.home-assistant.io/c/feature-requests
- Provide as many details as possible. Paste logs, configuration sample and code into the backticks. Do not delete any text from this template!
- If you have a problem with an add-on, open an issue in its repository.
-->
**Home Assistant release with the issue:**
<!--
- Frontend -> Developer tools -> Info
- Or use this command: hass --version
-->
**Operating environment (HassOS/Generic):**
<!--
Please provide details about your environment.
-->
**Supervisor logs:**
<!--
- Frontend -> Hass.io -> System
- Or use this command: hassio su logs
-->
**Description of problem:**

.github/ISSUE_TEMPLATE/bug_report.yml

@@ -1,96 +0,0 @@
name: Report an issue with Home Assistant Supervisor
description: Report an issue related to the Home Assistant Supervisor.
body:
- type: markdown
attributes:
value: |
This issue form is for reporting bugs with **supported** setups only!
If you have a feature or enhancement request, please use the [feature request][fr] section of our [Community Forum][fr].
[fr]: https://github.com/orgs/home-assistant/discussions
- type: textarea
validations:
required: true
attributes:
label: Describe the issue you are experiencing
description: Provide a clear and concise description of what the bug is.
- type: markdown
attributes:
value: |
## Environment
- type: dropdown
validations:
required: true
attributes:
label: What type of installation are you running?
description: >
If you don't know, can be found in [Settings -> System -> Repairs -> (three dot menu) -> System Information](https://my.home-assistant.io/redirect/system_health/).
It is listed as the `Installation Type` value.
options:
- Home Assistant OS
- Home Assistant Supervised
- type: dropdown
validations:
required: true
attributes:
label: Which operating system are you running on?
options:
- Home Assistant Operating System
- Debian
- Other (e.g., Raspbian/Raspberry Pi OS/Fedora)
- type: markdown
attributes:
value: |
# Details
- type: textarea
validations:
required: true
attributes:
label: Steps to reproduce the issue
description: |
Please tell us exactly how to reproduce your issue.
Provide clear and concise step by step instructions and add code snippets if needed.
value: |
1.
2.
3.
...
- type: textarea
validations:
required: true
attributes:
label: Anything in the Supervisor logs that might be useful for us?
description: >
Supervisor Logs can be found in [Settings -> System -> Logs](https://my.home-assistant.io/redirect/logs/)
then choose `Supervisor` in the top right.
[![Open your Home Assistant instance and show your Supervisor system logs.](https://my.home-assistant.io/badges/supervisor_logs.svg)](https://my.home-assistant.io/redirect/supervisor_logs/)
render: txt
- type: textarea
validations:
required: true
attributes:
label: System information
description: >
The System information can be found in [Settings -> System -> Repairs -> (three dot menu) -> System Information](https://my.home-assistant.io/redirect/system_health/).
Click the copy button at the bottom of the pop-up and paste it here.
[![Open your Home Assistant instance and show health information about your system.](https://my.home-assistant.io/badges/system_health.svg)](https://my.home-assistant.io/redirect/system_health/)
- type: textarea
attributes:
label: Supervisor diagnostics
placeholder: "drag-and-drop the diagnostics data file here (do not copy-and-paste the content)"
description: >-
Supervisor diagnostics can be found in [Settings -> Devices & services](https://my.home-assistant.io/redirect/integrations/).
Find the card that says `Home Assistant Supervisor`, open it, and select the three dot menu of the Supervisor integration entry
and select 'Download diagnostics'.
**Please drag-and-drop the downloaded file into the textbox below. Do not copy and paste its contents.**
- type: textarea
attributes:
label: Additional information
description: >
If you have any additional information for us, use the field below.
Please note, you can attach screenshots or screen recordings here, by
dragging and dropping files in the field below.

.github/ISSUE_TEMPLATE/config.yml

@@ -1,25 +0,0 @@
blank_issues_enabled: false
contact_links:
- name: Report a bug/issues with an unsupported Supervisor
url: https://community.home-assistant.io
about: The community can help, or the guides may already have been updated to solve your issue
- name: Report a bug for the Supervisor panel
url: https://github.com/home-assistant/frontend/issues
about: The Supervisor panel is a part of the Home Assistant frontend
- name: Report incorrect or missing information on our developer documentation
url: https://github.com/home-assistant/developers.home-assistant.io/issues
about: Our documentation has its own issue tracker. Please report issues with the website there.
- name: Request a feature for the Supervisor
url: https://github.com/orgs/home-assistant/discussions
about: Request a new feature for the Supervisor.
- name: I have a question or need support
url: https://www.home-assistant.io/help
about: We use GitHub for tracking bugs, check our website for resources on getting help.
- name: I'm unsure where to go?
url: https://www.home-assistant.io/join-chat
about: If you are unsure where to go, then joining our chat is recommended; Just ask!

@@ -1,53 +0,0 @@
name: Task
description: For staff only - Create a task
type: Task
body:
- type: markdown
attributes:
value: |
## ⚠️ RESTRICTED ACCESS
**This form is restricted to Open Home Foundation staff and authorized contributors only.**
If you are a community member wanting to contribute, please:
- For bug reports: Use the [bug report form](https://github.com/home-assistant/supervisor/issues/new?template=bug_report.yml)
- For feature requests: Submit to [Feature Requests](https://github.com/orgs/home-assistant/discussions)
---
### For authorized contributors
Use this form to create tasks for development work, improvements, or other actionable items that need to be tracked.
- type: textarea
id: description
attributes:
label: Description
description: |
Provide a clear and detailed description of the task that needs to be accomplished.
Be specific about what needs to be done, why it's important, and any constraints or requirements.
placeholder: |
Describe the task, including:
- What needs to be done
- Why this task is needed
- Expected outcome
- Any constraints or requirements
validations:
required: true
- type: textarea
id: additional_context
attributes:
label: Additional context
description: |
Any additional information, links, research, or context that would be helpful.
Include links to related issues, research, prototypes, roadmap opportunities etc.
placeholder: |
- Roadmap opportunity: [link]
- Epic: [link]
- Feature request: [link]
- Technical design documents: [link]
- Prototype/mockup: [link]
- Dependencies: [links]
validations:
required: false

.github/PULL_REQUEST_TEMPLATE.md

@@ -1,74 +0,0 @@
<!--
You are amazing! Thanks for contributing to our project!
Please, DO NOT DELETE ANY TEXT from this template! (unless instructed).
-->
## Proposed change
<!--
Describe the big picture of your changes here to communicate to the
maintainers why we should accept this pull request. If it fixes a bug
or resolves a feature request, be sure to link to that issue in the
additional information section.
-->
## Type of change
<!--
What type of change does your PR introduce to Home Assistant?
NOTE: Please, check only 1! box!
If your PR requires multiple boxes to be checked, you'll most likely need to
split it into multiple PRs. This makes things easier and faster to code review.
-->
- [ ] Dependency upgrade
- [ ] Bugfix (non-breaking change which fixes an issue)
- [ ] New feature (which adds functionality to the supervisor)
- [ ] Breaking change (fix/feature causing existing functionality to break)
- [ ] Code quality improvements to existing code or addition of tests
## Additional information
<!--
Details are important, and help maintainers processing your PR.
Please be sure to fill out additional details, if applicable.
-->
- This PR fixes or closes issue: fixes #
- This PR is related to issue:
- Link to documentation pull request:
- Link to cli pull request:
- Link to client library pull request:
## Checklist
<!--
Put an `x` in the boxes that apply. You can also fill these out after
creating the PR. If you're unsure about any of them, don't hesitate to ask.
We're here to help! This is simply a reminder of what we are going to look
for before merging your code.
-->
- [ ] The code change is tested and works locally.
- [ ] Local tests pass. **Your PR cannot be merged unless tests pass**
- [ ] There is no commented out code in this PR.
- [ ] I have followed the [development checklist][dev-checklist]
- [ ] The code has been formatted using Ruff (`ruff format supervisor tests`)
- [ ] Tests have been added to verify that the new code works.
If API endpoints or add-on configuration are added/changed:
- [ ] Documentation added/updated for [developers.home-assistant.io][docs-repository]
- [ ] [CLI][cli-repository] updated (if necessary)
- [ ] [Client library][client-library-repository] updated (if necessary)
<!--
Thank you for contributing <3
Below, some useful links you could explore:
-->
[dev-checklist]: https://developers.home-assistant.io/docs/en/development_checklist.html
[docs-repository]: https://github.com/home-assistant/developers.home-assistant
[cli-repository]: https://github.com/home-assistant/cli
[client-library-repository]: https://github.com/home-assistant-libs/python-supervisor-client/

.github/copilot-instructions.md

@@ -1,288 +0,0 @@
# GitHub Copilot & Claude Code Instructions
This repository contains the Home Assistant Supervisor, a Python 3 based container
orchestration and management system for Home Assistant.
## Supervisor Capabilities & Features
### Architecture Overview
Home Assistant Supervisor is a Python-based container orchestration system that
communicates with the Docker daemon to manage containerized components. It is tightly
integrated with the underlying Operating System and core Operating System components
through D-Bus.
**Managed Components:**
- **Home Assistant Core**: The main home automation application running in its own
container (also provides the web interface)
- **Add-ons**: Third-party applications and services (each add-on runs in its own
container)
- **Plugins**: Built-in system services like DNS, Audio, CLI, Multicast, and Observer
- **Host System Integration**: OS-level operations and hardware access via D-Bus
- **Container Networking**: Internal Docker network management and external
connectivity
- **Storage & Backup**: Data persistence and backup management across all containers
**Key Dependencies:**
- **Docker Engine**: Required for all container operations
- **D-Bus**: System-level communication with the host OS
- **systemd**: Service management for host system operations
- **NetworkManager**: Network configuration and management
### Add-on System
**Add-on Architecture**: Add-ons are containerized applications available through
add-on stores. Each store contains multiple add-ons, and each add-on includes metadata
that tells Supervisor the version, startup configuration (permissions), and available
user configurable options. Add-on metadata typically references a container image that
Supervisor fetches during installation. If not, the Supervisor builds the container
image from a Dockerfile.
**Built-in Stores**: Supervisor comes with several pre-configured stores:
- **Core Add-ons**: Official add-ons maintained by the Home Assistant team
- **Community Add-ons**: Popular third-party add-ons repository
- **ESPHome**: Add-ons for ESPHome ecosystem integration
- **Music Assistant**: Audio and music-related add-ons
- **Local Development**: Local folder for testing custom add-ons during development
**Store Management**: Stores are Git-based repositories that are periodically updated.
When updates are available, users receive notifications.
**Add-on Lifecycle**:
- **Installation**: Supervisor fetches or builds container images based on add-on
metadata
- **Configuration**: Schema-validated options with integrated UI management
- **Runtime**: Full container lifecycle management, health monitoring
- **Updates**: Automatic or manual version management
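As a concrete illustration of the metadata handling described above, here is a hedged sketch of schema-validating a few typical add-on metadata keys with voluptuous; the key set is illustrative, not the authoritative add-on schema.
```python
import voluptuous as vol

# Illustrative subset of add-on metadata keys (not the real, full schema).
SCHEMA_ADDON_EXAMPLE = vol.Schema(
    {
        vol.Required("name"): str,
        vol.Required("version"): str,
        vol.Required("slug"): str,
        # With "image" set, Supervisor fetches a prebuilt container image;
        # without it, the image is built from the add-on's Dockerfile.
        vol.Optional("image"): str,
        # User-configurable options surfaced through the UI.
        vol.Optional("options", default=dict): dict,
    },
    extra=vol.ALLOW_EXTRA,
)

validated = SCHEMA_ADDON_EXAMPLE(
    {"name": "Example add-on", "version": "1.0.0", "slug": "example_addon"}
)
```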
### Update System
**Core Components**: Supervisor, Home Assistant Core, HAOS, and built-in plugins
receive version information from a central JSON file fetched from
`https://version.home-assistant.io/{channel}.json`. The `Updater` class handles
fetching this data, validating signatures, and updating internal version tracking.
**Update Channels**: Three channels (`stable`/`beta`/`dev`) determine which version
JSON file is fetched, allowing users to opt into different release streams.
**Add-on Updates**: Add-on version information comes from store repository updates, not
the central JSON file. When repositories are refreshed via the store system, add-ons
compare their local versions against repository versions to determine update
availability.
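A minimal sketch of this flow, assuming the URL shape above; the `supervisor` key inside the JSON and the use of `awesomeversion` for comparison are assumptions for illustration, not the real `Updater` implementation (which also validates signatures).
```python
import asyncio

import aiohttp
from awesomeversion import AwesomeVersion

VERSION_URL = "https://version.home-assistant.io/{channel}.json"

async def fetch_latest_supervisor(channel: str = "stable") -> AwesomeVersion:
    """Fetch the central version file and return the Supervisor version."""
    async with aiohttp.ClientSession() as session:
        async with session.get(VERSION_URL.format(channel=channel)) as resp:
            data = await resp.json()
    return AwesomeVersion(data["supervisor"])  # key name assumed

async def update_available(current: str, channel: str = "stable") -> bool:
    """Return True if the published version is newer than the running one."""
    return await fetch_latest_supervisor(channel) > AwesomeVersion(current)

if __name__ == "__main__":
    print(asyncio.run(update_available("2024.01.0")))
```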
### Backup & Recovery System
**Backup Capabilities**:
- **Full Backups**: Complete system state capture including all add-ons,
configuration, and data
- **Partial Backups**: Selective backup of specific components (Home Assistant,
add-ons, folders)
- **Encrypted Backups**: Optional backup encryption with user-provided passwords
- **Multiple Storage Locations**: Local storage and remote backup destinations
**Recovery Features**:
- **One-click Restore**: Simple restoration from backup files
- **Selective Restore**: Choose specific components to restore
- **Automatic Recovery**: Self-healing for common system issues
---
## Supervisor Development
### Python Requirements
- **Compatibility**: Python 3.13+
- **Language Features**: Use modern Python features:
- Type hints with `typing` module
- f-strings (preferred over `%` or `.format()`)
- Dataclasses and enum classes
- Async/await patterns
- Pattern matching where appropriate
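A compact illustration of these features used together (hypothetical names, not real Supervisor code):
```python
from dataclasses import dataclass
from enum import StrEnum

class AddonState(StrEnum):
    """Possible add-on states (illustrative)."""

    STARTED = "started"
    STOPPED = "stopped"

@dataclass
class AddonInfo:
    """Basic add-on metadata (illustrative)."""

    slug: str
    state: AddonState

def describe(addon: AddonInfo) -> str:
    """Return a human-readable status line using pattern matching."""
    match addon.state:
        case AddonState.STARTED:
            return f"Add-on {addon.slug} is running"
        case AddonState.STOPPED:
            return f"Add-on {addon.slug} is stopped"
        case _:
            raise ValueError(f"Unknown state: {addon.state}")
```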
### Code Quality Standards
- **Formatting**: Ruff
- **Linting**: PyLint and Ruff
- **Type Checking**: MyPy
- **Testing**: pytest with asyncio support
- **Language**: American English for all code, comments, and documentation
### Code Organization
**Core Structure**:
```
supervisor/
├── __init__.py # Package initialization
├── const.py # Constants and enums
├── coresys.py # Core system management
├── bootstrap.py # System initialization
├── exceptions.py # Custom exception classes
├── api/ # REST API endpoints
├── addons/ # Add-on management
├── backups/ # Backup system
├── docker/ # Docker integration
├── host/ # Host system interface
├── homeassistant/ # Home Assistant Core management
├── dbus/ # D-Bus system integration
├── hardware/ # Hardware detection and management
├── plugins/ # Plugin system
├── resolution/ # Issue detection and resolution
├── security/ # Security management
├── services/ # Service discovery and management
├── store/ # Add-on store management
└── utils/ # Utility functions
```
**Shared Constants**: Use constants from `supervisor/const.py` instead of hardcoding
values. Define new constants following existing patterns and group related constants
together.
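For example, a per-module constants file in this style might look like the hedged excerpt below (names are illustrative; the canonical definitions live in `supervisor/const.py`):
```python
from enum import StrEnum

# Attribute keys shared across API payloads.
ATTR_NAME = "name"
ATTR_SLUG = "slug"
ATTR_VERSION = "version"

class BusEvent(StrEnum):
    """Events fired on the internal event bus."""

    DOCKER_CONTAINER_STATE_CHANGE = "docker_container_state_change"
    HARDWARE_NEW_DEVICE = "hardware_new_device"
```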
### Supervisor Architecture Patterns
**CoreSysAttributes Inheritance Pattern**: Nearly all major classes in Supervisor
inherit from `CoreSysAttributes`, providing access to the centralized system state
via `self.coresys` and convenient `sys_*` properties.
```python
# Standard Supervisor class pattern
class MyManager(CoreSysAttributes):
    """Manage my functionality."""

    def __init__(self, coresys: CoreSys):
        """Initialize manager."""
        self.coresys: CoreSys = coresys
        self._component: MyComponent = MyComponent(coresys)

    @property
    def component(self) -> MyComponent:
        """Return component handler."""
        return self._component

    # Access system components via inherited properties
    async def do_something(self):
        await self.sys_docker.containers.get("my_container")
        self.sys_bus.fire_event(BusEvent.MY_EVENT, {"data": "value"})
```
**Key Inherited Properties from CoreSysAttributes**:
- `self.sys_docker` - Docker API access
- `self.sys_run_in_executor()` - Execute blocking operations
- `self.sys_create_task()` - Create async tasks
- `self.sys_bus` - Event bus for system events
- `self.sys_config` - System configuration
- `self.sys_homeassistant` - Home Assistant Core management
- `self.sys_addons` - Add-on management
- `self.sys_host` - Host system access
- `self.sys_dbus` - D-Bus system interface
**Load Pattern**: Many components implement a `load()` method that initializes
the component from external sources (containers, files, D-Bus services).
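A hedged sketch of that convention, building on the `CoreSysAttributes` pattern above (`read_json_file` and the data path are hypothetical):
```python
class MyLoadableManager(CoreSysAttributes):
    """Manage functionality backed by external state."""

    def __init__(self, coresys: CoreSys):
        """Synchronous setup only; no I/O in the constructor."""
        self.coresys: CoreSys = coresys
        self._data: dict | None = None

    async def load(self) -> None:
        """Initialize the component from external sources."""
        # Blocking file I/O goes through the executor.
        self._data = await self.sys_run_in_executor(
            read_json_file,  # hypothetical helper
            self.sys_config.path_supervisor / "my_component.json",
        )
```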
### API Development
**REST API Structure**:
- **Base Path**: `/api/` for all endpoints
- **Authentication**: Bearer token authentication
- **Consistent Response Format**: `{"result": "ok", "data": {...}}` or
`{"result": "error", "message": "..."}`
- **Validation**: Use voluptuous schemas with `api_validate()`
**Use `@api_process` Decorator**: This decorator handles all standard error handling
and response formatting automatically. The decorator catches `APIError`, `HassioError`,
and other exceptions, returning appropriate HTTP responses.
```python
from ..api.utils import api_process, api_validate


@api_process
async def backup_full(self, request: web.Request) -> dict[str, Any]:
    """Create full backup."""
    body = await api_validate(SCHEMA_BACKUP_FULL, request)
    job = await self.sys_backups.do_backup_full(**body)
    return {ATTR_JOB_ID: job.uuid}
```
### Docker Integration
- **Container Management**: Use Supervisor's Docker manager instead of direct
Docker API
- **Networking**: Supervisor manages internal Docker networks with predefined IP
ranges
- **Security**: AppArmor profiles, capability restrictions, and user namespace
isolation
- **Health Checks**: Implement health monitoring for all managed containers
### D-Bus Integration
- **Use dbus-fast**: Async D-Bus library for system integration
- **Service Management**: systemd, NetworkManager, hostname management
- **Error Handling**: Wrap D-Bus exceptions in Supervisor-specific exceptions
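A hedged example following these rules, based on dbus-fast's proxy-object API; the wrapper exception and service choice are illustrative:
```python
from dbus_fast import BusType, DBusError
from dbus_fast.aio import MessageBus

class HostError(Exception):
    """Supervisor-style wrapper exception (illustrative)."""

async def get_hostname() -> str:
    """Read the current hostname from org.freedesktop.hostname1."""
    try:
        bus = await MessageBus(bus_type=BusType.SYSTEM).connect()
        introspection = await bus.introspect(
            "org.freedesktop.hostname1", "/org/freedesktop/hostname1"
        )
        proxy = bus.get_proxy_object(
            "org.freedesktop.hostname1", "/org/freedesktop/hostname1", introspection
        )
        hostname1 = proxy.get_interface("org.freedesktop.hostname1")
        return await hostname1.get_hostname()  # D-Bus property "Hostname"
    except DBusError as err:
        # Wrap D-Bus failures in a Supervisor-specific exception.
        raise HostError("Can't read hostname") from err
```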
### Async Programming
- **All I/O operations must be async**: File operations, network calls, subprocess
execution
- **Use asyncio patterns**: Prefer `asyncio.gather()` over sequential awaits
- **Executor jobs**: Use `self.sys_run_in_executor()` for blocking operations
- **Two-phase initialization**: `__init__` for sync setup, `post_init()` for async
initialization
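A short sketch applying these rules together; the method names and the `write_json_file` helper are hypothetical:
```python
import asyncio

class BackupHelper(CoreSysAttributes):
    """Illustrative helper combining the async rules above."""

    async def backup_all(self) -> None:
        """Run independent backup coroutines concurrently."""
        await asyncio.gather(
            self.sys_homeassistant.backup(),
            *(addon.backup() for addon in self.sys_addons.installed),
        )

    async def save_metadata(self, path, data: dict) -> None:
        """Blocking file I/O belongs in the executor, not the event loop."""
        await self.sys_run_in_executor(write_json_file, path, data)
```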
### Testing
- **Location**: `tests/` directory with module mirroring
- **Fixtures**: Extensive use of pytest fixtures for CoreSys setup
- **Mocking**: Mock external dependencies (Docker, D-Bus, network calls)
- **Coverage**: Minimum 90% test coverage, 100% for security-sensitive code
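A minimal test sketch in this style, assuming a `coresys` fixture from `tests/conftest.py`, that `sys_docker` resolves to `coresys.docker`, and the `MyManager` example from above:
```python
from unittest.mock import AsyncMock

import pytest

@pytest.mark.asyncio
async def test_do_something_uses_docker(coresys):
    """do_something() should resolve the container via sys_docker."""
    manager = MyManager(coresys)
    coresys.docker.containers.get = AsyncMock()  # mock the external dependency

    await manager.do_something()

    coresys.docker.containers.get.assert_awaited_once_with("my_container")
```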
### Error Handling
- **Custom Exceptions**: Defined in `exceptions.py` with clear inheritance hierarchy
- **Error Propagation**: Use `from` clause for exception chaining
- **API Errors**: Use `APIError` with appropriate HTTP status codes
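For example, chaining with `from` preserves the original traceback when translating between layers (exception names mirror the style of `exceptions.py`; the low-level call is a stand-in):
```python
class HassioError(Exception):
    """Root exception."""


class DockerError(HassioError):
    """Docker-related error."""


class AddonsError(HassioError):
    """Add-on-related error."""


def start_container(slug: str) -> None:
    """Stand-in for the real Docker-level call."""
    raise DockerError(f"Container {slug} failed to start")


def start_addon(slug: str) -> None:
    """Translate low-level failures, chaining with `from`."""
    try:
        start_container(slug)
    except DockerError as err:
        raise AddonsError(f"Can't start add-on {slug}") from err
```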
### Security Considerations
- **Container Security**: AppArmor profiles mandatory for add-ons, minimal
capabilities
- **Authentication**: Token-based API authentication with role-based access
- **Data Protection**: Backup encryption, secure secret management, comprehensive
input validation
### Development Commands
```bash
# Run tests, adjust paths as necessary
pytest -qsx tests/
# Linting and formatting
ruff check supervisor/
ruff format supervisor/
# Type checking
mypy --ignore-missing-imports supervisor/
# Pre-commit hooks
pre-commit run --all-files
```
Always run the pre-commit hooks at the end of code editing.
### Common Patterns to Follow
**✅ Use These Patterns**:
- Inherit from `CoreSysAttributes` for system access
- Use `@api_process` decorator for API endpoints
- Use `self.sys_run_in_executor()` for blocking operations
- Access Docker via `self.sys_docker` not direct Docker API
- Use constants from `const.py` instead of hardcoding
- Keep module-specific types in a per-module `const.py` (e.g. `supervisor/store/const.py`)
**❌ Avoid These Patterns**:
- Direct Docker API usage - use Supervisor's Docker manager
- Blocking operations in async context (use asyncio alternatives)
- Hardcoded values - use constants from `const.py`
- Manual error handling in API endpoints - let `@api_process` handle it
This guide provides the foundation for contributing to Home Assistant Supervisor.
Follow these patterns and guidelines to ensure code quality, security, and
maintainability.

.github/dependabot.yml

@@ -1,14 +0,0 @@
version: 2
updates:
  - package-ecosystem: pip
    directory: "/"
    schedule:
      interval: daily
      time: "06:00"
    open-pull-requests-limit: 10
  - package-ecosystem: "github-actions"
    directory: "/"
    schedule:
      interval: daily
      time: "06:00"
    open-pull-requests-limit: 10

.github/lock.yml vendored Normal file

@@ -0,0 +1,27 @@
# Configuration for Lock Threads - https://github.com/dessant/lock-threads
# Number of days of inactivity before a closed issue or pull request is locked
daysUntilLock: 1
# Skip issues and pull requests created before a given timestamp. Timestamp must
# follow ISO 8601 (`YYYY-MM-DD`). Set to `false` to disable
skipCreatedBefore: 2020-01-01
# Issues and pull requests with these labels will be ignored. Set to `[]` to disable
exemptLabels: []
# Label to add before locking, such as `outdated`. Set to `false` to disable
lockLabel: false
# Comment to post before locking. Set to `false` to disable
lockComment: false
# Assign `resolved` as the reason for locking. Set to `false` to disable
setLockReason: false
# Limit to only `issues` or `pulls`
only: pulls
# Optionally, specify configuration settings just for `issues` or `pulls`
issues:
  daysUntilLock: 30

.github/release-drafter.yml

@@ -1,50 +1,4 @@
change-template: "- #$NUMBER $TITLE @$AUTHOR"
sort-direction: ascending
categories:
- title: ":boom: Breaking Changes"
label: "breaking-change"
- title: ":wrench: Build"
label: "build"
- title: ":boar: Chore"
label: "chore"
- title: ":sparkles: New Features"
label: "new-feature"
- title: ":zap: Performance"
label: "performance"
- title: ":recycle: Refactor"
label: "refactor"
- title: ":green_heart: CI"
label: "ci"
- title: ":bug: Bug Fixes"
label: "bugfix"
- title: ":white_check_mark: Test"
label: "test"
- title: ":arrow_up: Dependency Updates"
label: "dependencies"
collapse-after: 1
include-labels:
- "breaking-change"
- "build"
- "chore"
- "performance"
- "refactor"
- "new-feature"
- "bugfix"
- "dependencies"
- "test"
- "ci"
template: |
## What's Changed
$CHANGES

.github/stale.yml vendored Normal file

@@ -0,0 +1,17 @@
# Number of days of inactivity before an issue becomes stale
daysUntilStale: 60
# Number of days of inactivity before a stale issue is closed
daysUntilClose: 7
# Issues with these labels will never be considered stale
exemptLabels:
  - pinned
  - security
# Label to use when marking an issue as stale
staleLabel: wontfix
# Comment to post when marking an issue as stale. Set to `false` to disable
markComment: >
  This issue has been automatically marked as stale because it has not had
  recent activity. It will be closed if no further activity occurs. Thank you
  for your contributions.
# Comment to post when closing a stale issue. Set to `false` to disable
closeComment: false

@@ -1,380 +0,0 @@
name: Build supervisor
on:
workflow_dispatch:
inputs:
channel:
description: "Channel"
required: true
default: "dev"
version:
description: "Version"
required: true
publish:
description: "Publish"
required: true
default: "false"
stable:
description: "Stable"
required: true
default: "false"
pull_request:
branches: ["main"]
release:
types: ["published"]
push:
branches: ["main"]
paths:
- "rootfs/**"
- "supervisor/**"
- build.yaml
- Dockerfile
- requirements.txt
- setup.py
env:
DEFAULT_PYTHON: "3.13"
BUILD_NAME: supervisor
BUILD_TYPE: supervisor
concurrency:
group: "${{ github.workflow }}-${{ github.ref }}"
cancel-in-progress: true
jobs:
init:
name: Initialize build
runs-on: ubuntu-latest
outputs:
architectures: ${{ steps.info.outputs.architectures }}
version: ${{ steps.version.outputs.version }}
channel: ${{ steps.version.outputs.channel }}
publish: ${{ steps.version.outputs.publish }}
requirements: ${{ steps.requirements.outputs.changed }}
steps:
- name: Checkout the repository
uses: actions/checkout@v4.2.2
with:
fetch-depth: 0
- name: Get information
id: info
uses: home-assistant/actions/helpers/info@master
- name: Get version
id: version
uses: home-assistant/actions/helpers/version@master
with:
type: ${{ env.BUILD_TYPE }}
- name: Get changed files
id: changed_files
if: steps.version.outputs.publish == 'false'
uses: masesgroup/retrieve-changed-files@v3.0.0
- name: Check if requirements files changed
id: requirements
run: |
if [[ "${{ steps.changed_files.outputs.all }}" =~ (requirements.txt|build.yaml) ]]; then
echo "changed=true" >> "$GITHUB_OUTPUT"
fi
build:
name: Build ${{ matrix.arch }} supervisor
needs: init
runs-on: ubuntu-latest
permissions:
contents: read
id-token: write
packages: write
strategy:
matrix:
arch: ${{ fromJson(needs.init.outputs.architectures) }}
steps:
- name: Checkout the repository
uses: actions/checkout@v4.2.2
with:
fetch-depth: 0
- name: Write env-file
if: needs.init.outputs.requirements == 'true'
run: |
(
# Fix out of memory issues with rust
echo "CARGO_NET_GIT_FETCH_WITH_CLI=true"
) > .env_file
- name: Build wheels
if: needs.init.outputs.requirements == 'true'
uses: home-assistant/wheels@2025.07.0
with:
abi: cp313
tag: musllinux_1_2
arch: ${{ matrix.arch }}
wheels-key: ${{ secrets.WHEELS_KEY }}
apk: "libffi-dev;openssl-dev;yaml-dev"
skip-binary: aiohttp
env-file: true
requirements: "requirements.txt"
- name: Set version
if: needs.init.outputs.publish == 'true'
uses: home-assistant/actions/helpers/version@master
with:
type: ${{ env.BUILD_TYPE }}
- name: Set up Python ${{ env.DEFAULT_PYTHON }}
if: needs.init.outputs.publish == 'true'
uses: actions/setup-python@v5.6.0
with:
python-version: ${{ env.DEFAULT_PYTHON }}
- name: Install Cosign
if: needs.init.outputs.publish == 'true'
uses: sigstore/cosign-installer@v3.9.2
with:
cosign-release: "v2.4.3"
- name: Install dirhash and calc hash
if: needs.init.outputs.publish == 'true'
run: |
pip3 install setuptools dirhash
dir_hash="$(dirhash "${{ github.workspace }}/supervisor" -a sha256 --match "*.py")"
echo "${dir_hash}" > rootfs/supervisor.sha256
- name: Sign supervisor SHA256
if: needs.init.outputs.publish == 'true'
run: |
cosign sign-blob --yes rootfs/supervisor.sha256 --bundle rootfs/supervisor.sha256.sig
- name: Login to GitHub Container Registry
if: needs.init.outputs.publish == 'true'
uses: docker/login-action@v3.4.0
with:
registry: ghcr.io
username: ${{ github.repository_owner }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Set build arguments
if: needs.init.outputs.publish == 'false'
run: echo "BUILD_ARGS=--test" >> $GITHUB_ENV
- name: Build supervisor
uses: home-assistant/builder@2025.03.0
with:
args: |
$BUILD_ARGS \
--${{ matrix.arch }} \
--target /data \
--cosign \
--generic ${{ needs.init.outputs.version }}
env:
CAS_API_KEY: ${{ secrets.CAS_TOKEN }}
version:
name: Update version
needs: ["init", "run_supervisor"]
runs-on: ubuntu-latest
steps:
- name: Checkout the repository
if: needs.init.outputs.publish == 'true'
uses: actions/checkout@v4.2.2
- name: Initialize git
if: needs.init.outputs.publish == 'true'
uses: home-assistant/actions/helpers/git-init@master
with:
name: ${{ secrets.GIT_NAME }}
email: ${{ secrets.GIT_EMAIL }}
token: ${{ secrets.GIT_TOKEN }}
- name: Update version file
if: needs.init.outputs.publish == 'true'
uses: home-assistant/actions/helpers/version-push@master
with:
key: ${{ env.BUILD_NAME }}
version: ${{ needs.init.outputs.version }}
channel: ${{ needs.init.outputs.channel }}
run_supervisor:
runs-on: ubuntu-latest
name: Run the Supervisor
needs: ["build", "init"]
timeout-minutes: 60
steps:
- name: Checkout the repository
uses: actions/checkout@v4.2.2
- name: Build the Supervisor
if: needs.init.outputs.publish != 'true'
uses: home-assistant/builder@2025.03.0
with:
args: |
--test \
--amd64 \
--target /data \
--generic runner
- name: Pull Supervisor
if: needs.init.outputs.publish == 'true'
run: |
docker pull ghcr.io/home-assistant/amd64-hassio-supervisor:${{ needs.init.outputs.version }}
docker tag ghcr.io/home-assistant/amd64-hassio-supervisor:${{ needs.init.outputs.version }} ghcr.io/home-assistant/amd64-hassio-supervisor:runner
- name: Create the Supervisor
run: |
mkdir -p /tmp/supervisor/data
docker create --name hassio_supervisor \
--privileged \
--security-opt seccomp=unconfined \
--security-opt apparmor=unconfined \
-v /run/docker.sock:/run/docker.sock \
-v /run/dbus:/run/dbus \
-v /tmp/supervisor/data:/data \
-v /etc/machine-id:/etc/machine-id:ro \
-e SUPERVISOR_SHARE="/tmp/supervisor/data" \
-e SUPERVISOR_NAME=hassio_supervisor \
-e SUPERVISOR_DEV=1 \
-e SUPERVISOR_MACHINE="qemux86-64" \
ghcr.io/home-assistant/amd64-hassio-supervisor:runner
- name: Start the Supervisor
run: docker start hassio_supervisor
- name: Wait for Supervisor to come up
run: |
SUPERVISOR=$(docker inspect --format='{{.NetworkSettings.IPAddress}}' hassio_supervisor)
ping="error"
while [ "$ping" != "ok" ]; do
ping=$(curl -sSL "http://$SUPERVISOR/supervisor/ping" | jq -r '.result')
sleep 5
done
- name: Check the Supervisor
run: |
echo "Checking supervisor info"
test=$(docker exec hassio_cli ha supervisor info --no-progress --raw-json | jq -r '.result')
if [ "$test" != "ok" ]; then
exit 1
fi
echo "Checking supervisor network info"
test=$(docker exec hassio_cli ha network info --no-progress --raw-json | jq -r '.result')
if [ "$test" != "ok" ]; then
exit 1
fi
- name: Check the Store / Addon
run: |
echo "Install Core SSH Add-on"
test=$(docker exec hassio_cli ha addons install core_ssh --no-progress --raw-json | jq -r '.result')
if [ "$test" != "ok" ]; then
exit 1
fi
# Make sure it actually installed
test=$(docker exec hassio_cli ha addons info core_ssh --no-progress --raw-json | jq -r '.data.version')
if [[ "$test" == "null" ]]; then
exit 1
fi
echo "Start Core SSH Add-on"
test=$(docker exec hassio_cli ha addons start core_ssh --no-progress --raw-json | jq -r '.result')
if [ "$test" != "ok" ]; then
exit 1
fi
# Make sure its state is started
test="$(docker exec hassio_cli ha addons info core_ssh --no-progress --raw-json | jq -r '.data.state')"
if [ "$test" != "started" ]; then
exit 1
fi
- name: Check the Supervisor code sign
if: needs.init.outputs.publish == 'true'
run: |
echo "Enable Content-Trust"
test=$(docker exec hassio_cli ha security options --content-trust=true --no-progress --raw-json | jq -r '.result')
if [ "$test" != "ok" ]; then
exit 1
fi
echo "Run supervisor health check"
test=$(docker exec hassio_cli ha resolution healthcheck --no-progress --raw-json | jq -r '.result')
if [ "$test" != "ok" ]; then
exit 1
fi
echo "Check supervisor unhealthy"
test=$(docker exec hassio_cli ha resolution info --no-progress --raw-json | jq -r '.data.unhealthy[]')
if [ "$test" != "" ]; then
exit 1
fi
echo "Check supervisor supported"
test=$(docker exec hassio_cli ha resolution info --no-progress --raw-json | jq -r '.data.unsupported[]')
if [[ "$test" =~ source_mods ]]; then
exit 1
fi
- name: Create full backup
id: backup
run: |
test=$(docker exec hassio_cli ha backups new --no-progress --raw-json)
if [ "$(echo $test | jq -r '.result')" != "ok" ]; then
exit 1
fi
echo "slug=$(echo $test | jq -r '.data.slug')" >> "$GITHUB_OUTPUT"
- name: Uninstall SSH add-on
run: |
test=$(docker exec hassio_cli ha addons uninstall core_ssh --no-progress --raw-json | jq -r '.result')
if [ "$test" != "ok" ]; then
exit 1
fi
- name: Restart supervisor
run: |
test=$(docker exec hassio_cli ha supervisor restart --no-progress --raw-json | jq -r '.result')
if [ "$test" != "ok" ]; then
exit 1
fi
- name: Wait for Supervisor to come up
run: |
SUPERVISOR=$(docker inspect --format='{{.NetworkSettings.IPAddress}}' hassio_supervisor)
ping="error"
while [ "$ping" != "ok" ]; do
ping=$(curl -sSL "http://$SUPERVISOR/supervisor/ping" | jq -r '.result')
sleep 5
done
- name: Restore SSH add-on from backup
run: |
test=$(docker exec hassio_cli ha backups restore ${{ steps.backup.outputs.slug }} --addons core_ssh --no-progress --raw-json | jq -r '.result')
if [ "$test" != "ok" ]; then
exit 1
fi
# Make sure it actually installed
test=$(docker exec hassio_cli ha addons info core_ssh --no-progress --raw-json | jq -r '.data.version')
if [[ "$test" == "null" ]]; then
exit 1
fi
# Make sure its state is started
test="$(docker exec hassio_cli ha addons info core_ssh --no-progress --raw-json | jq -r '.data.state')"
if [ "$test" != "started" ]; then
exit 1
fi
- name: Restore SSL directory from backup
run: |
test=$(docker exec hassio_cli ha backups restore ${{ steps.backup.outputs.slug }} --folders ssl --no-progress --raw-json | jq -r '.result')
if [ "$test" != "ok" ]; then
exit 1
fi
- name: Get supervisor logs on failure
if: ${{ cancelled() || failure() }}
run: docker logs hassio_supervisor

@@ -1,19 +0,0 @@
name: Check PR
on:
pull_request:
branches: ["main"]
types: [labeled, unlabeled, synchronize]
jobs:
init:
name: Check labels
runs-on: ubuntu-latest
steps:
- name: Check labels
run: |
labels=$(jq -r '.pull_request.labels[] | .name' ${{ github.event_path }})
echo "$labels"
if [ "$labels" == "cla-signed" ]; then
exit 1
fi

@@ -1,428 +0,0 @@
name: CI
# yamllint disable-line rule:truthy
on:
push:
branches:
- main
pull_request: ~
env:
DEFAULT_PYTHON: "3.13"
PRE_COMMIT_CACHE: ~/.cache/pre-commit
MYPY_CACHE_VERSION: 1
concurrency:
group: "${{ github.workflow }}-${{ github.ref }}"
cancel-in-progress: true
jobs:
# Separate job to pre-populate the base dependency cache
# This prevents the upcoming jobs from doing the same individually
prepare:
runs-on: ubuntu-latest
outputs:
python-version: ${{ steps.python.outputs.python-version }}
name: Prepare Python dependencies
steps:
- name: Check out code from GitHub
uses: actions/checkout@v4.2.2
- name: Set up Python
id: python
uses: actions/setup-python@v5.6.0
with:
python-version: ${{ env.DEFAULT_PYTHON }}
- name: Restore Python virtual environment
id: cache-venv
uses: actions/cache@v4.2.3
with:
path: venv
key: |
${{ runner.os }}-venv-${{ steps.python.outputs.python-version }}-${{ hashFiles('requirements.txt') }}-${{ hashFiles('requirements_tests.txt') }}
- name: Create Python virtual environment
if: steps.cache-venv.outputs.cache-hit != 'true'
run: |
python -m venv venv
. venv/bin/activate
pip install -U pip setuptools
pip install -r requirements.txt -r requirements_tests.txt
- name: Restore pre-commit environment from cache
id: cache-precommit
uses: actions/cache@v4.2.3
with:
path: ${{ env.PRE_COMMIT_CACHE }}
lookup-only: true
key: |
${{ runner.os }}-pre-commit-${{ hashFiles('.pre-commit-config.yaml') }}
restore-keys: |
${{ runner.os }}-pre-commit-
- name: Install pre-commit dependencies
if: steps.cache-precommit.outputs.cache-hit != 'true'
run: |
. venv/bin/activate
pre-commit install-hooks
lint-ruff-format:
name: Check ruff-format
runs-on: ubuntu-latest
needs: prepare
steps:
- name: Check out code from GitHub
uses: actions/checkout@v4.2.2
- name: Set up Python ${{ needs.prepare.outputs.python-version }}
uses: actions/setup-python@v5.6.0
id: python
with:
python-version: ${{ needs.prepare.outputs.python-version }}
- name: Restore Python virtual environment
id: cache-venv
uses: actions/cache@v4.2.3
with:
path: venv
key: |
${{ runner.os }}-venv-${{ needs.prepare.outputs.python-version }}-${{ hashFiles('requirements.txt') }}-${{ hashFiles('requirements_tests.txt') }}
- name: Fail job if Python cache restore failed
if: steps.cache-venv.outputs.cache-hit != 'true'
run: |
echo "Failed to restore Python virtual environment from cache"
exit 1
- name: Restore pre-commit environment from cache
id: cache-precommit
uses: actions/cache@v4.2.3
with:
path: ${{ env.PRE_COMMIT_CACHE }}
key: |
${{ runner.os }}-pre-commit-${{ hashFiles('.pre-commit-config.yaml') }}
- name: Fail job if pre-commit cache restore failed
if: steps.cache-precommit.outputs.cache-hit != 'true'
run: |
echo "Failed to restore pre-commit environment from cache"
exit 1
- name: Run ruff-format
run: |
. venv/bin/activate
pre-commit run --hook-stage manual ruff-format --all-files --show-diff-on-failure
env:
RUFF_OUTPUT_FORMAT: github
lint-ruff:
name: Check ruff
runs-on: ubuntu-latest
needs: prepare
steps:
- name: Check out code from GitHub
uses: actions/checkout@v4.2.2
- name: Set up Python ${{ needs.prepare.outputs.python-version }}
uses: actions/setup-python@v5.6.0
id: python
with:
python-version: ${{ needs.prepare.outputs.python-version }}
- name: Restore Python virtual environment
id: cache-venv
uses: actions/cache@v4.2.3
with:
path: venv
key: |
${{ runner.os }}-venv-${{ needs.prepare.outputs.python-version }}-${{ hashFiles('requirements.txt') }}-${{ hashFiles('requirements_tests.txt') }}
- name: Fail job if Python cache restore failed
if: steps.cache-venv.outputs.cache-hit != 'true'
run: |
echo "Failed to restore Python virtual environment from cache"
exit 1
- name: Restore pre-commit environment from cache
id: cache-precommit
uses: actions/cache@v4.2.3
with:
path: ${{ env.PRE_COMMIT_CACHE }}
key: |
${{ runner.os }}-pre-commit-${{ hashFiles('.pre-commit-config.yaml') }}
- name: Fail job if pre-commit cache restore failed
if: steps.cache-precommit.outputs.cache-hit != 'true'
run: |
echo "Failed to restore pre-commit environment from cache"
exit 1
- name: Run ruff
run: |
. venv/bin/activate
pre-commit run --hook-stage manual ruff --all-files --show-diff-on-failure
env:
RUFF_OUTPUT_FORMAT: github
lint-dockerfile:
name: Check Dockerfile
runs-on: ubuntu-latest
needs: prepare
steps:
- name: Check out code from GitHub
uses: actions/checkout@v4.2.2
- name: Register hadolint problem matcher
run: |
echo "::add-matcher::.github/workflows/matchers/hadolint.json"
- name: Check Dockerfile
uses: docker://hadolint/hadolint:v1.18.0
with:
args: hadolint Dockerfile
lint-executable-shebangs:
name: Check executables
runs-on: ubuntu-latest
needs: prepare
steps:
- name: Check out code from GitHub
uses: actions/checkout@v4.2.2
- name: Set up Python ${{ needs.prepare.outputs.python-version }}
uses: actions/setup-python@v5.6.0
id: python
with:
python-version: ${{ needs.prepare.outputs.python-version }}
- name: Restore Python virtual environment
id: cache-venv
uses: actions/cache@v4.2.3
with:
path: venv
key: |
${{ runner.os }}-venv-${{ needs.prepare.outputs.python-version }}-${{ hashFiles('requirements.txt') }}-${{ hashFiles('requirements_tests.txt') }}
- name: Fail job if Python cache restore failed
if: steps.cache-venv.outputs.cache-hit != 'true'
run: |
echo "Failed to restore Python virtual environment from cache"
exit 1
- name: Restore pre-commit environment from cache
id: cache-precommit
uses: actions/cache@v4.2.3
with:
path: ${{ env.PRE_COMMIT_CACHE }}
key: |
${{ runner.os }}-pre-commit-${{ hashFiles('.pre-commit-config.yaml') }}
- name: Fail job if pre-commit cache restore failed
if: steps.cache-precommit.outputs.cache-hit != 'true'
run: |
echo "Failed to restore pre-commit environment from cache"
exit 1
- name: Register check executables problem matcher
run: |
echo "::add-matcher::.github/workflows/matchers/check-executables-have-shebangs.json"
- name: Run executables check
run: |
. venv/bin/activate
pre-commit run --hook-stage manual check-executables-have-shebangs --all-files
lint-json:
name: Check JSON
runs-on: ubuntu-latest
needs: prepare
steps:
- name: Check out code from GitHub
uses: actions/checkout@v4.2.2
- name: Set up Python ${{ needs.prepare.outputs.python-version }}
uses: actions/setup-python@v5.6.0
id: python
with:
python-version: ${{ needs.prepare.outputs.python-version }}
- name: Restore Python virtual environment
id: cache-venv
uses: actions/cache@v4.2.3
with:
path: venv
key: |
${{ runner.os }}-venv-${{ needs.prepare.outputs.python-version }}-${{ hashFiles('requirements.txt') }}-${{ hashFiles('requirements_tests.txt') }}
- name: Fail job if Python cache restore failed
if: steps.cache-venv.outputs.cache-hit != 'true'
run: |
echo "Failed to restore Python virtual environment from cache"
exit 1
- name: Restore pre-commit environment from cache
id: cache-precommit
uses: actions/cache@v4.2.3
with:
path: ${{ env.PRE_COMMIT_CACHE }}
key: |
${{ runner.os }}-pre-commit-${{ hashFiles('.pre-commit-config.yaml') }}
- name: Fail job if pre-commit cache restore failed
if: steps.cache-precommit.outputs.cache-hit != 'true'
run: |
echo "Failed to restore pre-commit environment from cache"
exit 1
- name: Register check-json problem matcher
run: |
echo "::add-matcher::.github/workflows/matchers/check-json.json"
- name: Run check-json
run: |
. venv/bin/activate
pre-commit run --hook-stage manual check-json --all-files
lint-pylint:
name: Check pylint
runs-on: ubuntu-latest
needs: prepare
steps:
- name: Check out code from GitHub
uses: actions/checkout@v4.2.2
- name: Set up Python ${{ needs.prepare.outputs.python-version }}
uses: actions/setup-python@v5.6.0
id: python
with:
python-version: ${{ needs.prepare.outputs.python-version }}
- name: Restore Python virtual environment
id: cache-venv
uses: actions/cache@v4.2.3
with:
path: venv
key: |
${{ runner.os }}-venv-${{ needs.prepare.outputs.python-version }}-${{ hashFiles('requirements.txt') }}-${{ hashFiles('requirements_tests.txt') }}
- name: Fail job if Python cache restore failed
if: steps.cache-venv.outputs.cache-hit != 'true'
run: |
echo "Failed to restore Python virtual environment from cache"
exit 1
- name: Install additional system dependencies
run: |
sudo apt-get update
sudo apt-get install -y --no-install-recommends libpulse0
- name: Register pylint problem matcher
run: |
echo "::add-matcher::.github/workflows/matchers/pylint.json"
- name: Run pylint
run: |
. venv/bin/activate
pylint supervisor tests
mypy:
name: Check mypy
runs-on: ubuntu-latest
needs: prepare
steps:
- name: Check out code from GitHub
uses: actions/checkout@v4.2.2
- name: Set up Python ${{ needs.prepare.outputs.python-version }}
uses: actions/setup-python@v5.6.0
id: python
with:
python-version: ${{ needs.prepare.outputs.python-version }}
- name: Generate partial mypy restore key
id: generate-mypy-key
run: |
mypy_version=$(cat requirements_tests.txt | grep mypy | cut -d '=' -f 3)
echo "version=$mypy_version" >> $GITHUB_OUTPUT
echo "key=mypy-${{ env.MYPY_CACHE_VERSION }}-$mypy_version-$(date -u '+%Y-%m-%dT%H:%M:%s')" >> $GITHUB_OUTPUT
- name: Restore Python virtual environment
id: cache-venv
uses: actions/cache@v4.2.3
with:
path: venv
key: >-
${{ runner.os }}-venv-${{ needs.prepare.outputs.python-version }}-${{ hashFiles('requirements.txt') }}-${{ hashFiles('requirements_tests.txt') }}
- name: Fail job if Python cache restore failed
if: steps.cache-venv.outputs.cache-hit != 'true'
run: |
echo "Failed to restore Python virtual environment from cache"
exit 1
- name: Restore mypy cache
uses: actions/cache@v4.2.3
with:
path: .mypy_cache
key: >-
${{ runner.os }}-mypy-${{ needs.prepare.outputs.python-version }}-${{ steps.generate-mypy-key.outputs.key }}
restore-keys: >-
${{ runner.os }}-venv-${{ needs.prepare.outputs.python-version }}-mypy-${{ env.MYPY_CACHE_VERSION }}-${{ steps.generate-mypy-key.outputs.version }}
- name: Register mypy problem matcher
run: |
echo "::add-matcher::.github/workflows/matchers/mypy.json"
- name: Run mypy
run: |
. venv/bin/activate
mypy --ignore-missing-imports supervisor
pytest:
runs-on: ubuntu-latest
needs: prepare
name: Run tests Python ${{ needs.prepare.outputs.python-version }}
steps:
- name: Check out code from GitHub
uses: actions/checkout@v4.2.2
- name: Set up Python ${{ needs.prepare.outputs.python-version }}
uses: actions/setup-python@v5.6.0
id: python
with:
python-version: ${{ needs.prepare.outputs.python-version }}
- name: Install Cosign
uses: sigstore/cosign-installer@v3.9.2
with:
cosign-release: "v2.4.3"
- name: Restore Python virtual environment
id: cache-venv
uses: actions/cache@v4.2.3
with:
path: venv
key: |
${{ runner.os }}-venv-${{ needs.prepare.outputs.python-version }}-${{ hashFiles('requirements.txt') }}-${{ hashFiles('requirements_tests.txt') }}
- name: Fail job if Python cache restore failed
if: steps.cache-venv.outputs.cache-hit != 'true'
run: |
echo "Failed to restore Python virtual environment from cache"
exit 1
- name: Install additional system dependencies
run: |
sudo apt-get update
sudo apt-get install -y --no-install-recommends libpulse0 libudev1 dbus-daemon
- name: Register Python problem matcher
run: |
echo "::add-matcher::.github/workflows/matchers/python.json"
- name: Install Pytest Annotation plugin
run: |
. venv/bin/activate
# Ideally this should be part of our dependencies
# However this plugin is fairly new and doesn't run correctly
# on a non-GitHub environment.
pip install pytest-github-actions-annotate-failures
- name: Run pytest
run: |
. venv/bin/activate
pytest \
-qq \
--timeout=10 \
--durations=10 \
--cov supervisor \
-o console_output_style=count \
tests
- name: Upload coverage artifact
uses: actions/upload-artifact@v4.6.2
with:
name: coverage-${{ matrix.python-version }}
path: .coverage
include-hidden-files: true
coverage:
name: Process test coverage
runs-on: ubuntu-latest
needs: ["pytest", "prepare"]
steps:
- name: Check out code from GitHub
uses: actions/checkout@v4.2.2
- name: Set up Python ${{ needs.prepare.outputs.python-version }}
uses: actions/setup-python@v5.6.0
id: python
with:
python-version: ${{ needs.prepare.outputs.python-version }}
- name: Restore Python virtual environment
id: cache-venv
uses: actions/cache@v4.2.3
with:
path: venv
key: |
${{ runner.os }}-venv-${{ needs.prepare.outputs.python-version }}-${{ hashFiles('requirements.txt') }}-${{ hashFiles('requirements_tests.txt') }}
- name: Fail job if Python cache restore failed
if: steps.cache-venv.outputs.cache-hit != 'true'
run: |
echo "Failed to restore Python virtual environment from cache"
exit 1
- name: Download all coverage artifacts
uses: actions/download-artifact@v4.3.0
- name: Combine coverage results
run: |
. venv/bin/activate
coverage combine coverage*/.coverage*
coverage report
coverage xml
- name: Upload coverage to Codecov
uses: codecov/codecov-action@v5.4.3

@@ -1,20 +0,0 @@
name: Lock
# yamllint disable-line rule:truthy
on:
schedule:
- cron: "0 0 * * *"
jobs:
lock:
runs-on: ubuntu-latest
steps:
- uses: dessant/lock-threads@v5.0.1
with:
github-token: ${{ github.token }}
issue-inactive-days: "30"
exclude-issue-created-before: "2020-10-01T00:00:00Z"
issue-lock-reason: ""
pr-inactive-days: "1"
exclude-pr-created-before: "2020-11-01T00:00:00Z"
pr-lock-reason: ""

.github/workflows/matchers/check-executables-have-shebangs.json

@@ -1,14 +0,0 @@
{
  "problemMatcher": [
    {
      "owner": "check-executables-have-shebangs",
      "pattern": [
        {
          "regexp": "^(.+):\\s(.+)$",
          "file": 1,
          "message": 2
        }
      ]
    }
  ]
}

.github/workflows/matchers/check-json.json

@@ -1,16 +0,0 @@
{
  "problemMatcher": [
    {
      "owner": "check-json",
      "pattern": [
        {
          "regexp": "^(.+):\\s(.+\\sline\\s(\\d+)\\scolumn\\s(\\d+).+)$",
          "file": 1,
          "message": 2,
          "line": 3,
          "column": 4
        }
      ]
    }
  ]
}

.github/workflows/matchers/hadolint.json

@@ -1,16 +0,0 @@
{
  "problemMatcher": [
    {
      "owner": "hadolint",
      "pattern": [
        {
          "regexp": "^(.+):(\\d+)\\s+((DL\\d{4}).+)$",
          "file": 1,
          "line": 2,
          "message": 3,
          "code": 4
        }
      ]
    }
  ]
}

.github/workflows/matchers/mypy.json

@@ -1,16 +0,0 @@
{
  "problemMatcher": [
    {
      "owner": "mypy",
      "pattern": [
        {
          "regexp": "^(.+):(\\d+):\\s(error|warning):\\s(.+)$",
          "file": 1,
          "line": 2,
          "severity": 3,
          "message": 4
        }
      ]
    }
  ]
}

.github/workflows/matchers/pylint.json

@@ -1,32 +0,0 @@
{
  "problemMatcher": [
    {
      "owner": "pylint-error",
      "severity": "error",
      "pattern": [
        {
          "regexp": "^(.+):(\\d+):(\\d+):\\s(([EF]\\d{4}):\\s.+)$",
          "file": 1,
          "line": 2,
          "column": 3,
          "message": 4,
          "code": 5
        }
      ]
    },
    {
      "owner": "pylint-warning",
      "severity": "warning",
      "pattern": [
        {
          "regexp": "^(.+):(\\d+):(\\d+):\\s(([CRW]\\d{4}):\\s.+)$",
          "file": 1,
          "line": 2,
          "column": 3,
          "message": 4,
          "code": 5
        }
      ]
    }
  ]
}

.github/workflows/matchers/python.json

@@ -1,18 +0,0 @@
{
  "problemMatcher": [
    {
      "owner": "python",
      "pattern": [
        {
          "regexp": "^\\s*File\\s\\\"(.*)\\\",\\sline\\s(\\d+),\\sin\\s(.*)$",
          "file": 1,
          "line": 2
        },
        {
          "regexp": "^\\s*raise\\s(.*)\\(\\'(.*)\\'\\)$",
          "message": 2
        }
      ]
    }
  ]
}

@@ -1,44 +0,0 @@
name: Release Drafter
on:
push:
branches:
- main
jobs:
update_release_draft:
runs-on: ubuntu-latest
name: Release Drafter
steps:
- name: Checkout the repository
uses: actions/checkout@v4.2.2
with:
fetch-depth: 0
- name: Find Next Version
id: version
run: |
declare -i newpost
latest=$(git describe --tags $(git rev-list --tags --max-count=1))
latestpre=$(echo "$latest" | awk '{split($0,a,"."); print a[1] "." a[2]}')
datepre=$(date --utc '+%Y.%m')
if [[ "$latestpre" == "$datepre" ]]; then
latestpost=$(echo "$latest" | awk '{split($0,a,"."); print a[3]}')
newpost=$latestpost+1
else
newpost=0
fi
echo Current version: $latest
echo New target version: $datepre.$newpost
echo "version=$datepre.$newpost" >> "$GITHUB_OUTPUT"
- name: Run Release Drafter
uses: release-drafter/release-drafter@v6.1.0
with:
tag: ${{ steps.version.outputs.version }}
name: ${{ steps.version.outputs.version }}
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}

@@ -1,58 +0,0 @@
name: Restrict task creation
# yamllint disable-line rule:truthy
on:
issues:
types: [opened]
jobs:
check-authorization:
runs-on: ubuntu-latest
# Only run if this is a Task issue type (from the issue form)
if: github.event.issue.issue_type == 'Task'
steps:
- name: Check if user is authorized
uses: actions/github-script@v7
with:
script: |
const issueAuthor = context.payload.issue.user.login;
// Check if user is an organization member
try {
await github.rest.orgs.checkMembershipForUser({
org: 'home-assistant',
username: issueAuthor
});
console.log(`✅ ${issueAuthor} is an organization member`);
return; // Authorized
} catch (error) {
console.log(`❌ ${issueAuthor} is not authorized to create Task issues`);
}
// Close the issue with a comment
await github.rest.issues.createComment({
owner: context.repo.owner,
repo: context.repo.repo,
issue_number: context.issue.number,
body: `Hi @${issueAuthor}, thank you for your contribution!\n\n` +
`Task issues are restricted to Open Home Foundation staff and authorized contributors.\n\n` +
`If you would like to:\n` +
`- Report a bug: Please use the [bug report form](https://github.com/home-assistant/supervisor/issues/new?template=bug_report.yml)\n` +
`- Request a feature: Please submit to [Feature Requests](https://github.com/orgs/home-assistant/discussions)\n\n` +
`If you believe you should have access to create Task issues, please contact the maintainers.`
});
await github.rest.issues.update({
owner: context.repo.owner,
repo: context.repo.repo,
issue_number: context.issue.number,
state: 'closed'
});
// Add a label to indicate this was auto-closed
await github.rest.issues.addLabels({
owner: context.repo.owner,
repo: context.repo.repo,
issue_number: context.issue.number,
labels: ['auto-closed']
});

@@ -1,21 +0,0 @@
name: Sentry Release
# yamllint disable-line rule:truthy
on:
release:
types: [published, prereleased]
jobs:
createSentryRelease:
runs-on: ubuntu-latest
steps:
- name: Check out code from GitHub
uses: actions/checkout@v4.2.2
- name: Sentry Release
uses: getsentry/action-release@v3.2.0
env:
SENTRY_AUTH_TOKEN: ${{ secrets.SENTRY_AUTH_TOKEN }}
SENTRY_ORG: ${{ secrets.SENTRY_ORG }}
SENTRY_PROJECT: ${{ secrets.SENTRY_PROJECT }}
with:
environment: production

@@ -1,39 +0,0 @@
name: Stale
# yamllint disable-line rule:truthy
on:
schedule:
- cron: "0 * * * *"
jobs:
stale:
runs-on: ubuntu-latest
steps:
- uses: actions/stale@v9.1.0
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
days-before-stale: 30
days-before-close: 7
stale-issue-label: "stale"
exempt-issue-labels: "no-stale,Help%20wanted,help-wanted,pinned,rfc,security"
stale-issue-message: >
There hasn't been any activity on this issue recently. Due to the
high number of incoming GitHub notifications, we have to clean some
of the old issues, as many of them have already been resolved with
the latest updates.
Please make sure to update to the latest version and check if that
solves the issue. Let us know if that works for you by
adding a comment 👍
This issue has now been marked as stale and will be closed if no
further activity occurs. Thank you for your contributions.
stale-pr-label: "stale"
exempt-pr-labels: "no-stale,pinned,rfc,security"
stale-pr-message: >
There hasn't been any activity on this pull request recently. This
pull request has been automatically marked as stale because of that
and will be closed if no further activity occurs within 7 days.
Thank you for your contributions.

@@ -1,82 +0,0 @@
name: Update frontend
on:
schedule: # once a day
- cron: "0 0 * * *"
workflow_dispatch:
jobs:
check-version:
runs-on: ubuntu-latest
outputs:
skip: ${{ steps.check_version.outputs.skip || steps.check_existing_pr.outputs.skip }}
current_version: ${{ steps.check_version.outputs.current_version }}
latest_version: ${{ steps.latest_frontend_version.outputs.latest_tag }}
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Get latest frontend release
id: latest_frontend_version
uses: abatilo/release-info-action@v1.3.3
with:
owner: home-assistant
repo: frontend
- name: Check if version is up to date
id: check_version
run: |
current_version="$(cat .ha-frontend-version)"
latest_version="${{ steps.latest_frontend_version.outputs.latest_tag }}"
echo "current_version=${current_version}" >> $GITHUB_OUTPUT
echo "LATEST_VERSION=${latest_version}" >> $GITHUB_ENV
if [[ ! "$current_version" < "$latest_version" ]]; then
echo "Frontend version is up to date"
echo "skip=true" >> $GITHUB_OUTPUT
fi
- name: Check if there is no open PR with this version
if: steps.check_version.outputs.skip != 'true'
id: check_existing_pr
env:
GH_TOKEN: ${{ github.token }}
run: |
PR=$(gh pr list --state open --base main --json title --search "Update frontend to version $LATEST_VERSION")
if [[ "$PR" != "[]" ]]; then
echo "Skipping - There is already a PR open for version $LATEST_VERSION"
echo "skip=true" >> $GITHUB_OUTPUT
fi
create-pr:
runs-on: ubuntu-latest
needs: check-version
if: needs.check-version.outputs.skip != 'true'
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Clear www folder
run: |
rm -rf supervisor/api/panel/*
- name: Update version file
run: |
echo "${{ needs.check-version.outputs.latest_version }}" > .ha-frontend-version
- name: Download release assets
uses: robinraju/release-downloader@v1
with:
repository: 'home-assistant/frontend'
tag: ${{ needs.check-version.outputs.latest_version }}
fileName: home_assistant_frontend_supervisor-${{ needs.check-version.outputs.latest_version }}.tar.gz
extract: true
out-file-path: supervisor/api/panel/
- name: Remove release assets archive
run: |
rm -f supervisor/api/panel/home_assistant_frontend_supervisor-*.tar.gz
- name: Create PR
uses: peter-evans/create-pull-request@v7
with:
commit-message: "Update frontend to version ${{ needs.check-version.outputs.latest_version }}"
branch: autoupdate-frontend
base: main
draft: true
sign-commits: true
title: "Update frontend to version ${{ needs.check-version.outputs.latest_version }}"
body: >
Update frontend from ${{ needs.check-version.outputs.current_version }} to
[${{ needs.check-version.outputs.latest_version }}](https://github.com/home-assistant/frontend/releases/tag/${{ needs.check-version.outputs.latest_version }})
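One detail worth flagging in the `check_version` step above: `[[ "$current_version" < "$latest_version" ]]` compares strings lexicographically, which happens to work for the frontend's date-based tags (e.g. `20250401.0`) but not for version numbers in general. A more robust sketch uses `sort -V` (the helper name is illustrative):

```bash
#!/usr/bin/env bash
# version_ge A B - exit 0 when A >= B under version-aware ordering.
version_ge() {
  [ "$(printf '%s\n%s\n' "$1" "$2" | sort -V | tail -n1)" = "$1" ]
}

version_ge "20250401.0" "20250331.1" && echo "frontend is up to date"
# Lexicographically "9.0" > "10.0", but sort -V orders them correctly:
version_ge "9.0" "10.0" || echo "9.0 < 10.0"
```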

4 .gitmodules vendored Normal file

@@ -0,0 +1,4 @@
[submodule "home-assistant-polymer"]
path = home-assistant-polymer
url = https://github.com/home-assistant/home-assistant-polymer
branch = dev
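After cloning the superproject, the submodule has to be initialized explicitly; the standard commands (nothing project-specific assumed):

```bash
git submodule update --init home-assistant-polymer   # fetch the pinned commit
# Because .gitmodules sets branch = dev, this advances the checkout to the
# tip of that branch instead of the recorded commit:
git submodule update --remote home-assistant-polymer
```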


@@ -1 +0,0 @@
20250401.0


@@ -1,7 +1,5 @@
ignored:
- DL3003
- DL3018
- DL3006
- DL3013
- DL3018
- DL3042
- SC2155


@@ -1,27 +0,0 @@
repos:
- repo: https://github.com/astral-sh/ruff-pre-commit
rev: v0.11.10
hooks:
- id: ruff
args:
- --fix
- id: ruff-format
files: ^((supervisor|tests)/.+)?[^/]+\.py$
- repo: https://github.com/pre-commit/pre-commit-hooks
rev: v5.0.0
hooks:
- id: check-executables-have-shebangs
stages: [manual]
- id: check-json
- repo: local
hooks:
# Run mypy through our wrapper script in order to get the possible
# pyenv and/or virtualenv activated; it may not have been e.g. if
# committing from a GUI tool that was not launched from an activated
# shell.
- id: mypy
name: mypy
entry: script/run-in-env.sh mypy --ignore-missing-imports
language: script
types_or: [python, pyi]
files: ^supervisor/.+\.(py|pyi)$
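To exercise this configuration locally (standard pre-commit usage; note that `check-executables-have-shebangs` is pinned to the manual stage, so it needs `--hook-stage manual`):

```bash
pip install pre-commit
pre-commit install                  # run the default-stage hooks on every commit
pre-commit run --all-files          # one-off run of the default-stage hooks
pre-commit run check-executables-have-shebangs --all-files --hook-stage manual
```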


@@ -1,21 +0,0 @@
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class
# Distribution / packaging
*.egg-info/
# General files
.git
.github
.devcontainer
.vscode
.tox
# Data
home-assistant-polymer/
script/
tests/
data/
venv/

7 .vscode/launch.json vendored

@@ -13,13 +13,6 @@
"remoteRoot": "/usr/src/supervisor"
}
]
},
{
"name": "Debug Tests",
"type": "python",
"request": "test",
"console": "internalConsole",
"justMyCode": false
}
]
}

35 .vscode/tasks.json vendored

@@ -2,9 +2,9 @@
"version": "2.0.0",
"tasks": [
{
"label": "Run Supervisor",
"label": "Run Testenv",
"type": "shell",
"command": "supervisor_run",
"command": "./scripts/test_env.sh",
"group": {
"kind": "test",
"isDefault": true
@@ -16,7 +16,7 @@
"problemMatcher": []
},
{
"label": "Run Supervisor CLI",
"label": "Run Testenv CLI",
"type": "shell",
"command": "docker exec -ti hassio_cli /usr/bin/cli.sh",
"group": {
@@ -30,9 +30,9 @@
"problemMatcher": []
},
{
"label": "Update Supervisor Panel",
"label": "Update UI",
"type": "shell",
"command": "LOKALISE_TOKEN='${input:localiseToken}' ./scripts/update-frontend.sh",
"command": "./scripts/update-frontend.sh",
"group": {
"kind": "build",
"isDefault": true
@@ -58,23 +58,9 @@
"problemMatcher": []
},
{
"label": "Ruff Check",
"label": "Flake8",
"type": "shell",
"command": "ruff check --fix supervisor tests",
"group": {
"kind": "test",
"isDefault": true
},
"presentation": {
"reveal": "always",
"panel": "new"
},
"problemMatcher": []
},
{
"label": "Ruff Format",
"type": "shell",
"command": "ruff format supervisor tests",
"command": "flake8 supervisor tests",
"group": {
"kind": "test",
"isDefault": true
@@ -100,12 +86,5 @@
},
"problemMatcher": []
}
],
"inputs": [
{
"id": "localiseToken",
"type": "promptString",
"description": "Paste your lokalise token to download frontend translations"
}
]
}

1124 API.md Normal file

File diff suppressed because it is too large


@@ -1 +0,0 @@
.github/copilot-instructions.md


@@ -1,51 +1,38 @@
ARG BUILD_FROM
FROM ${BUILD_FROM}
FROM $BUILD_FROM
ENV \
S6_SERVICES_GRACETIME=10000 \
SUPERVISOR_API=http://localhost \
CRYPTOGRAPHY_OPENSSL_NO_LEGACY=1 \
UV_SYSTEM_PYTHON=true
ARG \
COSIGN_VERSION \
BUILD_ARCH \
QEMU_CPU
S6_SERVICES_GRACETIME=10000
# Install base
WORKDIR /usr/src
RUN \
set -x \
&& apk add --no-cache \
findutils \
apk add --no-cache \
eudev \
eudev-libs \
git \
glib \
libffi \
libpulse \
musl \
openssl \
yaml \
\
&& curl -Lso /usr/bin/cosign "https://github.com/home-assistant/cosign/releases/download/${COSIGN_VERSION}/cosign_${BUILD_ARCH}" \
&& chmod a+x /usr/bin/cosign \
&& pip3 install uv==0.6.17
socat
ARG BUILD_ARCH
WORKDIR /usr/src
# Install requirements
COPY requirements.txt .
RUN \
if [ "${BUILD_ARCH}" = "i386" ]; then \
setarch="linux32"; \
else \
setarch=""; \
fi \
&& ${setarch} uv pip install --compile-bytecode --no-cache --no-build -r requirements.txt \
export MAKEFLAGS="-j$(nproc)" \
&& pip3 install --no-cache-dir --no-index --only-binary=:all: --find-links \
"https://wheels.home-assistant.io/alpine-$(cut -d '.' -f 1-2 < /etc/alpine-release)/${BUILD_ARCH}/" \
-r ./requirements.txt \
&& rm -f requirements.txt
# Install Home Assistant Supervisor
COPY . supervisor
RUN \
uv pip install --no-cache -e ./supervisor \
pip3 install --no-cache-dir -e ./supervisor \
&& python3 -m compileall ./supervisor/supervisor
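In the older variant, the wheel index URL is assembled from the base image's Alpine release at build time; a sketch of the expansion (values illustrative):

```bash
# Inside an Alpine-based build stage:
alpine_version="$(cut -d '.' -f 1-2 < /etc/alpine-release)"   # "3.11.6" -> "3.11"
echo "https://wheels.home-assistant.io/alpine-${alpine_version}/${BUILD_ARCH}/"
# -> https://wheels.home-assistant.io/alpine-3.11/amd64/ when BUILD_ARCH=amd64
```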

811 LICENSE

@@ -1,201 +1,674 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
GNU GENERAL PUBLIC LICENSE
Version 3, 29 June 2007
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
Everyone is permitted to copy and distribute verbatim copies
of this license document, but changing it is not allowed.
1. Definitions.
Preamble
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
The GNU General Public License is a free, copyleft license for
software and other kinds of works.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
The licenses for most software and other practical works are designed
to take away your freedom to share and change the works. By contrast,
the GNU General Public License is intended to guarantee your freedom to
share and change all versions of a program--to make sure it remains free
software for all its users. We, the Free Software Foundation, use the
GNU General Public License for most of our software; it applies also to
any other work released this way by its authors. You can apply it to
your programs, too.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
When we speak of free software, we are referring to freedom, not
price. Our General Public Licenses are designed to make sure that you
have the freedom to distribute copies of free software (and charge for
them if you wish), that you receive source code or can get it if you
want it, that you can change the software or use pieces of it in new
free programs, and that you know you can do these things.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
To protect your rights, we need to prevent others from denying you
these rights or asking you to surrender the rights. Therefore, you have
certain responsibilities if you distribute copies of the software, or if
you modify it: responsibilities to respect the freedom of others.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
For example, if you distribute copies of such a program, whether
gratis or for a fee, you must pass on to the recipients the same
freedoms that you received. You must make sure that they, too, receive
or can get the source code. And you must show them these terms so they
know their rights.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
Developers that use the GNU GPL protect your rights with two steps:
(1) assert copyright on the software, and (2) offer you this License
giving you legal permission to copy, distribute and/or modify it.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
For the developers' and authors' protection, the GPL clearly explains
that there is no warranty for this free software. For both users' and
authors' sake, the GPL requires that modified versions be marked as
changed, so that their problems will not be attributed erroneously to
authors of previous versions.
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
Some devices are designed to deny users access to install or run
modified versions of the software inside them, although the manufacturer
can do so. This is fundamentally incompatible with the aim of
protecting users' freedom to change the software. The systematic
pattern of such abuse occurs in the area of products for individuals to
use, which is precisely where it is most unacceptable. Therefore, we
have designed this version of the GPL to prohibit the practice for those
products. If such problems arise substantially in other domains, we
stand ready to extend this provision to those domains in future versions
of the GPL, as needed to protect the freedom of users.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
Finally, every program is threatened constantly by software patents.
States should not allow patents to restrict development and use of
software on general-purpose computers, but in those that do, we wish to
avoid the special danger that patents applied to a free program could
make it effectively proprietary. To prevent this, the GPL assures that
patents cannot be used to render the program non-free.
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
The precise terms and conditions for copying, distribution and
modification follow.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
TERMS AND CONDITIONS
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
0. Definitions.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
"This License" refers to version 3 of the GNU General Public License.
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
"Copyright" also means copyright-like laws that apply to other kinds of
works, such as semiconductor masks.
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
"The Program" refers to any copyrightable work licensed under this
License. Each licensee is addressed as "you". "Licensees" and
"recipients" may be individuals or organizations.
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
To "modify" a work means to copy from or adapt all or part of the work
in a fashion requiring copyright permission, other than the making of an
exact copy. The resulting work is called a "modified version" of the
earlier work or a work "based on" the earlier work.
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
A "covered work" means either the unmodified Program or a work based
on the Program.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
To "propagate" a work means to do anything with it that, without
permission, would make you directly or secondarily liable for
infringement under applicable copyright law, except executing it on a
computer or modifying a private copy. Propagation includes copying,
distribution (with or without modification), making available to the
public, and in some countries other activities as well.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
To "convey" a work means any kind of propagation that enables other
parties to make or receive copies. Mere interaction with a user through
a computer network, with no transfer of a copy, is not conveying.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
An interactive user interface displays "Appropriate Legal Notices"
to the extent that it includes a convenient and prominently visible
feature that (1) displays an appropriate copyright notice, and (2)
tells the user that there is no warranty for the work (except to the
extent that warranties are provided), that licensees may convey the
work under this License, and how to view a copy of this License. If
the interface presents a list of user commands or options, such as a
menu, a prominent item in the list meets this criterion.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
1. Source Code.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
The "source code" for a work means the preferred form of the work
for making modifications to it. "Object code" means any non-source
form of a work.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
A "Standard Interface" means an interface that either is an official
standard defined by a recognized standards body, or, in the case of
interfaces specified for a particular programming language, one that
is widely used among developers working in that language.
END OF TERMS AND CONDITIONS
The "System Libraries" of an executable work include anything, other
than the work as a whole, that (a) is included in the normal form of
packaging a Major Component, but which is not part of that Major
Component, and (b) serves only to enable use of the work with that
Major Component, or to implement a Standard Interface for which an
implementation is available to the public in source code form. A
"Major Component", in this context, means a major essential component
(kernel, window system, and so on) of the specific operating system
(if any) on which the executable work runs, or a compiler used to
produce the work, or an object code interpreter used to run it.
APPENDIX: How to apply the Apache License to your work.
The "Corresponding Source" for a work in object code form means all
the source code needed to generate, install, and (for an executable
work) run the object code and to modify the work, including scripts to
control those activities. However, it does not include the work's
System Libraries, or general-purpose tools or generally available free
programs which are used unmodified in performing those activities but
which are not part of the work. For example, Corresponding Source
includes interface definition files associated with source files for
the work, and the source code for shared libraries and dynamically
linked subprograms that the work is specifically designed to require,
such as by intimate data communication or control flow between those
subprograms and other parts of the work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
The Corresponding Source need not include anything that users
can regenerate automatically from other parts of the Corresponding
Source.
Copyright [yyyy] [name of copyright owner]
The Corresponding Source for a work in source code form is that
same work.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
2. Basic Permissions.
http://www.apache.org/licenses/LICENSE-2.0
All rights granted under this License are granted for the term of
copyright on the Program, and are irrevocable provided the stated
conditions are met. This License explicitly affirms your unlimited
permission to run the unmodified Program. The output from running a
covered work is covered by this License only if the output, given its
content, constitutes a covered work. This License acknowledges your
rights of fair use or other equivalent, as provided by copyright law.
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
You may make, run and propagate covered works that you do not
convey, without conditions so long as your license otherwise remains
in force. You may convey covered works to others for the sole purpose
of having them make modifications exclusively for you, or provide you
with facilities for running those works, provided that you comply with
the terms of this License in conveying all material for which you do
not control copyright. Those thus making or running the covered works
for you must do so exclusively on your behalf, under your direction
and control, on terms that prohibit them from making any copies of
your copyrighted material outside their relationship with you.
Conveying under any other circumstances is permitted solely under
the conditions stated below. Sublicensing is not allowed; section 10
makes it unnecessary.
3. Protecting Users' Legal Rights From Anti-Circumvention Law.
No covered work shall be deemed part of an effective technological
measure under any applicable law fulfilling obligations under article
11 of the WIPO copyright treaty adopted on 20 December 1996, or
similar laws prohibiting or restricting circumvention of such
measures.
When you convey a covered work, you waive any legal power to forbid
circumvention of technological measures to the extent such circumvention
is effected by exercising rights under this License with respect to
the covered work, and you disclaim any intention to limit operation or
modification of the work as a means of enforcing, against the work's
users, your or third parties' legal rights to forbid circumvention of
technological measures.
4. Conveying Verbatim Copies.
You may convey verbatim copies of the Program's source code as you
receive it, in any medium, provided that you conspicuously and
appropriately publish on each copy an appropriate copyright notice;
keep intact all notices stating that this License and any
non-permissive terms added in accord with section 7 apply to the code;
keep intact all notices of the absence of any warranty; and give all
recipients a copy of this License along with the Program.
You may charge any price or no price for each copy that you convey,
and you may offer support or warranty protection for a fee.
5. Conveying Modified Source Versions.
You may convey a work based on the Program, or the modifications to
produce it from the Program, in the form of source code under the
terms of section 4, provided that you also meet all of these conditions:
a) The work must carry prominent notices stating that you modified
it, and giving a relevant date.
b) The work must carry prominent notices stating that it is
released under this License and any conditions added under section
7. This requirement modifies the requirement in section 4 to
"keep intact all notices".
c) You must license the entire work, as a whole, under this
License to anyone who comes into possession of a copy. This
License will therefore apply, along with any applicable section 7
additional terms, to the whole of the work, and all its parts,
regardless of how they are packaged. This License gives no
permission to license the work in any other way, but it does not
invalidate such permission if you have separately received it.
d) If the work has interactive user interfaces, each must display
Appropriate Legal Notices; however, if the Program has interactive
interfaces that do not display Appropriate Legal Notices, your
work need not make them do so.
A compilation of a covered work with other separate and independent
works, which are not by their nature extensions of the covered work,
and which are not combined with it such as to form a larger program,
in or on a volume of a storage or distribution medium, is called an
"aggregate" if the compilation and its resulting copyright are not
used to limit the access or legal rights of the compilation's users
beyond what the individual works permit. Inclusion of a covered work
in an aggregate does not cause this License to apply to the other
parts of the aggregate.
6. Conveying Non-Source Forms.
You may convey a covered work in object code form under the terms
of sections 4 and 5, provided that you also convey the
machine-readable Corresponding Source under the terms of this License,
in one of these ways:
a) Convey the object code in, or embodied in, a physical product
(including a physical distribution medium), accompanied by the
Corresponding Source fixed on a durable physical medium
customarily used for software interchange.
b) Convey the object code in, or embodied in, a physical product
(including a physical distribution medium), accompanied by a
written offer, valid for at least three years and valid for as
long as you offer spare parts or customer support for that product
model, to give anyone who possesses the object code either (1) a
copy of the Corresponding Source for all the software in the
product that is covered by this License, on a durable physical
medium customarily used for software interchange, for a price no
more than your reasonable cost of physically performing this
conveying of source, or (2) access to copy the
Corresponding Source from a network server at no charge.
c) Convey individual copies of the object code with a copy of the
written offer to provide the Corresponding Source. This
alternative is allowed only occasionally and noncommercially, and
only if you received the object code with such an offer, in accord
with subsection 6b.
d) Convey the object code by offering access from a designated
place (gratis or for a charge), and offer equivalent access to the
Corresponding Source in the same way through the same place at no
further charge. You need not require recipients to copy the
Corresponding Source along with the object code. If the place to
copy the object code is a network server, the Corresponding Source
may be on a different server (operated by you or a third party)
that supports equivalent copying facilities, provided you maintain
clear directions next to the object code saying where to find the
Corresponding Source. Regardless of what server hosts the
Corresponding Source, you remain obligated to ensure that it is
available for as long as needed to satisfy these requirements.
e) Convey the object code using peer-to-peer transmission, provided
you inform other peers where the object code and Corresponding
Source of the work are being offered to the general public at no
charge under subsection 6d.
A separable portion of the object code, whose source code is excluded
from the Corresponding Source as a System Library, need not be
included in conveying the object code work.
A "User Product" is either (1) a "consumer product", which means any
tangible personal property which is normally used for personal, family,
or household purposes, or (2) anything designed or sold for incorporation
into a dwelling. In determining whether a product is a consumer product,
doubtful cases shall be resolved in favor of coverage. For a particular
product received by a particular user, "normally used" refers to a
typical or common use of that class of product, regardless of the status
of the particular user or of the way in which the particular user
actually uses, or expects or is expected to use, the product. A product
is a consumer product regardless of whether the product has substantial
commercial, industrial or non-consumer uses, unless such uses represent
the only significant mode of use of the product.
"Installation Information" for a User Product means any methods,
procedures, authorization keys, or other information required to install
and execute modified versions of a covered work in that User Product from
a modified version of its Corresponding Source. The information must
suffice to ensure that the continued functioning of the modified object
code is in no case prevented or interfered with solely because
modification has been made.
If you convey an object code work under this section in, or with, or
specifically for use in, a User Product, and the conveying occurs as
part of a transaction in which the right of possession and use of the
User Product is transferred to the recipient in perpetuity or for a
fixed term (regardless of how the transaction is characterized), the
Corresponding Source conveyed under this section must be accompanied
by the Installation Information. But this requirement does not apply
if neither you nor any third party retains the ability to install
modified object code on the User Product (for example, the work has
been installed in ROM).
The requirement to provide Installation Information does not include a
requirement to continue to provide support service, warranty, or updates
for a work that has been modified or installed by the recipient, or for
the User Product in which it has been modified or installed. Access to a
network may be denied when the modification itself materially and
adversely affects the operation of the network or violates the rules and
protocols for communication across the network.
Corresponding Source conveyed, and Installation Information provided,
in accord with this section must be in a format that is publicly
documented (and with an implementation available to the public in
source code form), and must require no special password or key for
unpacking, reading or copying.
7. Additional Terms.
"Additional permissions" are terms that supplement the terms of this
License by making exceptions from one or more of its conditions.
Additional permissions that are applicable to the entire Program shall
be treated as though they were included in this License, to the extent
that they are valid under applicable law. If additional permissions
apply only to part of the Program, that part may be used separately
under those permissions, but the entire Program remains governed by
this License without regard to the additional permissions.
When you convey a copy of a covered work, you may at your option
remove any additional permissions from that copy, or from any part of
it. (Additional permissions may be written to require their own
removal in certain cases when you modify the work.) You may place
additional permissions on material, added by you to a covered work,
for which you have or can give appropriate copyright permission.
Notwithstanding any other provision of this License, for material you
add to a covered work, you may (if authorized by the copyright holders of
that material) supplement the terms of this License with terms:
a) Disclaiming warranty or limiting liability differently from the
terms of sections 15 and 16 of this License; or
b) Requiring preservation of specified reasonable legal notices or
author attributions in that material or in the Appropriate Legal
Notices displayed by works containing it; or
c) Prohibiting misrepresentation of the origin of that material, or
requiring that modified versions of such material be marked in
reasonable ways as different from the original version; or
d) Limiting the use for publicity purposes of names of licensors or
authors of the material; or
e) Declining to grant rights under trademark law for use of some
trade names, trademarks, or service marks; or
f) Requiring indemnification of licensors and authors of that
material by anyone who conveys the material (or modified versions of
it) with contractual assumptions of liability to the recipient, for
any liability that these contractual assumptions directly impose on
those licensors and authors.
All other non-permissive additional terms are considered "further
restrictions" within the meaning of section 10. If the Program as you
received it, or any part of it, contains a notice stating that it is
governed by this License along with a term that is a further
restriction, you may remove that term. If a license document contains
a further restriction but permits relicensing or conveying under this
License, you may add to a covered work material governed by the terms
of that license document, provided that the further restriction does
not survive such relicensing or conveying.
If you add terms to a covered work in accord with this section, you
must place, in the relevant source files, a statement of the
additional terms that apply to those files, or a notice indicating
where to find the applicable terms.
Additional terms, permissive or non-permissive, may be stated in the
form of a separately written license, or stated as exceptions;
the above requirements apply either way.
8. Termination.
You may not propagate or modify a covered work except as expressly
provided under this License. Any attempt otherwise to propagate or
modify it is void, and will automatically terminate your rights under
this License (including any patent licenses granted under the third
paragraph of section 11).
However, if you cease all violation of this License, then your
license from a particular copyright holder is reinstated (a)
provisionally, unless and until the copyright holder explicitly and
finally terminates your license, and (b) permanently, if the copyright
holder fails to notify you of the violation by some reasonable means
prior to 60 days after the cessation.
Moreover, your license from a particular copyright holder is
reinstated permanently if the copyright holder notifies you of the
violation by some reasonable means, this is the first time you have
received notice of violation of this License (for any work) from that
copyright holder, and you cure the violation prior to 30 days after
your receipt of the notice.
Termination of your rights under this section does not terminate the
licenses of parties who have received copies or rights from you under
this License. If your rights have been terminated and not permanently
reinstated, you do not qualify to receive new licenses for the same
material under section 10.
9. Acceptance Not Required for Having Copies.
You are not required to accept this License in order to receive or
run a copy of the Program. Ancillary propagation of a covered work
occurring solely as a consequence of using peer-to-peer transmission
to receive a copy likewise does not require acceptance. However,
nothing other than this License grants you permission to propagate or
modify any covered work. These actions infringe copyright if you do
not accept this License. Therefore, by modifying or propagating a
covered work, you indicate your acceptance of this License to do so.
10. Automatic Licensing of Downstream Recipients.
Each time you convey a covered work, the recipient automatically
receives a license from the original licensors, to run, modify and
propagate that work, subject to this License. You are not responsible
for enforcing compliance by third parties with this License.
An "entity transaction" is a transaction transferring control of an
organization, or substantially all assets of one, or subdividing an
organization, or merging organizations. If propagation of a covered
work results from an entity transaction, each party to that
transaction who receives a copy of the work also receives whatever
licenses to the work the party's predecessor in interest had or could
give under the previous paragraph, plus a right to possession of the
Corresponding Source of the work from the predecessor in interest, if
the predecessor has it or can get it with reasonable efforts.
You may not impose any further restrictions on the exercise of the
rights granted or affirmed under this License. For example, you may
not impose a license fee, royalty, or other charge for exercise of
rights granted under this License, and you may not initiate litigation
(including a cross-claim or counterclaim in a lawsuit) alleging that
any patent claim is infringed by making, using, selling, offering for
sale, or importing the Program or any portion of it.
11. Patents.
A "contributor" is a copyright holder who authorizes use under this
License of the Program or a work on which the Program is based. The
work thus licensed is called the contributor's "contributor version".
A contributor's "essential patent claims" are all patent claims
owned or controlled by the contributor, whether already acquired or
hereafter acquired, that would be infringed by some manner, permitted
by this License, of making, using, or selling its contributor version,
but do not include claims that would be infringed only as a
consequence of further modification of the contributor version. For
purposes of this definition, "control" includes the right to grant
patent sublicenses in a manner consistent with the requirements of
this License.
Each contributor grants you a non-exclusive, worldwide, royalty-free
patent license under the contributor's essential patent claims, to
make, use, sell, offer for sale, import and otherwise run, modify and
propagate the contents of its contributor version.
In the following three paragraphs, a "patent license" is any express
agreement or commitment, however denominated, not to enforce a patent
(such as an express permission to practice a patent or covenant not to
sue for patent infringement). To "grant" such a patent license to a
party means to make such an agreement or commitment not to enforce a
patent against the party.
If you convey a covered work, knowingly relying on a patent license,
and the Corresponding Source of the work is not available for anyone
to copy, free of charge and under the terms of this License, through a
publicly available network server or other readily accessible means,
then you must either (1) cause the Corresponding Source to be so
available, or (2) arrange to deprive yourself of the benefit of the
patent license for this particular work, or (3) arrange, in a manner
consistent with the requirements of this License, to extend the patent
license to downstream recipients. "Knowingly relying" means you have
actual knowledge that, but for the patent license, your conveying the
covered work in a country, or your recipient's use of the covered work
in a country, would infringe one or more identifiable patents in that
country that you have reason to believe are valid.
If, pursuant to or in connection with a single transaction or
arrangement, you convey, or propagate by procuring conveyance of, a
covered work, and grant a patent license to some of the parties
receiving the covered work authorizing them to use, propagate, modify
or convey a specific copy of the covered work, then the patent license
you grant is automatically extended to all recipients of the covered
work and works based on it.
A patent license is "discriminatory" if it does not include within
the scope of its coverage, prohibits the exercise of, or is
conditioned on the non-exercise of one or more of the rights that are
specifically granted under this License. You may not convey a covered
work if you are a party to an arrangement with a third party that is
in the business of distributing software, under which you make payment
to the third party based on the extent of your activity of conveying
the work, and under which the third party grants, to any of the
parties who would receive the covered work from you, a discriminatory
patent license (a) in connection with copies of the covered work
conveyed by you (or copies made from those copies), or (b) primarily
for and in connection with specific products or compilations that
contain the covered work, unless you entered into that arrangement,
or that patent license was granted, prior to 28 March 2007.
Nothing in this License shall be construed as excluding or limiting
any implied license or other defenses to infringement that may
otherwise be available to you under applicable patent law.
12. No Surrender of Others' Freedom.
If conditions are imposed on you (whether by court order, agreement or
otherwise) that contradict the conditions of this License, they do not
excuse you from the conditions of this License. If you cannot convey a
covered work so as to satisfy simultaneously your obligations under this
License and any other pertinent obligations, then as a consequence you may
not convey it at all. For example, if you agree to terms that obligate you
to collect a royalty for further conveying from those to whom you convey
the Program, the only way you could satisfy both those terms and this
License would be to refrain entirely from conveying the Program.
13. Use with the GNU Affero General Public License.
Notwithstanding any other provision of this License, you have
permission to link or combine any covered work with a work licensed
under version 3 of the GNU Affero General Public License into a single
combined work, and to convey the resulting work. The terms of this
License will continue to apply to the part which is the covered work,
but the special requirements of the GNU Affero General Public License,
section 13, concerning interaction through a network will apply to the
combination as such.
14. Revised Versions of this License.
The Free Software Foundation may publish revised and/or new versions of
the GNU General Public License from time to time. Such new versions will
be similar in spirit to the present version, but may differ in detail to
address new problems or concerns.
Each version is given a distinguishing version number. If the
Program specifies that a certain numbered version of the GNU General
Public License "or any later version" applies to it, you have the
option of following the terms and conditions either of that numbered
version or of any later version published by the Free Software
Foundation. If the Program does not specify a version number of the
GNU General Public License, you may choose any version ever published
by the Free Software Foundation.
If the Program specifies that a proxy can decide which future
versions of the GNU General Public License can be used, that proxy's
public statement of acceptance of a version permanently authorizes you
to choose that version for the Program.
Later license versions may give you additional or different
permissions. However, no additional obligations are imposed on any
author or copyright holder as a result of your choosing to follow a
later version.
15. Disclaimer of Warranty.
THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
16. Limitation of Liability.
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
SUCH DAMAGES.
17. Interpretation of Sections 15 and 16.
If the disclaimer of warranty and limitation of liability provided
above cannot be given local legal effect according to their terms,
reviewing courts shall apply local law that most closely approximates
an absolute waiver of all civil liability in connection with the
Program, unless a warranty or assumption of liability accompanies a
copy of the Program in return for a fee.
END OF TERMS AND CONDITIONS
How to Apply These Terms to Your New Programs
If you develop a new program, and you want it to be of the greatest
possible use to the public, the best way to achieve this is to make it
free software which everyone can redistribute and change under these terms.
To do so, attach the following notices to the program. It is safest
to attach them to the start of each source file to most effectively
state the exclusion of warranty; and each file should have at least
the "copyright" line and a pointer to where the full notice is found.
<one line to give the program's name and a brief idea of what it does.>
Copyright (C) <year> <name of author>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
Also add information on how to contact you by electronic and paper mail.
If the program does terminal interaction, make it output a short
notice like this when it starts in an interactive mode:
<program> Copyright (C) <year> <name of author>
This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
This is free software, and you are welcome to redistribute it
under certain conditions; type `show c' for details.
The hypothetical commands `show w' and `show c' should show the appropriate
parts of the General Public License. Of course, your program's commands
might be different; for a GUI interface, you would use an "about box".
You should also get your employer (if you work as a programmer) or school,
if any, to sign a "copyright disclaimer" for the program, if necessary.
For more information on this, and how to apply and follow the GNU GPL, see
<https://www.gnu.org/licenses/>.
The GNU General Public License does not permit incorporating your program
into proprietary programs. If your program is a subroutine library, you
may consider it more useful to permit linking proprietary applications with
the library. If this is what you want to do, use the GNU Lesser General
Public License instead of this License. But first, please read
<https://www.gnu.org/licenses/why-not-lgpl.html>.


@@ -10,25 +10,17 @@ network settings or installing and updating software.
## Installation
Installation instructions can be found at https://home-assistant.io/getting-started.
Installation instructions can be found at https://home-assistant.io/hassio.
## Development
For small changes and bugfixes you can just follow this, but for significant changes open an RFC first.
Development instructions can be found [here][development].
Developing the Supervisor is not difficult, but it can be tricky.
## Release
- You can use the builder to create your Supervisor: https://github.com/home-assistant/hassio-builder
- Access a HassOS device or VM and pull your Supervisor.
- Set the developer mode with the CLI tool: `ha supervisor options --channel=dev`
- Tag it as `homeassistant/xy-hassio-supervisor:latest`
- Restart the service with `systemctl restart hassos-supervisor && journalctl -fu hassos-supervisor`
- Test your changes
Releases are done in 3 stages (channels) with this structure:
1. Pull requests are merged to the `main` branch.
2. A new build is pushed to the `dev` stage.
3. Releases are published.
4. A new build is pushed to the `beta` stage.
5. The [`stable.json`][stable] file is updated.
6. The build that was pushed to `beta` will now be pushed to `stable`.
[development]: https://developers.home-assistant.io/docs/supervisor/development
[stable]: https://github.com/home-assistant/version/blob/master/stable.json
[![Home Assistant - A project from the Open Home Foundation](https://www.openhomefoundation.org/badges/home-assistant.png)](https://www.openhomefoundation.org/)
For small bugfixes or improvements, make a PR. For significant changes, please open an RFC first. Thanks.

52 azure-pipelines-ci.yml Normal file

@@ -0,0 +1,52 @@
# https://dev.azure.com/home-assistant
trigger:
batch: true
branches:
include:
- master
- dev
pr:
- dev
variables:
- name: versionHadolint
value: "v1.16.3"
jobs:
- job: "Tox"
pool:
vmImage: "ubuntu-latest"
steps:
- script: |
sudo apt-get update
sudo apt-get install -y libpulse0 libudev1
displayName: "Install Host library"
- task: UsePythonVersion@0
displayName: "Use Python 3.7"
inputs:
versionSpec: "3.7"
- script: pip install tox
displayName: "Install Tox"
- script: tox
displayName: "Run Tox"
- job: "JQ"
pool:
vmImage: "ubuntu-latest"
steps:
- script: sudo apt-get install -y jq
displayName: "Install JQ"
- bash: |
shopt -s globstar
cat **/*.json | jq '.'
displayName: "Run JQ"
- job: "Hadolint"
pool:
vmImage: "ubuntu-latest"
steps:
- script: sudo docker pull hadolint/hadolint:$(versionHadolint)
displayName: "Install Hadolint"
- script: |
sudo docker run --rm -i \
-v $(pwd)/.hadolint.yaml:/.hadolint.yaml:ro \
hadolint/hadolint:$(versionHadolint) < Dockerfile
displayName: "Run Hadolint"


@@ -0,0 +1,53 @@
# https://dev.azure.com/home-assistant
trigger:
batch: true
branches:
include:
- dev
tags:
include:
- "*"
pr: none
variables:
- name: versionBuilder
value: "7.0"
- group: docker
jobs:
- job: "VersionValidate"
pool:
vmImage: "ubuntu-latest"
steps:
- task: UsePythonVersion@0
displayName: "Use Python 3.7"
inputs:
versionSpec: "3.7"
- script: |
setup_version="$(python setup.py -V)"
branch_version="$(Build.SourceBranchName)"
if [ "${branch_version}" == "dev" ]; then
exit 0
elif [ "${setup_version}" != "${branch_version}" ]; then
echo "Version of tag ${branch_version} don't match with ${setup_version}!"
exit 1
fi
displayName: "Check version of branch/tag"
- job: "Release"
dependsOn:
- "VersionValidate"
pool:
vmImage: "ubuntu-latest"
steps:
- script: sudo docker login -u $(dockerUser) -p $(dockerPassword)
displayName: "Docker hub login"
- script: sudo docker pull homeassistant/amd64-builder:$(versionBuilder)
displayName: "Install Builder"
- script: |
sudo docker run --rm --privileged \
-v ~/.docker:/root/.docker \
-v /run/docker.sock:/run/docker.sock:rw -v $(pwd):/data:ro \
homeassistant/amd64-builder:$(versionBuilder) \
--generic $(Build.SourceBranchName) --all -t /data
displayName: "Build Release"


@@ -0,0 +1,26 @@
# https://dev.azure.com/home-assistant
trigger:
batch: true
branches:
include:
- dev
pr: none
variables:
- name: versionWheels
value: '1.6.1-3.7-alpine3.11'
resources:
repositories:
- repository: azure
type: github
name: 'home-assistant/ci-azure'
endpoint: 'home-assistant'
jobs:
- template: templates/azp-job-wheels.yaml@azure
parameters:
builderVersion: '$(versionWheels)'
builderApk: 'build-base;libffi-dev;openssl-dev'
builderPip: 'Cython'
wheelsRequirement: 'requirements.txt'

13 build.json Normal file

@@ -0,0 +1,13 @@
{
"image": "homeassistant/{arch}-hassio-supervisor",
"build_from": {
"aarch64": "homeassistant/aarch64-base-python:3.7-alpine3.11",
"armhf": "homeassistant/armhf-base-python:3.7-alpine3.11",
"armv7": "homeassistant/armv7-base-python:3.7-alpine3.11",
"amd64": "homeassistant/amd64-base-python:3.7-alpine3.11",
"i386": "homeassistant/i386-base-python:3.7-alpine3.11"
},
"labels": {
"io.hass.type": "supervisor"
}
}
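The `{arch}` placeholder and the per-arch base image are resolved by the builder, but the lookup itself is a one-liner if you want to reproduce it (jq sketch, arch value illustrative):

```bash
arch="amd64"
jq -r --arg arch "$arch" '.build_from[$arch]' build.json
# -> homeassistant/amd64-base-python:3.7-alpine3.11
```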


@@ -1,24 +0,0 @@
image: ghcr.io/home-assistant/{arch}-hassio-supervisor
build_from:
aarch64: ghcr.io/home-assistant/aarch64-base-python:3.13-alpine3.22
armhf: ghcr.io/home-assistant/armhf-base-python:3.13-alpine3.22
armv7: ghcr.io/home-assistant/armv7-base-python:3.13-alpine3.22
amd64: ghcr.io/home-assistant/amd64-base-python:3.13-alpine3.22
i386: ghcr.io/home-assistant/i386-base-python:3.13-alpine3.22
codenotary:
signer: notary@home-assistant.io
base_image: notary@home-assistant.io
cosign:
base_identity: https://github.com/home-assistant/docker-base/.*
identity: https://github.com/home-assistant/supervisor/.*
args:
COSIGN_VERSION: 2.4.3
labels:
io.hass.type: supervisor
org.opencontainers.image.title: Home Assistant Supervisor
org.opencontainers.image.description: Container-based system for managing Home Assistant Core installation
org.opencontainers.image.source: https://github.com/home-assistant/supervisor
org.opencontainers.image.authors: The Home Assistant Authors
org.opencontainers.image.url: https://www.home-assistant.io/
org.opencontainers.image.documentation: https://www.home-assistant.io/docs/
org.opencontainers.image.licenses: Apache License 2.0

View File

@ -1,11 +0,0 @@
codecov:
branch: dev
coverage:
status:
project:
default:
target: 40
threshold: 0.09
comment: false
github_checks:
annotations: false

@ -0,0 +1 @@
Subproject commit 300c8d06c4c73601bcefbc1b0baeceef007fdba9

44
pylintrc Normal file
View File

@ -0,0 +1,44 @@
[MASTER]
reports=no
jobs=2
# Reasons disabled:
# format - handled by black
# locally-disabled - it spams too much
# duplicate-code - unavoidable
# cyclic-import - doesn't test if both import on load
# abstract-class-little-used - prevents from setting right foundation
# abstract-class-not-used - is flaky, should not show up but does
# unused-argument - generic callbacks and setup methods create a lot of warnings
# redefined-variable-type - this is Python, we're duck typing!
# too-many-* - are not enforced for the sake of readability
# too-few-* - same as too-many-*
# abstract-method - with intro of async there are always methods missing
disable=
format,
abstract-class-little-used,
abstract-method,
cyclic-import,
duplicate-code,
locally-disabled,
no-else-return,
no-self-use,
not-context-manager,
redefined-variable-type,
too-few-public-methods,
too-many-arguments,
too-many-branches,
too-many-instance-attributes,
too-many-lines,
too-many-locals,
too-many-public-methods,
too-many-return-statements,
too-many-statements,
unused-argument,
[EXCEPTIONS]
overgeneral-exceptions=Exception
[TYPECHECK]
ignored-modules = distutils

View File

@ -1,376 +0,0 @@
[build-system]
requires = ["setuptools~=80.9.0", "wheel~=0.46.1"]
build-backend = "setuptools.build_meta"
[project]
name = "Supervisor"
dynamic = ["version", "dependencies"]
license = { text = "Apache-2.0" }
description = "Open-source private cloud os for Home-Assistant based on HassOS"
readme = "README.md"
authors = [
{ name = "The Home Assistant Authors", email = "hello@home-assistant.io" },
]
keywords = ["docker", "home-assistant", "api"]
requires-python = ">=3.13.0"
[project.urls]
"Homepage" = "https://www.home-assistant.io/"
"Source Code" = "https://github.com/home-assistant/supervisor"
"Bug Reports" = "https://github.com/home-assistant/supervisor/issues"
"Docs: Dev" = "https://developers.home-assistant.io/"
"Discord" = "https://www.home-assistant.io/join-chat/"
"Forum" = "https://community.home-assistant.io/"
[tool.setuptools]
platforms = ["any"]
zip-safe = false
include-package-data = true
[tool.setuptools.packages.find]
include = ["supervisor*"]
[tool.pylint.MAIN]
py-version = "3.13"
# Use a conservative default here; 2 should speed up most setups and not hurt
# any too bad. Override on command line as appropriate.
jobs = 2
persistent = false
extension-pkg-allow-list = ["ciso8601"]
[tool.pylint.BASIC]
class-const-naming-style = "any"
good-names = ["id", "i", "j", "k", "ex", "Run", "_", "fp", "T", "os"]
[tool.pylint."MESSAGES CONTROL"]
# Reasons disabled:
# format - handled by ruff
# abstract-method - with intro of async there are always methods missing
# cyclic-import - doesn't test if both import on load
# duplicate-code - unavoidable
# locally-disabled - it spams too much
# too-many-* - are not enforced for the sake of readability
# too-few-* - same as too-many-*
# unused-argument - generic callbacks and setup methods create a lot of warnings
disable = [
"format",
"abstract-method",
"cyclic-import",
"duplicate-code",
"locally-disabled",
"no-else-return",
"not-context-manager",
"too-few-public-methods",
"too-many-arguments",
"too-many-branches",
"too-many-instance-attributes",
"too-many-lines",
"too-many-locals",
"too-many-public-methods",
"too-many-return-statements",
"too-many-statements",
"unused-argument",
"consider-using-with",
# Handled by ruff
# Ref: <https://github.com/astral-sh/ruff/issues/970>
"await-outside-async", # PLE1142
"bad-str-strip-call", # PLE1310
"bad-string-format-type", # PLE1307
"bidirectional-unicode", # PLE2502
"continue-in-finally", # PLE0116
"duplicate-bases", # PLE0241
"format-needs-mapping", # F502
"function-redefined", # F811
# Needed because ruff does not understand type of __all__ generated by a function
# "invalid-all-format", # PLE0605
"invalid-all-object", # PLE0604
"invalid-character-backspace", # PLE2510
"invalid-character-esc", # PLE2513
"invalid-character-nul", # PLE2514
"invalid-character-sub", # PLE2512
"invalid-character-zero-width-space", # PLE2515
"logging-too-few-args", # PLE1206
"logging-too-many-args", # PLE1205
"missing-format-string-key", # F524
"mixed-format-string", # F506
"no-method-argument", # N805
"no-self-argument", # N805
"nonexistent-operator", # B002
"nonlocal-without-binding", # PLE0117
"not-in-loop", # F701, F702
"notimplemented-raised", # F901
"return-in-init", # PLE0101
"return-outside-function", # F706
"syntax-error", # E999
"too-few-format-args", # F524
"too-many-format-args", # F522
"too-many-star-expressions", # F622
"truncated-format-string", # F501
"undefined-all-variable", # F822
"undefined-variable", # F821
"used-prior-global-declaration", # PLE0118
"yield-inside-async-function", # PLE1700
"yield-outside-function", # F704
"anomalous-backslash-in-string", # W605
"assert-on-string-literal", # PLW0129
"assert-on-tuple", # F631
"bad-format-string", # W1302, F
"bad-format-string-key", # W1300, F
"bare-except", # E722
"binary-op-exception", # PLW0711
"cell-var-from-loop", # B023
# "dangerous-default-value", # B006, ruff catches new occurrences, needs more work
"duplicate-except", # B014
"duplicate-key", # F601
"duplicate-string-formatting-argument", # F
"duplicate-value", # F
"eval-used", # PGH001
"exec-used", # S102
# "expression-not-assigned", # B018, ruff catches new occurrences, needs more work
"f-string-without-interpolation", # F541
"forgotten-debug-statement", # T100
"format-string-without-interpolation", # F
# "global-statement", # PLW0603, ruff catches new occurrences, needs more work
"global-variable-not-assigned", # PLW0602
"implicit-str-concat", # ISC001
"import-self", # PLW0406
"inconsistent-quotes", # Q000
"invalid-envvar-default", # PLW1508
"keyword-arg-before-vararg", # B026
"logging-format-interpolation", # G
"logging-fstring-interpolation", # G
"logging-not-lazy", # G
"misplaced-future", # F404
"named-expr-without-context", # PLW0131
"nested-min-max", # PLW3301
# "pointless-statement", # B018, ruff catches new occurrences, needs more work
"raise-missing-from", # TRY200
# "redefined-builtin", # A001, ruff is way more stricter, needs work
"try-except-raise", # TRY203
"unused-argument", # ARG001, we don't use it
"unused-format-string-argument", #F507
"unused-format-string-key", # F504
"unused-import", # F401
"unused-variable", # F841
"useless-else-on-loop", # PLW0120
"wildcard-import", # F403
"bad-classmethod-argument", # N804
"consider-iterating-dictionary", # SIM118
"empty-docstring", # D419
"invalid-name", # N815
"line-too-long", # E501, disabled globally
"missing-class-docstring", # D101
"missing-final-newline", # W292
"missing-function-docstring", # D103
"missing-module-docstring", # D100
"multiple-imports", #E401
"singleton-comparison", # E711, E712
"subprocess-run-check", # PLW1510
"superfluous-parens", # UP034
"ungrouped-imports", # I001
"unidiomatic-typecheck", # E721
"unnecessary-direct-lambda-call", # PLC3002
"unnecessary-lambda-assignment", # PLC3001
"unneeded-not", # SIM208
"useless-import-alias", # PLC0414
"wrong-import-order", # I001
"wrong-import-position", # E402
"comparison-of-constants", # PLR0133
"comparison-with-itself", # PLR0124
# "consider-alternative-union-syntax", # UP007, typing extension
"consider-merging-isinstance", # PLR1701
# "consider-using-alias", # UP006, typing extension
"consider-using-dict-comprehension", # C402
"consider-using-generator", # C417
"consider-using-get", # SIM401
"consider-using-set-comprehension", # C401
"consider-using-sys-exit", # PLR1722
"consider-using-ternary", # SIM108
"literal-comparison", # F632
"property-with-parameters", # PLR0206
"super-with-arguments", # UP008
"too-many-branches", # PLR0912
"too-many-return-statements", # PLR0911
"too-many-statements", # PLR0915
"trailing-comma-tuple", # COM818
"unnecessary-comprehension", # C416
"use-a-generator", # C417
"use-dict-literal", # C406
"use-list-literal", # C405
"useless-object-inheritance", # UP004
"useless-return", # PLR1711
# "no-self-use", # PLR6301 # Optional plugin, not enabled
]
[tool.pylint.REPORTS]
score = false
[tool.pylint.TYPECHECK]
ignored-modules = ["distutils"]
[tool.pylint.FORMAT]
expected-line-ending-format = "LF"
[tool.pylint.EXCEPTIONS]
overgeneral-exceptions = ["builtins.BaseException", "builtins.Exception"]
[tool.pylint.DESIGN]
max-positional-arguments = 10
[tool.pytest.ini_options]
testpaths = ["tests"]
norecursedirs = [".git"]
log_format = "%(asctime)s.%(msecs)03d %(levelname)-8s %(threadName)s %(name)s:%(filename)s:%(lineno)s %(message)s"
log_date_format = "%Y-%m-%d %H:%M:%S"
asyncio_default_fixture_loop_scope = "function"
asyncio_mode = "auto"
filterwarnings = [
"error",
"ignore:pkg_resources is deprecated as an API:DeprecationWarning:dirhash",
"ignore::pytest.PytestUnraisableExceptionWarning",
]
markers = [
"no_mock_init_websession: disable the autouse mock of init_websession for this test",
]
[tool.ruff]
lint.select = [
"B002", # Python does not support the unary prefix increment
"B007", # Loop control variable {name} not used within loop body
"B014", # Exception handler with duplicate exception
"B023", # Function definition does not bind loop variable {name}
"B026", # Star-arg unpacking after a keyword argument is strongly discouraged
"B904", # Use raise from to specify exception cause
"C", # complexity
"COM818", # Trailing comma on bare tuple prohibited
"D", # docstrings
"DTZ003", # Use datetime.now(tz=) instead of datetime.utcnow()
"DTZ004", # Use datetime.fromtimestamp(ts, tz=) instead of datetime.utcfromtimestamp(ts)
"E", # pycodestyle
"F", # pyflakes/autoflake
"G", # flake8-logging-format
"I", # isort
"ICN001", # import concentions; {name} should be imported as {asname}
"N804", # First argument of a class method should be named cls
"N805", # First argument of a method should be named self
"N815", # Variable {name} in class scope should not be mixedCase
"PGH004", # Use specific rule codes when using noqa
"PLC0414", # Useless import alias. Import alias does not rename original package.
"PLC", # pylint
"PLE", # pylint
"PLR", # pylint
"PLW", # pylint
"Q000", # Double quotes found but single quotes preferred
"RUF006", # Store a reference to the return value of asyncio.create_task
"S102", # Use of exec detected
"S103", # bad-file-permissions
"S108", # hardcoded-temp-file
"S306", # suspicious-mktemp-usage
"S307", # suspicious-eval-usage
"S313", # suspicious-xmlc-element-tree-usage
"S314", # suspicious-xml-element-tree-usage
"S315", # suspicious-xml-expat-reader-usage
"S316", # suspicious-xml-expat-builder-usage
"S317", # suspicious-xml-sax-usage
"S318", # suspicious-xml-mini-dom-usage
"S319", # suspicious-xml-pull-dom-usage
"S601", # paramiko-call
"S602", # subprocess-popen-with-shell-equals-true
"S604", # call-with-shell-equals-true
"S608", # hardcoded-sql-expression
"S609", # unix-command-wildcard-injection
"SIM105", # Use contextlib.suppress({exception}) instead of try-except-pass
"SIM117", # Merge with-statements that use the same scope
"SIM118", # Use {key} in {dict} instead of {key} in {dict}.keys()
"SIM201", # Use {left} != {right} instead of not {left} == {right}
"SIM208", # Use {expr} instead of not (not {expr})
"SIM212", # Use {a} if {a} else {b} instead of {b} if not {a} else {a}
"SIM300", # Yoda conditions. Use 'age == 42' instead of '42 == age'.
"SIM401", # Use get from dict with default instead of an if block
"T100", # Trace found: {name} used
"T20", # flake8-print
"TID251", # Banned imports
"TRY004", # Prefer TypeError exception for invalid type
"TRY203", # Remove exception handler; error is immediately re-raised
"UP", # pyupgrade
"W", # pycodestyle
]
lint.ignore = [
"D202", # No blank lines allowed after function docstring
"D203", # 1 blank line required before class docstring
"D213", # Multi-line docstring summary should start at the second line
"D406", # Section name should end with a newline
"D407", # Section name underlining
"E501", # line too long
"E731", # do not assign a lambda expression, use a def
# Ignore ignored, as the rule is now back in preview/nursery, which cannot
# be ignored anymore without warnings.
# https://github.com/astral-sh/ruff/issues/7491
# "PLC1901", # Lots of false positives
# False positives https://github.com/astral-sh/ruff/issues/5386
"PLC0208", # Use a sequence type instead of a `set` when iterating over values
"PLR0911", # Too many return statements ({returns} > {max_returns})
"PLR0912", # Too many branches ({branches} > {max_branches})
"PLR0913", # Too many arguments to function call ({c_args} > {max_args})
"PLR0915", # Too many statements ({statements} > {max_statements})
"PLR2004", # Magic value used in comparison, consider replacing {value} with a constant variable
"PLW2901", # Outer {outer_kind} variable {name} overwritten by inner {inner_kind} target
"UP006", # keep type annotation style as is
"UP007", # keep type annotation style as is
# Ignored due to performance: https://github.com/charliermarsh/ruff/issues/2923
"UP038", # Use `X | Y` in `isinstance` call instead of `(X, Y)`
# May conflict with the formatter, https://docs.astral.sh/ruff/formatter/#conflicting-lint-rules
"W191",
"E111",
"E114",
"E117",
"D206",
"D300",
"Q000",
"Q001",
"Q002",
"Q003",
"COM812",
"COM819",
"ISC001",
"ISC002",
# Disabled because ruff does not understand type of __all__ generated by a function
"PLE0605",
]
[tool.ruff.lint.flake8-import-conventions.extend-aliases]
voluptuous = "vol"
[tool.ruff.lint.flake8-pytest-style]
fixture-parentheses = false
[tool.ruff.lint.flake8-tidy-imports.banned-api]
"pytz".msg = "use zoneinfo instead"
[tool.ruff.lint.isort]
force-sort-within-sections = true
section-order = [
"future",
"standard-library",
"third-party",
"first-party",
"local-folder",
]
forced-separate = ["tests"]
known-first-party = ["supervisor", "tests"]
combine-as-imports = true
split-on-trailing-comma = false
[tool.ruff.lint.per-file-ignores]
# DBus Service Mocks must use typing and names understood by dbus-fast
"tests/dbus_service_mocks/*.py" = ["F722", "F821", "N815"]
[tool.ruff.lint.mccabe]
max-complexity = 25

View File

@ -1,30 +1,18 @@
aiodns==3.5.0
aiohttp==3.12.15
atomicwrites-homeassistant==1.4.1
attrs==25.3.0
awesomeversion==25.8.0
blockbuster==1.5.25
brotli==1.1.0
ciso8601==2.3.2
colorlog==6.9.0
cpe==1.3.1
cryptography==45.0.5
debugpy==1.8.15
deepmerge==2.0
dirhash==0.5.0
docker==7.1.0
faust-cchardet==2.1.19
gitpython==3.1.45
jinja2==3.1.6
log-rate-limit==1.4.2
orjson==3.11.1
pulsectl==24.12.0
pyudev==0.24.3
PyYAML==6.0.2
requests==2.32.4
securetar==2025.2.1
sentry-sdk==2.34.1
setuptools==80.9.0
voluptuous==0.15.2
dbus-fast==2.44.3
zlib-fast==0.2.1
aiohttp==3.6.1
async_timeout==3.0.1
attrs==19.3.0
cchardet==2.1.6
colorlog==4.1.0
cpe==1.2.1
cryptography==2.9.2
docker==4.2.0
gitpython==3.1.2
jinja2==2.11.2
packaging==20.4
ptvsd==4.3.2
pulsectl==20.5.0
pytz==2020.1
pyudev==0.22.0
ruamel.yaml==0.15.100
uvloop==0.14.0
voluptuous==0.11.7

View File

@ -1,16 +1,6 @@
astroid==3.3.11
coverage==7.10.2
mypy==1.17.1
pre-commit==4.2.0
pylint==3.3.7
pytest-aiohttp==1.1.0
pytest-asyncio==0.25.2
pytest-cov==6.2.1
pytest-timeout==2.4.0
pytest==8.4.1
ruff==0.12.7
time-machine==2.16.0
types-docker==7.1.0.20250705
types-pyyaml==6.0.12.20250516
types-requests==2.32.4.20250611
urllib3==2.5.0
flake8==3.8.1
pylint==2.5.2
pytest==5.4.2
pytest-timeout==1.3.4
pytest-aiohttp==0.3.0
black==19.10b0

17
rootfs/etc/cont-init.d/udev.sh Executable file → Normal file
View File

@ -2,19 +2,8 @@
# ==============================================================================
# Start udev service
# ==============================================================================
if bashio::fs.directory_exists /run/udev && ! bashio::fs.file_exists /run/.old_udev; then
bashio::log.info "Using udev information from host"
bashio::exit.ok
fi
bashio::log.info "Setup udev backend inside container"
udevd --daemon
bashio::log.info "Update udev information"
touch /run/.old_udev
if udevadm trigger; then
udevadm settle || true
else
bashio::log.warning "Triggering of udev rules fails!"
fi
bashio::log.info "Update udev informations"
udevadm trigger
udevadm settle

View File

@ -26,7 +26,7 @@ autospawn = no
; daemon-binary = /usr/bin/pulseaudio
; extra-arguments = --log-target=syslog
cookie-file = /run/pulse-cookie
; cookie-file =
; enable-shm = yes
; shm-size-bytes = 0 # setting this 0 will use the system-default, usually 64 MiB

10
rootfs/etc/services.d/supervisor/finish Executable file → Normal file
View File

@ -1,11 +1,5 @@
#!/usr/bin/env bashio
#!/usr/bin/execlineb -S0
# ==============================================================================
# Take down the S6 supervision tree when Supervisor fails
# ==============================================================================
if [[ "$1" -ne 100 ]] && [[ "$1" -ne 256 ]]; then
bashio::log.warning "Halt Supervisor"
/run/s6/basedir/bin/halt
fi
bashio::log.info "Supervisor restart after closing"
s6-svscanctl -t /var/run/s6/services

5
rootfs/etc/services.d/supervisor/run Executable file → Normal file
View File

@ -1,8 +1,7 @@
#!/usr/bin/with-contenv bashio
# ==============================================================================
# Start Supervisor service
# Start Service service
# ==============================================================================
export LD_PRELOAD="/usr/local/lib/libjemalloc.so.2"
export MALLOC_CONF="background_thread:true,metadata_thp:auto"
exec python3 -m supervisor
exec python3 -m supervisor

View File

@ -1,11 +0,0 @@
#!/usr/bin/env bashio
# ==============================================================================
# Take down the S6 supervision tree when Watchdog fails
# ==============================================================================
if [[ "$1" -ne 0 ]] && [[ "$1" -ne 256 ]]; then
bashio::log.warning "Halt Supervisor (Wuff)"
/run/s6/basedir/bin/halt
fi
bashio::log.info "Watchdog restart after closing"

View File

@ -1,34 +0,0 @@
#!/usr/bin/with-contenv bashio
# ==============================================================================
# Start Watchdog service
# ==============================================================================
declare failed_count=0
declare supervisor_state
bashio::log.info "Starting local supervisor watchdog..."
while [[ failed_count -lt 2 ]];
do
sleep 300
supervisor_state="$(cat /run/supervisor)"
if [[ "${supervisor_state}" = "running" ]]; then
# Check API
if bashio::supervisor.ping > /dev/null; then
failed_count=0
else
bashio::log.warning "Maybe found an issue on API healthy"
((failed_count++))
fi
elif [[ "close stopping" = *"${supervisor_state}"* ]]; then
bashio::log.warning "Maybe found an issue on shutdown"
((failed_count++))
else
failed_count=0
fi
done
bashio::exit.nok "Watchdog detected issue with Supervisor - taking container down!"
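The loop above gives the Supervisor two strikes: every five minutes it reads the state from /run/supervisor and pings the API via bashio, and only two consecutive failures take the container down. A rough Python equivalent (the ping() helper is a placeholder for bashio::supervisor.ping):

# watchdog_sketch.py - rough Python rendering of the watchdog loop above.
import time
from pathlib import Path

def ping() -> bool:
    """Placeholder for bashio::supervisor.ping."""
    raise NotImplementedError

failed = 0
while failed < 2:
    time.sleep(300)
    state = Path("/run/supervisor").read_text().strip()
    if state == "running":
        failed = 0 if ping() else failed + 1
    elif state in ("close", "stopping"):
        failed += 1  # possible issue on shutdown
    else:
        failed = 0
raise SystemExit("Watchdog detected issue with Supervisor - taking container down!")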

View File

@ -1,30 +0,0 @@
#!/usr/bin/env sh
set -eu
# Used in venv activate script.
# Would be an error if undefined.
OSTYPE="${OSTYPE-}"
# Activate pyenv and virtualenv if present, then run the specified command
# pyenv, pyenv-virtualenv
if [ -s .python-version ]; then
PYENV_VERSION=$(head -n 1 .python-version)
export PYENV_VERSION
fi
if [ -n "${VIRTUAL_ENV-}" ] && [ -f "${VIRTUAL_ENV}/bin/activate" ]; then
. "${VIRTUAL_ENV}/bin/activate"
else
# other common virtualenvs
my_path=$(git rev-parse --show-toplevel)
for venv in venv .venv .; do
if [ -f "${my_path}/${venv}/bin/activate" ]; then
. "${my_path}/${venv}/bin/activate"
break
fi
done
fi
exec "$@"

133
scripts/test_env.sh Executable file
View File

@ -0,0 +1,133 @@
#!/bin/bash
set -eE
DOCKER_TIMEOUT=30
DOCKER_PID=0
function start_docker() {
local starttime
local endtime
echo "Starting docker."
dockerd 2> /dev/null &
DOCKER_PID=$!
echo "Waiting for docker to initialize..."
starttime="$(date +%s)"
endtime="$(date +%s)"
until docker info >/dev/null 2>&1; do
if [ $((endtime - starttime)) -le $DOCKER_TIMEOUT ]; then
sleep 1
endtime=$(date +%s)
else
echo "Timeout while waiting for docker to come up"
exit 1
fi
done
echo "Docker was initialized"
}
function stop_docker() {
local starttime
local endtime
echo "Stopping in container docker..."
if [ "$DOCKER_PID" -gt 0 ] && kill -0 "$DOCKER_PID" 2> /dev/null; then
starttime="$(date +%s)"
endtime="$(date +%s)"
# Now wait for it to die
kill "$DOCKER_PID"
while kill -0 "$DOCKER_PID" 2> /dev/null; do
if [ $((endtime - starttime)) -le $DOCKER_TIMEOUT ]; then
sleep 1
endtime=$(date +%s)
else
echo "Timeout while waiting for container docker to die"
exit 1
fi
done
else
echo "Your host might have been left with unreleased resources"
fi
}
function build_supervisor() {
docker pull homeassistant/amd64-builder:dev
docker run --rm --privileged \
-v /run/docker.sock:/run/docker.sock -v "$(pwd):/data" \
homeassistant/amd64-builder:dev \
--generic dev -t /data --test --amd64 --no-cache
}
function cleanup_lastboot() {
if [[ -f /workspaces/test_supervisor/config.json ]]; then
echo "Cleaning up last boot"
cp /workspaces/test_supervisor/config.json /tmp/config.json
jq -rM 'del(.last_boot)' /tmp/config.json > /workspaces/test_supervisor/config.json
rm /tmp/config.json
fi
}
function cleanup_docker() {
echo "Cleaning up stopped containers..."
docker rm $(docker ps -a -q) || true
}
function setup_test_env() {
mkdir -p /workspaces/test_supervisor
echo "Start Supervisor"
docker run --rm --privileged \
--name hassio_supervisor \
--security-opt seccomp=unconfined \
--security-opt apparmor:unconfined \
-v /run/docker.sock:/run/docker.sock \
-v /run/dbus:/run/dbus \
-v "/workspaces/test_supervisor":/data \
-v /etc/machine-id:/etc/machine-id:ro \
-e SUPERVISOR_SHARE="/workspaces/test_supervisor" \
-e SUPERVISOR_NAME=hassio_supervisor \
-e SUPERVISOR_DEV=1 \
-e SUPERVISOR_MACHINE="qemux86-64" \
homeassistant/amd64-hassio-supervisor:latest
}
function init_dbus() {
if pgrep dbus-daemon; then
echo "Dbus is running"
return 0
fi
echo "Startup dbus"
mkdir -p /var/lib/dbus
cp -f /etc/machine-id /var/lib/dbus/machine-id
# cleanups
mkdir -p /run/dbus
rm -f /run/dbus/pid
# run
dbus-daemon --system --print-address
}
echo "Start Test-Env"
start_docker
trap "stop_docker" ERR
build_supervisor
cleanup_lastboot
cleanup_docker
init_dbus
setup_test_env
stop_docker
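start_docker() polls `docker info` for up to DOCKER_TIMEOUT seconds before giving up. The same wait-for-daemon pattern in Python (a sketch, assuming the docker CLI is on PATH):

# wait_docker.py - poll `docker info` until the daemon answers, as in start_docker().
import subprocess
import time

DOCKER_TIMEOUT = 30

def wait_for_docker() -> None:
    deadline = time.monotonic() + DOCKER_TIMEOUT
    while subprocess.run(
        ["docker", "info"], stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL
    ).returncode != 0:
        if time.monotonic() > deadline:
            raise TimeoutError("Timeout while waiting for docker to come up")
        time.sleep(1)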

18
scripts/update-frontend.sh Executable file
View File

@ -0,0 +1,18 @@
#!/bin/bash
set -e
# Update frontend
git submodule update --init --recursive --remote
[ -s "$NVM_DIR/nvm.sh" ] && \. "$NVM_DIR/nvm.sh"
cd home-assistant-polymer
nvm install
script/bootstrap
# build frontend
cd hassio
./script/build_hassio
# Copy frontend
rm -f ../../supervisor/hassio/api/panel/chunk.*
cp -rf build/* ../../supervisor/api/panel/

17
setup.cfg Normal file
View File

@ -0,0 +1,17 @@
[isort]
multi_line_output = 3
include_trailing_comma=True
force_grid_wrap=0
line_length=88
indent = " "
not_skip = __init__.py
force_sort_within_sections = true
sections = FUTURE,STDLIB,INBETWEENS,THIRDPARTY,FIRSTPARTY,LOCALFOLDER
default_section = THIRDPARTY
forced_separate = tests
combine_as_imports = true
use_parentheses = true
[flake8]
max-line-length = 88
ignore = E501, W503

View File

@ -1,28 +1,44 @@
"""Home Assistant Supervisor setup."""
from pathlib import Path
import re
from setuptools import setup
RE_SUPERVISOR_VERSION = re.compile(r"^SUPERVISOR_VERSION =\s*(.+)$")
SUPERVISOR_DIR = Path(__file__).parent
REQUIREMENTS_FILE = SUPERVISOR_DIR / "requirements.txt"
CONST_FILE = SUPERVISOR_DIR / "supervisor/const.py"
REQUIREMENTS = REQUIREMENTS_FILE.read_text(encoding="utf-8")
CONSTANTS = CONST_FILE.read_text(encoding="utf-8")
def _get_supervisor_version():
for line in CONSTANTS.split("\n"):
if match := RE_SUPERVISOR_VERSION.match(line):
return match.group(1)
return "9999.09.9.dev9999"
from supervisor.const import SUPERVISOR_VERSION
setup(
version=_get_supervisor_version(),
dependencies=REQUIREMENTS.split("\n"),
name="Supervisor",
version=SUPERVISOR_VERSION,
license="BSD License",
author="The Home Assistant Authors",
author_email="hello@home-assistant.io",
url="https://home-assistant.io/",
description=("Open-source private cloud os for Home-Assistant" " based on HassOS"),
long_description=(
"A maintainless private cloud operator system that"
"setup a Home-Assistant instance. Based on HassOS"
),
classifiers=[
"Intended Audience :: End Users/Desktop",
"Intended Audience :: Developers",
"License :: OSI Approved :: Apache Software License",
"Operating System :: OS Independent",
"Topic :: Home Automation",
"Topic :: Software Development :: Libraries :: Python Modules",
"Topic :: Scientific/Engineering :: Atmospheric Science",
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"Programming Language :: Python :: 3.7",
],
keywords=["docker", "home-assistant", "api"],
zip_safe=False,
platforms="any",
packages=[
"supervisor",
"supervisor.docker",
"supervisor.addons",
"supervisor.api",
"supervisor.misc",
"supervisor.utils",
"supervisor.plugins",
"supervisor.snapshots",
],
include_package_data=True,
)
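The main-branch side of this file no longer imports supervisor.const; it scans const.py with RE_SUPERVISOR_VERSION instead. Note that the regex captures everything after the equals sign, quotes included. A quick demonstration (the sample line is hypothetical):

# regex_demo.py - how setup.py's version regex behaves on a const.py line.
import re

RE_SUPERVISOR_VERSION = re.compile(r"^SUPERVISOR_VERSION =\s*(.+)$")

sample = 'SUPERVISOR_VERSION = "2024.01.0"'  # hypothetical const.py content
match = RE_SUPERVISOR_VERSION.match(sample)
assert match is not None
assert match.group(1) == '"2024.01.0"'  # the quotes are part of the capture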

View File

@ -1,37 +1,25 @@
"""Main file for Supervisor."""
import asyncio
from concurrent.futures import ThreadPoolExecutor
import logging
from pathlib import Path
import sys
import zlib_fast
# Enable fast zlib before importing supervisor
zlib_fast.enable()
# pylint: disable=wrong-import-position
from supervisor import bootstrap # noqa: E402
from supervisor.utils.blockbuster import BlockBusterManager # noqa: E402
from supervisor.utils.logging import activate_log_queue_handler # noqa: E402
# pylint: enable=wrong-import-position
from supervisor import bootstrap
_LOGGER: logging.Logger = logging.getLogger(__name__)
CONTAINER_OS_STARTUP_CHECK = Path("/run/os/startup-marker")
def run_os_startup_check_cleanup() -> None:
"""Cleanup OS startup check."""
if not CONTAINER_OS_STARTUP_CHECK.exists():
return
def initialize_event_loop():
"""Attempt to use uvloop."""
try:
CONTAINER_OS_STARTUP_CHECK.unlink()
except OSError as err:
_LOGGER.warning("Not able to remove the startup health file: %s", err)
# pylint: disable=import-outside-toplevel
import uvloop
uvloop.install()
except ImportError:
pass
return asyncio.get_event_loop()
# pylint: disable=invalid-name
@ -39,8 +27,7 @@ if __name__ == "__main__":
bootstrap.initialize_logging()
# Init async event loop
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
loop = initialize_event_loop()
# Check if all information is available to set up Supervisor
bootstrap.check_environment()
@ -49,38 +36,27 @@ if __name__ == "__main__":
executor = ThreadPoolExecutor(thread_name_prefix="SyncWorker")
loop.set_default_executor(executor)
activate_log_queue_handler()
_LOGGER.info("Initializing Supervisor setup")
_LOGGER.info("Initialize Supervisor setup")
coresys = loop.run_until_complete(bootstrap.initialize_coresys())
loop.set_debug(coresys.config.debug)
if coresys.config.detect_blocking_io:
BlockBusterManager.activate()
loop.run_until_complete(coresys.core.connect())
loop.run_until_complete(bootstrap.supervisor_debugger(coresys))
bootstrap.supervisor_debugger(coresys)
bootstrap.migrate_system_env(coresys)
# Signal health startup for container
run_os_startup_check_cleanup()
_LOGGER.info("Setting up Supervisor")
_LOGGER.info("Setup Supervisor")
loop.run_until_complete(coresys.core.setup())
bootstrap.register_signal_handlers(loop, coresys)
loop.call_soon_threadsafe(loop.create_task, coresys.core.start())
loop.call_soon_threadsafe(bootstrap.reg_signal, loop)
try:
loop.run_until_complete(coresys.core.start())
except Exception as err: # pylint: disable=broad-except
# Supervisor itself is running at this point, just something didn't
# start as expected. Log with traceback to get more insights for
# such cases.
_LOGGER.critical("Supervisor start failed: %s", err, exc_info=True)
try:
_LOGGER.info("Running Supervisor")
_LOGGER.info("Run Supervisor")
loop.run_forever()
finally:
_LOGGER.info("Stopping Supervisor")
loop.run_until_complete(coresys.core.stop())
executor.shutdown(wait=False)
loop.close()
_LOGGER.info("Closing Supervisor")
sys.exit(coresys.core.exit_code)
_LOGGER.info("Close Supervisor")
sys.exit(0)

View File

@ -1 +1,337 @@
"""Init file for Supervisor add-ons."""
import asyncio
from contextlib import suppress
import logging
import tarfile
from typing import Dict, List, Optional, Union
from ..const import BOOT_AUTO, STATE_STARTED
from ..coresys import CoreSys, CoreSysAttributes
from ..exceptions import (
AddonsError,
AddonsNotSupportedError,
CoreDNSError,
DockerAPIError,
HomeAssistantAPIError,
HostAppArmorError,
)
from ..store.addon import AddonStore
from .addon import Addon
from .data import AddonsData
_LOGGER: logging.Logger = logging.getLogger(__name__)
AnyAddon = Union[Addon, AddonStore]
class AddonManager(CoreSysAttributes):
"""Manage add-ons inside Supervisor."""
def __init__(self, coresys: CoreSys):
"""Initialize Docker base wrapper."""
self.coresys: CoreSys = coresys
self.data: AddonsData = AddonsData(coresys)
self.local: Dict[str, Addon] = {}
self.store: Dict[str, AddonStore] = {}
@property
def all(self) -> List[AnyAddon]:
"""Return a list of all add-ons."""
addons = {**self.store, **self.local}
return list(addons.values())
@property
def installed(self) -> List[Addon]:
"""Return a list of all installed add-ons."""
return list(self.local.values())
def get(self, addon_slug: str) -> Optional[AnyAddon]:
"""Return an add-on from slug.
Prio:
1 - Local
2 - Store
"""
if addon_slug in self.local:
return self.local[addon_slug]
return self.store.get(addon_slug)
def from_token(self, token: str) -> Optional[Addon]:
"""Return an add-on from Supervisor token."""
for addon in self.installed:
if token == addon.supervisor_token:
return addon
return None
async def load(self) -> None:
"""Start up add-on management."""
tasks = []
for slug in self.data.system:
addon = self.local[slug] = Addon(self.coresys, slug)
tasks.append(addon.load())
# Run initial tasks
_LOGGER.info("Found %d installed add-ons", len(tasks))
if tasks:
await asyncio.wait(tasks)
# Sync DNS
await self.sync_dns()
async def boot(self, stage: str) -> None:
"""Boot add-ons with mode auto."""
tasks = []
for addon in self.installed:
if addon.boot != BOOT_AUTO or addon.startup != stage:
continue
tasks.append(addon.start())
_LOGGER.info("Phase '%s' start %d add-ons", stage, len(tasks))
if tasks:
await asyncio.wait(tasks)
await asyncio.sleep(self.sys_config.wait_boot)
async def shutdown(self, stage: str) -> None:
"""Shutdown addons."""
tasks = []
for addon in self.installed:
if await addon.state() != STATE_STARTED or addon.startup != stage:
continue
tasks.append(addon.stop())
_LOGGER.info("Phase '%s' stop %d add-ons", stage, len(tasks))
if tasks:
await asyncio.wait(tasks)
async def install(self, slug: str) -> None:
"""Install an add-on."""
if slug in self.local:
_LOGGER.warning("Add-on %s is already installed", slug)
return
store = self.store.get(slug)
if not store:
_LOGGER.error("Add-on %s not exists", slug)
raise AddonsError()
if not store.available:
_LOGGER.error("Add-on %s not supported on that platform", slug)
raise AddonsNotSupportedError()
self.data.install(store)
addon = Addon(self.coresys, slug)
if not addon.path_data.is_dir():
_LOGGER.info("Create Home Assistant add-on data folder %s", addon.path_data)
addon.path_data.mkdir()
# Setup/Fix AppArmor profile
await addon.install_apparmor()
try:
await addon.instance.install(store.version, store.image)
except DockerAPIError:
self.data.uninstall(addon)
raise AddonsError() from None
else:
self.local[slug] = addon
_LOGGER.info("Add-on '%s' successfully installed", slug)
async def uninstall(self, slug: str) -> None:
"""Remove an add-on."""
if slug not in self.local:
_LOGGER.warning("Add-on %s is not installed", slug)
return
addon = self.local.get(slug)
try:
await addon.instance.remove()
except DockerAPIError:
raise AddonsError() from None
await addon.remove_data()
# Cleanup audio settings
if addon.path_pulse.exists():
with suppress(OSError):
addon.path_pulse.unlink()
# Cleanup AppArmor profile
with suppress(HostAppArmorError):
await addon.uninstall_apparmor()
# Cleanup Ingress panel from sidebar
if addon.ingress_panel:
addon.ingress_panel = False
with suppress(HomeAssistantAPIError):
await self.sys_ingress.update_hass_panel(addon)
# Cleanup Ingress dynamic port assignment
self.sys_ingress.del_dynamic_port(slug)
# Cleanup discovery data
for message in self.sys_discovery.list_messages:
if message.addon != addon.slug:
continue
self.sys_discovery.remove(message)
# Cleanup services data
for service in self.sys_services.list_services:
if addon.slug not in service.active:
continue
service.del_service_data(addon)
self.data.uninstall(addon)
self.local.pop(slug)
_LOGGER.info("Add-on '%s' successfully removed", slug)
async def update(self, slug: str) -> None:
"""Update add-on."""
if slug not in self.local:
_LOGGER.error("Add-on %s is not installed", slug)
raise AddonsError()
addon = self.local.get(slug)
if addon.is_detached:
_LOGGER.error("Add-on %s is not available inside store", slug)
raise AddonsError()
store = self.store.get(slug)
if addon.version == store.version:
_LOGGER.warning("No update available for add-on %s", slug)
return
# Check if available, maybe something has changed
if not store.available:
_LOGGER.error("Add-on %s not supported on that platform", slug)
raise AddonsNotSupportedError()
# Update instance
last_state = await addon.state()
try:
await addon.instance.update(store.version, store.image)
# Cleanup
with suppress(DockerAPIError):
await addon.instance.cleanup()
except DockerAPIError:
raise AddonsError() from None
else:
self.data.update(store)
_LOGGER.info("Add-on '%s' successfully updated", slug)
# Setup/Fix AppArmor profile
await addon.install_apparmor()
# restore state
if last_state == STATE_STARTED:
await addon.start()
async def rebuild(self, slug: str) -> None:
"""Perform a rebuild of local build add-on."""
if slug not in self.local:
_LOGGER.error("Add-on %s is not installed", slug)
raise AddonsError()
addon = self.local.get(slug)
if addon.is_detached:
_LOGGER.error("Add-on %s is not available inside store", slug)
raise AddonsError()
store = self.store.get(slug)
# Check if a rebuild is possible now
if addon.version != store.version:
_LOGGER.error("Version changed, use Update instead Rebuild")
raise AddonsError()
if not addon.need_build:
_LOGGER.error("Can't rebuild a image based add-on")
raise AddonsNotSupportedError()
# remove docker container but not addon config
last_state = await addon.state()
try:
await addon.instance.remove()
await addon.instance.install(addon.version)
except DockerAPIError:
raise AddonsError() from None
else:
self.data.update(store)
_LOGGER.info("Add-on '%s' successfully rebuilt", slug)
# restore state
if last_state == STATE_STARTED:
await addon.start()
async def restore(self, slug: str, tar_file: tarfile.TarFile) -> None:
"""Restore state of an add-on."""
if slug not in self.local:
_LOGGER.debug("Add-on %s is not local available for restore", slug)
addon = Addon(self.coresys, slug)
else:
_LOGGER.debug("Add-on %s is local available for restore", slug)
addon = self.local[slug]
await addon.restore(tar_file)
# Check if new
if slug not in self.local:
_LOGGER.info("Detect new Add-on after restore %s", slug)
self.local[slug] = addon
# Update ingress
if addon.with_ingress:
with suppress(HomeAssistantAPIError):
await self.sys_ingress.update_hass_panel(addon)
async def repair(self) -> None:
"""Repair local add-ons."""
needs_repair: List[Addon] = []
# Evaluate Add-ons to repair
for addon in self.installed:
if await addon.instance.exists():
continue
needs_repair.append(addon)
_LOGGER.info("Found %d add-ons to repair", len(needs_repair))
if not needs_repair:
return
for addon in needs_repair:
_LOGGER.info("Start repair for add-on: %s", addon.slug)
await self.sys_run_in_executor(
self.sys_docker.network.stale_cleanup, addon.instance.name
)
with suppress(DockerAPIError, KeyError):
# Need to pull the image again
if not addon.need_build:
await addon.instance.install(addon.version, addon.image)
continue
# Need local lookup
if addon.need_build and not addon.is_detached:
store = self.store[addon.slug]
# If this add-on is available for rebuild
if addon.version == store.version:
await addon.instance.install(addon.version, addon.image)
continue
_LOGGER.error("Can't repair %s", addon.slug)
with suppress(AddonsError):
await self.uninstall(addon.slug)
async def sync_dns(self) -> None:
"""Sync add-ons DNS names."""
# Update hosts
for addon in self.installed:
if not await addon.instance.is_running():
continue
self.sys_plugins.dns.add_host(
ipv4=addon.ip_address, names=[addon.hostname], write=False
)
# Write hosts files
with suppress(CoreDNSError):
self.sys_plugins.dns.write_hosts()
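Note how the all property merges the two registries with {**self.store, **self.local}: later keys win, so an installed add-on shadows its store entry, which matches the Local-before-Store priority documented in get(). A toy illustration:

# merge_priority.py - dict merge order gives installed add-ons priority.
store = {"core_ssh": "store entry"}
local = {"core_ssh": "installed entry"}

merged = {**store, **local}
assert merged["core_ssh"] == "installed entry"  # local wins over store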

File diff suppressed because it is too large

View File

@ -1,33 +1,18 @@
"""Supervisor add-on build environment."""
from __future__ import annotations
from functools import cached_property
from pathlib import Path
from typing import TYPE_CHECKING, Any
from typing import TYPE_CHECKING, Dict
from awesomeversion import AwesomeVersion
from ..const import (
ATTR_ARGS,
ATTR_BUILD_FROM,
ATTR_LABELS,
ATTR_SQUASH,
FILE_SUFFIX_CONFIGURATION,
META_ADDON,
SOCKET_DOCKER,
)
from ..const import ATTR_ARGS, ATTR_BUILD_FROM, ATTR_SQUASH, META_ADDON
from ..coresys import CoreSys, CoreSysAttributes
from ..docker.interface import MAP_ARCH
from ..exceptions import ConfigurationFileError, HassioArchNotFound
from ..utils.common import FileConfiguration, find_one_filetype
from ..utils.json import JsonConfig
from .validate import SCHEMA_BUILD_CONFIG
if TYPE_CHECKING:
from .manager import AnyAddon
from . import AnyAddon
class AddonBuild(FileConfiguration, CoreSysAttributes):
class AddonBuild(JsonConfig, CoreSysAttributes):
"""Handle build options for add-ons."""
def __init__(self, coresys: CoreSys, addon: AnyAddon) -> None:
@ -35,52 +20,20 @@ class AddonBuild(FileConfiguration, CoreSysAttributes):
self.coresys: CoreSys = coresys
self.addon = addon
# Search for build file later in executor
super().__init__(None, SCHEMA_BUILD_CONFIG)
super().__init__(
Path(self.addon.path_location, "build.json"), SCHEMA_BUILD_CONFIG
)
def _get_build_file(self) -> Path:
"""Get build file.
Must be run in executor.
"""
try:
return find_one_filetype(
self.addon.path_location, "build", FILE_SUFFIX_CONFIGURATION
)
except ConfigurationFileError:
return self.addon.path_location / "build.json"
async def read_data(self) -> None:
"""Load data from file."""
if not self._file:
self._file = await self.sys_run_in_executor(self._get_build_file)
await super().read_data()
async def save_data(self):
def save_data(self):
"""Ignore save function."""
raise RuntimeError()
@cached_property
def arch(self) -> str:
"""Return arch of the add-on."""
return self.sys_arch.match([self.addon.arch])
@property
def base_image(self) -> str:
"""Return base image for this add-on."""
if not self._data[ATTR_BUILD_FROM]:
return f"ghcr.io/home-assistant/{self.sys_arch.default}-base:latest"
if isinstance(self._data[ATTR_BUILD_FROM], str):
return self._data[ATTR_BUILD_FROM]
# Evaluate correct base image
if self.arch not in self._data[ATTR_BUILD_FROM]:
raise HassioArchNotFound(
f"Add-on {self.addon.slug} is not supported on {self.arch}"
)
return self._data[ATTR_BUILD_FROM][self.arch]
"""Base images for this add-on."""
return self._data[ATTR_BUILD_FROM].get(
self.sys_arch.default, f"homeassistant/{self.sys_arch.default}-base:latest"
)
@property
def squash(self) -> bool:
@ -88,98 +41,37 @@ class AddonBuild(FileConfiguration, CoreSysAttributes):
return self._data[ATTR_SQUASH]
@property
def additional_args(self) -> dict[str, str]:
def additional_args(self) -> Dict[str, str]:
"""Return additional Docker build arguments."""
return self._data[ATTR_ARGS]
@property
def additional_labels(self) -> dict[str, str]:
"""Return additional Docker labels."""
return self._data[ATTR_LABELS]
def get_dockerfile(self) -> Path:
"""Return Dockerfile path.
Must be run in executor.
"""
if self.addon.path_location.joinpath(f"Dockerfile.{self.arch}").exists():
return self.addon.path_location.joinpath(f"Dockerfile.{self.arch}")
return self.addon.path_location.joinpath("Dockerfile")
async def is_valid(self) -> bool:
"""Return true if the build env is valid."""
def build_is_valid() -> bool:
return all(
[
self.addon.path_location.is_dir(),
self.get_dockerfile().is_file(),
]
)
try:
return await self.sys_run_in_executor(build_is_valid)
except HassioArchNotFound:
return False
def get_docker_args(
self, version: AwesomeVersion, image_tag: str
) -> dict[str, Any]:
"""Create a dict with Docker run args."""
dockerfile_path = self.get_dockerfile().relative_to(self.addon.path_location)
build_cmd = [
"docker",
"buildx",
"build",
".",
"--tag",
image_tag,
"--file",
str(dockerfile_path),
"--platform",
MAP_ARCH[self.arch],
"--pull",
]
labels = {
"io.hass.version": version,
"io.hass.arch": self.arch,
"io.hass.type": META_ADDON,
"io.hass.name": self._fix_label("name"),
"io.hass.description": self._fix_label("description"),
**self.additional_labels,
def get_docker_args(self, version):
"""Create a dict with Docker build arguments."""
args = {
"path": str(self.addon.path_location),
"tag": f"{self.addon.image}:{version}",
"pull": True,
"forcerm": True,
"squash": self.squash,
"labels": {
"io.hass.version": version,
"io.hass.arch": self.sys_arch.default,
"io.hass.type": META_ADDON,
"io.hass.name": self._fix_label("name"),
"io.hass.description": self._fix_label("description"),
},
"buildargs": {
"BUILD_FROM": self.base_image,
"BUILD_VERSION": version,
"BUILD_ARCH": self.sys_arch.default,
**self.additional_args,
},
}
if self.addon.url:
labels["io.hass.url"] = self.addon.url
args["labels"]["io.hass.url"] = self.addon.url
for key, value in labels.items():
build_cmd.extend(["--label", f"{key}={value}"])
build_args = {
"BUILD_FROM": self.base_image,
"BUILD_VERSION": version,
"BUILD_ARCH": self.sys_arch.default,
**self.additional_args,
}
for key, value in build_args.items():
build_cmd.extend(["--build-arg", f"{key}={value}"])
# The addon path will be mounted from the host system
addon_extern_path = self.sys_config.local_to_extern_path(
self.addon.path_location
)
return {
"command": build_cmd,
"volumes": {
SOCKET_DOCKER: {"bind": "/var/run/docker.sock", "mode": "rw"},
addon_extern_path: {"bind": "/addon", "mode": "ro"},
},
"working_dir": "/addon",
}
return args
def _fix_label(self, label_name: str) -> str:
"""Remove characters they are not supported."""

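On the main-branch side, get_docker_args no longer returns docker-py keyword arguments; it assembles a docker buildx command and flattens the label and build-arg dicts into repeated CLI flags. A reduced sketch of that flattening (image tag and values are made up):

# buildx_args.py - flatten label/build-arg dicts into docker buildx flags,
# as the main-branch get_docker_args() does.
cmd = ["docker", "buildx", "build", ".", "--tag", "local/my-addon:1.0"]

labels = {"io.hass.type": "addon", "io.hass.arch": "amd64"}
for key, value in labels.items():
    cmd.extend(["--label", f"{key}={value}"])

build_args = {"BUILD_FROM": "ghcr.io/home-assistant/amd64-base:latest"}
for key, value in build_args.items():
    cmd.extend(["--build-arg", f"{key}={value}"])

print(" ".join(cmd))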
View File

@ -1,11 +0,0 @@
"""Confgiuration Objects for Addon Config."""
from dataclasses import dataclass
@dataclass(slots=True)
class FolderMapping:
"""Represent folder mapping configuration."""
path: str | None
read_only: bool

View File

@ -1,49 +0,0 @@
"""Add-on static data."""
from datetime import timedelta
from enum import StrEnum
from ..jobs.const import JobCondition
class AddonBackupMode(StrEnum):
"""Backup mode of an Add-on."""
HOT = "hot"
COLD = "cold"
class MappingType(StrEnum):
"""Mapping type of an Add-on Folder."""
DATA = "data"
CONFIG = "config"
SSL = "ssl"
ADDONS = "addons"
BACKUP = "backup"
SHARE = "share"
MEDIA = "media"
HOMEASSISTANT_CONFIG = "homeassistant_config"
ALL_ADDON_CONFIGS = "all_addon_configs"
ADDON_CONFIG = "addon_config"
ATTR_BACKUP = "backup"
ATTR_BREAKING_VERSIONS = "breaking_versions"
ATTR_CODENOTARY = "codenotary"
ATTR_READ_ONLY = "read_only"
ATTR_PATH = "path"
WATCHDOG_RETRY_SECONDS = 10
WATCHDOG_MAX_ATTEMPTS = 5
WATCHDOG_THROTTLE_PERIOD = timedelta(minutes=30)
WATCHDOG_THROTTLE_MAX_CALLS = 10
ADDON_UPDATE_CONDITIONS = [
JobCondition.FREE_SPACE,
JobCondition.HEALTHY,
JobCondition.INTERNET_HOST,
JobCondition.PLUGINS_UPDATED,
JobCondition.SUPERVISOR_UPDATED,
]
RE_SLUG = r"[-_.A-Za-z0-9]+"
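AddonBackupMode and MappingType are StrEnums, so members compare equal to their raw string values and can be constructed straight from config strings. A short demonstration (requires Python 3.11+ for enum.StrEnum):

# strenum_demo.py - StrEnum members behave like their string values.
from enum import StrEnum

class MappingType(StrEnum):
    DATA = "data"
    CONFIG = "config"

assert MappingType("data") is MappingType.DATA  # construct from config string
assert MappingType.CONFIG == "config"           # compares equal to plain str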

View File

@ -1,7 +1,7 @@
"""Init file for Supervisor add-on data."""
from copy import deepcopy
from typing import Any
import logging
from typing import Any, Dict
from ..const import (
ATTR_IMAGE,
@ -12,15 +12,17 @@ from ..const import (
FILE_HASSIO_ADDONS,
)
from ..coresys import CoreSys, CoreSysAttributes
from ..utils.json import JsonConfig
from ..store.addon import AddonStore
from ..utils.common import FileConfiguration
from .addon import Addon
from .validate import SCHEMA_ADDONS_FILE
Config = dict[str, Any]
_LOGGER: logging.Logger = logging.getLogger(__name__)
Config = Dict[str, Any]
class AddonsData(FileConfiguration, CoreSysAttributes):
class AddonsData(JsonConfig, CoreSysAttributes):
"""Hold data for installed Add-ons inside Supervisor."""
def __init__(self, coresys: CoreSys):
@ -38,7 +40,7 @@ class AddonsData(FileConfiguration, CoreSysAttributes):
"""Return local add-on data."""
return self._data[ATTR_SYSTEM]
async def install(self, addon: AddonStore) -> None:
def install(self, addon: AddonStore) -> None:
"""Set addon as installed."""
self.system[addon.slug] = deepcopy(addon.data)
self.user[addon.slug] = {
@ -46,28 +48,26 @@ class AddonsData(FileConfiguration, CoreSysAttributes):
ATTR_VERSION: addon.version,
ATTR_IMAGE: addon.image,
}
await self.save_data()
self.save_data()
async def uninstall(self, addon: Addon) -> None:
def uninstall(self, addon: Addon) -> None:
"""Set add-on as uninstalled."""
self.system.pop(addon.slug, None)
self.user.pop(addon.slug, None)
await self.save_data()
self.save_data()
async def update(self, addon: AddonStore) -> None:
def update(self, addon: AddonStore) -> None:
"""Update version of add-on."""
self.system[addon.slug] = deepcopy(addon.data)
self.user[addon.slug].update(
{ATTR_VERSION: addon.version, ATTR_IMAGE: addon.image}
)
await self.save_data()
self.save_data()
async def restore(
self, slug: str, user: Config, system: Config, image: str
) -> None:
def restore(self, slug: str, user: Config, system: Config, image: str) -> None:
"""Restore data to add-on."""
self.user[slug] = deepcopy(user)
self.system[slug] = deepcopy(system)
self.user[slug][ATTR_IMAGE] = image
await self.save_data()
self.save_data()

View File

@ -1,408 +0,0 @@
"""Supervisor add-on manager."""
import asyncio
from collections.abc import Awaitable
from contextlib import suppress
import logging
import tarfile
from typing import Self, Union
from attr import evolve
from ..const import AddonBoot, AddonStartup, AddonState
from ..coresys import CoreSys, CoreSysAttributes
from ..exceptions import (
AddonsError,
AddonsJobError,
AddonsNotSupportedError,
CoreDNSError,
DockerError,
HassioError,
HomeAssistantAPIError,
)
from ..jobs.decorator import Job, JobCondition
from ..resolution.const import ContextType, IssueType, SuggestionType
from ..store.addon import AddonStore
from ..utils.sentry import async_capture_exception
from .addon import Addon
from .const import ADDON_UPDATE_CONDITIONS
from .data import AddonsData
_LOGGER: logging.Logger = logging.getLogger(__name__)
AnyAddon = Union[Addon, AddonStore]
class AddonManager(CoreSysAttributes):
"""Manage add-ons inside Supervisor."""
def __init__(self, coresys: CoreSys):
"""Initialize Docker base wrapper."""
self.coresys: CoreSys = coresys
self.data: AddonsData = AddonsData(coresys)
self.local: dict[str, Addon] = {}
self.store: dict[str, AddonStore] = {}
@property
def all(self) -> list[AnyAddon]:
"""Return a list of all add-ons."""
addons: dict[str, AnyAddon] = {**self.store, **self.local}
return list(addons.values())
@property
def installed(self) -> list[Addon]:
"""Return a list of all installed add-ons."""
return list(self.local.values())
def get(self, addon_slug: str, local_only: bool = False) -> AnyAddon | None:
"""Return an add-on from slug.
Prio:
1 - Local
2 - Store
"""
if addon_slug in self.local:
return self.local[addon_slug]
if not local_only:
return self.store.get(addon_slug)
return None
def get_local_only(self, addon_slug: str) -> Addon | None:
"""Return an installed add-on from slug."""
return self.local.get(addon_slug)
def from_token(self, token: str) -> Addon | None:
"""Return an add-on from Supervisor token."""
for addon in self.installed:
if token == addon.supervisor_token:
return addon
return None
async def load_config(self) -> Self:
"""Load config in executor."""
await self.data.read_data()
return self
async def load(self) -> None:
"""Start up add-on management."""
# Refresh cache for all store addons
tasks: list[Awaitable[None]] = [
store.refresh_path_cache() for store in self.store.values()
]
# Load all installed addons
for slug in self.data.system:
addon = self.local[slug] = Addon(self.coresys, slug)
tasks.append(addon.load())
# Run initial tasks
_LOGGER.info("Found %d installed add-ons", len(self.data.system))
if tasks:
await asyncio.gather(*tasks)
# Sync DNS
await self.sync_dns()
async def boot(self, stage: AddonStartup) -> None:
"""Boot add-ons with mode auto."""
tasks: list[Addon] = []
for addon in self.installed:
if addon.boot != AddonBoot.AUTO or addon.startup != stage:
continue
tasks.append(addon)
# Evaluate add-ons which need to be started
_LOGGER.info("Phase '%s' starting %d add-ons", stage, len(tasks))
if not tasks:
return
# Start Add-ons sequential
# avoid issue on slow IO
# Config.wait_boot is deprecated. Until addons update with healthchecks,
# add a sleep task for it to keep the same minimum amount of wait time
wait_boot: list[Awaitable[None]] = [asyncio.sleep(self.sys_config.wait_boot)]
for addon in tasks:
try:
if start_task := await addon.start():
wait_boot.append(start_task)
except HassioError:
self.sys_resolution.add_issue(
evolve(addon.boot_failed_issue),
suggestions=[
SuggestionType.EXECUTE_START,
SuggestionType.DISABLE_BOOT,
],
)
else:
continue
_LOGGER.warning("Can't start Add-on %s", addon.slug)
# Ignore exceptions from waiting for addon startup, addon errors handled elsewhere
await asyncio.gather(*wait_boot, return_exceptions=True)
# After waiting for startup, create an issue for boot addons that are error or unknown state
# Ignore stopped as single shot addons can be run at boot and this is successful exit
# Timeout waiting for startup is not a failure, addon is probably just slow
for addon in tasks:
if addon.state in {AddonState.ERROR, AddonState.UNKNOWN}:
self.sys_resolution.add_issue(
evolve(addon.boot_failed_issue),
suggestions=[
SuggestionType.EXECUTE_START,
SuggestionType.DISABLE_BOOT,
],
)
async def shutdown(self, stage: AddonStartup) -> None:
"""Shutdown addons."""
tasks: list[Addon] = []
for addon in self.installed:
if addon.state != AddonState.STARTED or addon.startup != stage:
continue
tasks.append(addon)
# Evaluate add-ons which need to be stopped
_LOGGER.info("Phase '%s' stopping %d add-ons", stage, len(tasks))
if not tasks:
return
# Stop Add-ons sequential
# avoid issue on slow IO
for addon in tasks:
try:
await addon.stop()
except Exception as err: # pylint: disable=broad-except
_LOGGER.warning("Can't stop Add-on %s: %s", addon.slug, err)
await async_capture_exception(err)
@Job(
name="addon_manager_install",
conditions=ADDON_UPDATE_CONDITIONS,
on_condition=AddonsJobError,
)
async def install(self, slug: str) -> None:
"""Install an add-on."""
self.sys_jobs.current.reference = slug
if slug in self.local:
raise AddonsError(f"Add-on {slug} is already installed", _LOGGER.warning)
store = self.store.get(slug)
if not store:
raise AddonsError(f"Add-on {slug} does not exist", _LOGGER.error)
store.validate_availability()
await Addon(self.coresys, slug).install()
_LOGGER.info("Add-on '%s' successfully installed", slug)
@Job(name="addon_manager_uninstall")
async def uninstall(self, slug: str, *, remove_config: bool = False) -> None:
"""Remove an add-on."""
if slug not in self.local:
_LOGGER.warning("Add-on %s is not installed", slug)
return
shared_image = any(
self.local[slug].image == addon.image
and self.local[slug].version == addon.version
for addon in self.installed
if addon.slug != slug
)
await self.local[slug].uninstall(
remove_config=remove_config, remove_image=not shared_image
)
_LOGGER.info("Add-on '%s' successfully removed", slug)
@Job(
name="addon_manager_update",
conditions=ADDON_UPDATE_CONDITIONS,
on_condition=AddonsJobError,
)
async def update(
self, slug: str, backup: bool | None = False
) -> asyncio.Task | None:
"""Update add-on.
Returns a Task that completes when addon has state 'started' (see addon.start)
if addon is started after update. Else nothing is returned.
"""
self.sys_jobs.current.reference = slug
if slug not in self.local:
raise AddonsError(f"Add-on {slug} is not installed", _LOGGER.error)
addon = self.local[slug]
if addon.is_detached:
raise AddonsError(
f"Add-on {slug} is not available inside store", _LOGGER.error
)
store = self.store[slug]
if addon.version == store.version:
raise AddonsError(f"No update available for add-on {slug}", _LOGGER.warning)
# Check if available, maybe something has changed
store.validate_availability()
if backup:
await self.sys_backups.do_backup_partial(
name=f"addon_{addon.slug}_{addon.version}",
homeassistant=False,
addons=[addon.slug],
)
return await addon.update()
@Job(
name="addon_manager_rebuild",
conditions=[
JobCondition.FREE_SPACE,
JobCondition.INTERNET_HOST,
JobCondition.HEALTHY,
],
on_condition=AddonsJobError,
)
async def rebuild(self, slug: str, *, force: bool = False) -> asyncio.Task | None:
"""Perform a rebuild of local build add-on.
Returns a Task that completes when addon has state 'started' (see addon.start)
if addon is started after rebuild. Else nothing is returned.
"""
self.sys_jobs.current.reference = slug
if slug not in self.local:
raise AddonsError(f"Add-on {slug} is not installed", _LOGGER.error)
addon = self.local[slug]
if addon.is_detached:
raise AddonsError(
f"Add-on {slug} is not available inside store", _LOGGER.error
)
store = self.store[slug]
# Check if a rebuild is possible now
if addon.version != store.version:
raise AddonsError(
"Version changed, use Update instead Rebuild", _LOGGER.error
)
if not force and not addon.need_build:
raise AddonsNotSupportedError(
"Can't rebuild a image based add-on", _LOGGER.error
)
return await addon.rebuild()
@Job(
name="addon_manager_restore",
conditions=[
JobCondition.FREE_SPACE,
JobCondition.INTERNET_HOST,
JobCondition.HEALTHY,
],
on_condition=AddonsJobError,
)
async def restore(
self, slug: str, tar_file: tarfile.TarFile
) -> asyncio.Task | None:
"""Restore state of an add-on.
Returns a Task that completes when addon has state 'started' (see addon.start)
if addon is started after restore. Else nothing is returned.
"""
self.sys_jobs.current.reference = slug
if slug not in self.local:
_LOGGER.debug("Add-on %s is not local available for restore", slug)
addon = Addon(self.coresys, slug)
had_ingress: bool | None = False
else:
_LOGGER.debug("Add-on %s is local available for restore", slug)
addon = self.local[slug]
had_ingress = addon.ingress_panel
wait_for_start = await addon.restore(tar_file)
# Check if new
if slug not in self.local:
_LOGGER.info("Detect new Add-on after restore %s", slug)
self.local[slug] = addon
# Update ingress
if had_ingress != addon.ingress_panel:
await self.sys_ingress.reload()
with suppress(HomeAssistantAPIError):
await self.sys_ingress.update_hass_panel(addon)
return wait_for_start
@Job(
name="addon_manager_repair",
conditions=[JobCondition.FREE_SPACE, JobCondition.INTERNET_HOST],
)
async def repair(self) -> None:
"""Repair local add-ons."""
needs_repair: list[Addon] = []
# Evaluate Add-ons to repair
for addon in self.installed:
if await addon.instance.exists():
continue
needs_repair.append(addon)
_LOGGER.info("Found %d add-ons to repair", len(needs_repair))
if not needs_repair:
return
for addon in needs_repair:
_LOGGER.info("Repairing for add-on: %s", addon.slug)
with suppress(DockerError, KeyError):
# Need to pull the image again
if not addon.need_build:
await addon.instance.install(addon.version, addon.image)
continue
# Need local lookup
if addon.need_build and not addon.is_detached:
store = self.store[addon.slug]
# If this add-on is available for rebuild
if addon.version == store.version:
await addon.instance.install(addon.version, addon.image)
continue
_LOGGER.error("Can't repair %s", addon.slug)
with suppress(AddonsError):
await self.uninstall(addon.slug)
async def sync_dns(self) -> None:
"""Sync add-ons DNS names."""
# Update hosts
add_host_coros: list[Awaitable[None]] = []
for addon in self.installed:
try:
if not await addon.instance.is_running():
continue
except DockerError as err:
_LOGGER.warning("Add-on %s is corrupt: %s", addon.slug, err)
self.sys_resolution.create_issue(
IssueType.CORRUPT_DOCKER,
ContextType.ADDON,
reference=addon.slug,
suggestions=[SuggestionType.EXECUTE_REPAIR],
)
await async_capture_exception(err)
else:
add_host_coros.append(
self.sys_plugins.dns.add_host(
ipv4=addon.ip_address, names=[addon.hostname], write=False
)
)
await asyncio.gather(*add_host_coros)
# Write hosts files
with suppress(CoreDNSError):
await self.sys_plugins.dns.write_hosts()
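The batching above is deliberate: each `add_host(..., write=False)` call only updates the in-memory table, and the hosts file is flushed once at the end rather than once per add-on. A generic sketch of the same pattern, with all names illustrative:
import asyncio

# Collect coroutine factories first, run them concurrently, persist once.
async def batch_then_flush(updates, flush):
    await asyncio.gather(*(update() for update in updates))
    await flush()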

View File

@ -1,17 +1,9 @@
"""Init file for Supervisor add-ons."""
from abc import ABC, abstractmethod
from collections import defaultdict
from collections.abc import Awaitable, Callable
from contextlib import suppress
from datetime import datetime
import logging
from pathlib import Path
from typing import Any
from typing import Any, Awaitable, Dict, List, Optional
from awesomeversion import AwesomeVersion, AwesomeVersionException
from supervisor.utils.dt import utc_from_timestamp
from packaging import version as pkg_version
import voluptuous as vol
from ..const import (
ATTR_ADVANCED,
@ -19,9 +11,7 @@ from ..const import (
ATTR_ARCH,
ATTR_AUDIO,
ATTR_AUTH_API,
ATTR_BACKUP_EXCLUDE,
ATTR_BACKUP_POST,
ATTR_BACKUP_PRE,
ATTR_AUTO_UART,
ATTR_BOOT,
ATTR_DESCRIPTON,
ATTR_DEVICES,
@ -39,15 +29,12 @@ from ..const import (
ATTR_HOST_IPC,
ATTR_HOST_NETWORK,
ATTR_HOST_PID,
ATTR_HOST_UTS,
ATTR_IMAGE,
ATTR_INGRESS,
ATTR_INGRESS_STREAM,
ATTR_INIT,
ATTR_JOURNALD,
ATTR_KERNEL_MODULES,
ATTR_LEGACY,
ATTR_LOCATION,
ATTR_LOCATON,
ATTR_MACHINE,
ATTR_MAP,
ATTR_NAME,
@ -58,87 +45,51 @@ from ..const import (
ATTR_PORTS,
ATTR_PORTS_DESCRIPTION,
ATTR_PRIVILEGED,
ATTR_REALTIME,
ATTR_REPOSITORY,
ATTR_SCHEMA,
ATTR_SERVICES,
ATTR_SLUG,
ATTR_SNAPSHOT_EXCLUDE,
ATTR_STAGE,
ATTR_STARTUP,
ATTR_STDIN,
ATTR_TIMEOUT,
ATTR_TMPFS,
ATTR_TRANSLATIONS,
ATTR_TYPE,
ATTR_UART,
ATTR_UDEV,
ATTR_URL,
ATTR_USB,
ATTR_VERSION,
ATTR_VERSION_TIMESTAMP,
ATTR_VIDEO,
ATTR_WATCHDOG,
ATTR_WEBUI,
SECURITY_DEFAULT,
SECURITY_DISABLE,
SECURITY_PROFILE,
AddonBoot,
AddonBootConfig,
AddonStage,
AddonStartup,
AddonStages,
)
from ..coresys import CoreSys
from ..docker.const import Capabilities
from ..exceptions import AddonsNotSupportedError
from ..jobs.const import JOB_GROUP_ADDON
from ..jobs.job_group import JobGroup
from ..utils import version_is_new_enough
from .configuration import FolderMapping
from .const import (
ATTR_BACKUP,
ATTR_BREAKING_VERSIONS,
ATTR_CODENOTARY,
ATTR_PATH,
ATTR_READ_ONLY,
AddonBackupMode,
MappingType,
)
from .options import AddonOptions, UiOptions
from .validate import RE_SERVICE
from ..coresys import CoreSysAttributes
from .validate import RE_SERVICE, RE_VOLUME, schema_ui_options, validate_options
_LOGGER: logging.Logger = logging.getLogger(__name__)
Data = dict[str, Any]
Data = Dict[str, Any]
class AddonModel(JobGroup, ABC):
class AddonModel(CoreSysAttributes):
"""Add-on Data layout."""
def __init__(self, coresys: CoreSys, slug: str):
"""Initialize data holder."""
super().__init__(
coresys, JOB_GROUP_ADDON.format_map(defaultdict(str, slug=slug)), slug
)
self.slug: str = slug
self._path_icon_exists: bool = False
self._path_logo_exists: bool = False
self._path_changelog_exists: bool = False
self._path_documentation_exists: bool = False
slug: str = None
@property
@abstractmethod
def data(self) -> Data:
"""Return add-on config/data."""
"""Return Add-on config/data."""
raise NotImplementedError()
@property
@abstractmethod
def is_installed(self) -> bool:
"""Return True if an add-on is installed."""
raise NotImplementedError()
@property
@abstractmethod
def is_detached(self) -> bool:
"""Return True if add-on is detached."""
raise NotImplementedError()
@property
def available(self) -> bool:
@ -146,22 +97,17 @@ class AddonModel(JobGroup, ABC):
return self._available(self.data)
@property
def options(self) -> dict[str, Any]:
def options(self) -> Dict[str, Any]:
"""Return options with local changes."""
return self.data[ATTR_OPTIONS]
@property
def boot_config(self) -> AddonBootConfig:
"""Return boot config."""
def boot(self) -> bool:
"""Return boot config with prio local settings."""
return self.data[ATTR_BOOT]
@property
def boot(self) -> AddonBoot:
"""Return boot config with prio local settings unless config is forced."""
return AddonBoot(self.data[ATTR_BOOT])
@property
def auto_update(self) -> bool | None:
def auto_update(self) -> Optional[bool]:
"""Return if auto update is enable."""
return None
@ -176,7 +122,7 @@ class AddonModel(JobGroup, ABC):
return self.slug.replace("_", "-")
@property
def dns(self) -> list[str]:
def dns(self) -> List[str]:
"""Return list of DNS name for that add-on."""
return []
@ -186,22 +132,22 @@ class AddonModel(JobGroup, ABC):
return self.data[ATTR_TIMEOUT]
@property
def uuid(self) -> str | None:
def uuid(self) -> Optional[str]:
"""Return an API token for this add-on."""
return None
@property
def supervisor_token(self) -> str | None:
def supervisor_token(self) -> Optional[str]:
"""Return access token for Supervisor API."""
return None
@property
def ingress_token(self) -> str | None:
def ingress_token(self) -> Optional[str]:
"""Return access token for Supervisor API."""
return None
@property
def ingress_entry(self) -> str | None:
def ingress_entry(self) -> Optional[str]:
"""Return ingress external URL."""
return None
@ -210,28 +156,31 @@ class AddonModel(JobGroup, ABC):
"""Return description of add-on."""
return self.data[ATTR_DESCRIPTON]
@property
def long_description(self) -> Optional[str]:
"""Return README.md as long_description."""
readme = Path(self.path_location, "README.md")
# If the readme does not exist
if not readme.exists():
return None
# Return data
with readme.open("r") as readme_file:
return readme_file.read()
@property
def repository(self) -> str:
"""Return repository of add-on."""
return self.data[ATTR_REPOSITORY]
@property
def translations(self) -> dict:
"""Return add-on translations."""
return self.data[ATTR_TRANSLATIONS]
@property
def latest_version(self) -> AwesomeVersion:
def latest_version(self) -> str:
"""Return latest version of add-on."""
return self.data[ATTR_VERSION]
@property
def latest_version_timestamp(self) -> datetime:
"""Return when latest version was first seen."""
return utc_from_timestamp(self.data[ATTR_VERSION_TIMESTAMP])
@property
def version(self) -> AwesomeVersion:
def version(self) -> str:
"""Return version of add-on."""
return self.data[ATTR_VERSION]
@ -241,9 +190,9 @@ class AddonModel(JobGroup, ABC):
return True
@property
def startup(self) -> AddonStartup:
def startup(self) -> Optional[str]:
"""Return startup type of add-on."""
return self.data[ATTR_STARTUP]
return self.data.get(ATTR_STARTUP)
@property
def advanced(self) -> bool:
@ -251,55 +200,49 @@ class AddonModel(JobGroup, ABC):
return self.data[ATTR_ADVANCED]
@property
def stage(self) -> AddonStage:
def stage(self) -> AddonStages:
"""Return stage mode of add-on."""
return self.data[ATTR_STAGE]
@property
def services_role(self) -> dict[str, str]:
def services_role(self) -> Dict[str, str]:
"""Return dict of services with rights."""
services_list = self.data.get(ATTR_SERVICES, [])
services = {}
for data in services_list:
service = RE_SERVICE.match(data)
if service:
services[service.group("service")] = service.group("rights")
services[service.group("service")] = service.group("rights")
return services
@property
def discovery(self) -> list[str]:
def discovery(self) -> List[str]:
"""Return list of discoverable components/platforms."""
return self.data.get(ATTR_DISCOVERY, [])
@property
def ports_description(self) -> dict[str, str] | None:
def ports_description(self) -> Optional[Dict[str, str]]:
"""Return descriptions of ports."""
return self.data.get(ATTR_PORTS_DESCRIPTION)
@property
def ports(self) -> dict[str, int | None] | None:
def ports(self) -> Optional[Dict[str, Optional[int]]]:
"""Return ports of add-on."""
return self.data.get(ATTR_PORTS)
@property
def ingress_url(self) -> str | None:
def ingress_url(self) -> Optional[str]:
"""Return URL to ingress url."""
return None
@property
def webui(self) -> str | None:
def webui(self) -> Optional[str]:
"""Return URL to webui or None."""
return self.data.get(ATTR_WEBUI)
@property
def watchdog_url(self) -> str | None:
"""Return URL to for watchdog or None."""
return self.data.get(ATTR_WATCHDOG)
@property
def ingress_port(self) -> int | None:
def ingress_port(self) -> Optional[int]:
"""Return Ingress port."""
return None
@ -333,28 +276,33 @@ class AddonModel(JobGroup, ABC):
"""Return True if add-on run on host IPC namespace."""
return self.data[ATTR_HOST_IPC]
@property
def host_uts(self) -> bool:
"""Return True if add-on run on host UTS namespace."""
return self.data[ATTR_HOST_UTS]
@property
def host_dbus(self) -> bool:
"""Return True if add-on run on host D-BUS."""
return self.data[ATTR_HOST_DBUS]
@property
def static_devices(self) -> list[Path]:
"""Return static devices of add-on."""
return [Path(node) for node in self.data.get(ATTR_DEVICES, [])]
def devices(self) -> Optional[List[str]]:
"""Return devices of add-on."""
return self.data.get(ATTR_DEVICES, [])
@property
def environment(self) -> dict[str, str] | None:
def auto_uart(self) -> bool:
"""Return True if we should map all UART device."""
return self.data[ATTR_AUTO_UART]
@property
def tmpfs(self) -> Optional[str]:
"""Return tmpfs of add-on."""
return self.data.get(ATTR_TMPFS)
@property
def environment(self) -> Optional[Dict[str, str]]:
"""Return environment of add-on."""
return self.data.get(ATTR_ENVIRONMENT)
@property
def privileged(self) -> list[Capabilities]:
def privileged(self) -> List[str]:
"""Return list of privilege."""
return self.data.get(ATTR_PRIVILEGED, [])
@ -393,24 +341,9 @@ class AddonModel(JobGroup, ABC):
return self.data[ATTR_HASSIO_ROLE]
@property
def backup_exclude(self) -> list[str]:
"""Return Exclude list for backup."""
return self.data.get(ATTR_BACKUP_EXCLUDE, [])
@property
def backup_pre(self) -> str | None:
"""Return pre-backup command."""
return self.data.get(ATTR_BACKUP_PRE)
@property
def backup_post(self) -> str | None:
"""Return post-backup command."""
return self.data.get(ATTR_BACKUP_POST)
@property
def backup_mode(self) -> AddonBackupMode:
"""Return if backup is hot/cold."""
return self.data[ATTR_BACKUP]
def snapshot_exclude(self) -> List[str]:
"""Return Exclude list for snapshot."""
return self.data.get(ATTR_SNAPSHOT_EXCLUDE, [])
@property
def default_init(self) -> bool:
@ -428,30 +361,15 @@ class AddonModel(JobGroup, ABC):
return self.data[ATTR_INGRESS]
@property
def ingress_panel(self) -> bool | None:
def ingress_panel(self) -> Optional[bool]:
"""Return True if the add-on access support ingress."""
return None
@property
def ingress_stream(self) -> bool:
"""Return True if post requests to ingress should be streamed."""
return self.data[ATTR_INGRESS_STREAM]
@property
def with_gpio(self) -> bool:
"""Return True if the add-on access to GPIO interface."""
return self.data[ATTR_GPIO]
@property
def with_usb(self) -> bool:
"""Return True if the add-on need USB access."""
return self.data[ATTR_USB]
@property
def with_uart(self) -> bool:
"""Return True if we should map all UART device."""
return self.data[ATTR_UART]
@property
def with_udev(self) -> bool:
"""Return True if the add-on have his own udev."""
@ -462,11 +380,6 @@ class AddonModel(JobGroup, ABC):
"""Return True if the add-on access to kernel modules."""
return self.data[ATTR_KERNEL_MODULES]
@property
def with_realtime(self) -> bool:
"""Return True if the add-on need realtime schedule functions."""
return self.data[ATTR_REALTIME]
@property
def with_full_access(self) -> bool:
"""Return True if the add-on want full access to hardware."""
@ -477,11 +390,6 @@ class AddonModel(JobGroup, ABC):
"""Return True if the add-on read access to devicetree."""
return self.data[ATTR_DEVICETREE]
@property
def with_tmpfs(self) -> str | None:
"""Return if tmp is in memory of add-on."""
return self.data[ATTR_TMPFS]
@property
def access_auth_api(self) -> bool:
"""Return True if the add-on access to login/auth backend."""
@ -498,55 +406,47 @@ class AddonModel(JobGroup, ABC):
return self.data[ATTR_VIDEO]
@property
def homeassistant_version(self) -> str | None:
def homeassistant_version(self) -> Optional[str]:
"""Return min Home Assistant version they needed by Add-on."""
return self.data.get(ATTR_HOMEASSISTANT)
@property
def url(self) -> str | None:
def url(self) -> Optional[str]:
"""Return URL of add-on."""
return self.data.get(ATTR_URL)
@property
def with_icon(self) -> bool:
"""Return True if an icon exists."""
return self._path_icon_exists
return self.path_icon.exists()
@property
def with_logo(self) -> bool:
"""Return True if a logo exists."""
return self._path_logo_exists
return self.path_logo.exists()
@property
def with_changelog(self) -> bool:
"""Return True if a changelog exists."""
return self._path_changelog_exists
return self.path_changelog.exists()
@property
def with_documentation(self) -> bool:
"""Return True if a documentation exists."""
return self._path_documentation_exists
return self.path_documentation.exists()
@property
def supported_arch(self) -> list[str]:
def supported_arch(self) -> List[str]:
"""Return list of supported arch."""
return self.data[ATTR_ARCH]
@property
def supported_machine(self) -> list[str]:
def supported_machine(self) -> List[str]:
"""Return list of supported machine."""
return self.data.get(ATTR_MACHINE, [])
@property
def arch(self) -> str:
"""Return architecture to use for the addon's image."""
if ATTR_IMAGE in self.data:
return self.sys_arch.match(self.data[ATTR_ARCH])
return self.sys_arch.default
@property
def image(self) -> str | None:
def image(self) -> str:
"""Generate image name from data."""
return self._image(self.data)
@ -556,20 +456,19 @@ class AddonModel(JobGroup, ABC):
return ATTR_IMAGE not in self.data
@property
def map_volumes(self) -> dict[MappingType, FolderMapping]:
"""Return a dict of {MappingType: FolderMapping} from add-on."""
def map_volumes(self) -> Dict[str, str]:
"""Return a dict of {volume: policy} from add-on."""
volumes = {}
for volume in self.data[ATTR_MAP]:
volumes[MappingType(volume[ATTR_TYPE])] = FolderMapping(
volume.get(ATTR_PATH), volume[ATTR_READ_ONLY]
)
result = RE_VOLUME.match(volume)
volumes[result.group(1)] = result.group(2) or "ro"
return volumes
@property
def path_location(self) -> Path:
"""Return path to this add-on."""
return Path(self.data[ATTR_LOCATION])
return Path(self.data[ATTR_LOCATON])
@property
def path_icon(self) -> Path:
@ -597,120 +496,45 @@ class AddonModel(JobGroup, ABC):
return Path(self.path_location, "apparmor.txt")
@property
def schema(self) -> AddonOptions:
"""Return Addon options validation object."""
def schema(self) -> vol.Schema:
"""Create a schema for add-on options."""
raw_schema = self.data[ATTR_SCHEMA]
if isinstance(raw_schema, bool):
raw_schema = {}
return AddonOptions(self.coresys, raw_schema, self.name, self.slug)
if isinstance(raw_schema, bool):
return vol.Schema(dict)
return vol.Schema(vol.All(dict, validate_options(self.coresys, raw_schema)))
@property
def schema_ui(self) -> list[dict[Any, Any]] | None:
def schema_ui(self) -> Optional[List[Dict[str, Any]]]:
"""Create a UI schema for add-on options."""
raw_schema = self.data[ATTR_SCHEMA]
if isinstance(raw_schema, bool):
return None
return UiOptions(self.coresys)(raw_schema)
return schema_ui_options(raw_schema)
@property
def with_journald(self) -> bool:
"""Return True if the add-on accesses the system journal."""
return self.data[ATTR_JOURNALD]
@property
def signed(self) -> bool:
"""Return True if the image is signed."""
return ATTR_CODENOTARY in self.data
@property
def codenotary(self) -> str | None:
"""Return Signer email address for CAS."""
return self.data.get(ATTR_CODENOTARY)
@property
def breaking_versions(self) -> list[AwesomeVersion]:
"""Return breaking versions of addon."""
return self.data[ATTR_BREAKING_VERSIONS]
async def long_description(self) -> str | None:
"""Return README.md as long_description."""
def read_readme() -> str | None:
readme = Path(self.path_location, "README.md")
# If the readme does not exist
if not readme.exists():
return None
# Return data
return readme.read_text(encoding="utf-8")
return await self.sys_run_in_executor(read_readme)
def refresh_path_cache(self) -> Awaitable[None]:
"""Refresh cache of existing paths."""
def check_paths():
self._path_icon_exists = self.path_icon.exists()
self._path_logo_exists = self.path_logo.exists()
self._path_changelog_exists = self.path_changelog.exists()
self._path_documentation_exists = self.path_documentation.exists()
return self.sys_run_in_executor(check_paths)
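The cached booleans above exist because `Path.exists()` is blocking I/O; the checks run once in an executor, and the `with_*` properties then return plain attributes. A reduced sketch of the idea, with illustrative names (an assumption, not Supervisor API):
import asyncio
from pathlib import Path

# Run blocking filesystem checks off the event loop and cache the results.
async def cache_exists(paths: list[Path]) -> dict[Path, bool]:
    loop = asyncio.get_running_loop()
    return await loop.run_in_executor(None, lambda: {p: p.exists() for p in paths})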
def validate_availability(self) -> None:
"""Validate if addon is available for current system."""
return self._validate_availability(self.data, logger=_LOGGER.error)
def __eq__(self, other: Any) -> bool:
"""Compare add-on objects."""
def __eq__(self, other):
"""Compaired add-on objects."""
if not isinstance(other, AddonModel):
return False
return self.slug == other.slug
def __hash__(self) -> int:
"""Hash for add-on objects."""
return hash(self.slug)
def _validate_availability(
self, config, *, logger: Callable[..., None] | None = None
) -> None:
"""Validate if addon is available for current system."""
def _available(self, config) -> bool:
"""Return True if this add-on is available on this platform."""
# Architecture
if not self.sys_arch.is_supported(config[ATTR_ARCH]):
raise AddonsNotSupportedError(
f"Add-on {self.slug} not supported on this platform, supported architectures: {', '.join(config[ATTR_ARCH])}",
logger,
)
return False
# Machine / Hardware
machine = config.get(ATTR_MACHINE)
if machine and (
f"!{self.sys_machine}" in machine or self.sys_machine not in machine
):
raise AddonsNotSupportedError(
f"Add-on {self.slug} not supported on this machine, supported machine types: {', '.join(machine)}",
logger,
)
if machine and self.sys_machine not in machine:
return False
# Home Assistant
version: AwesomeVersion | None = config.get(ATTR_HOMEASSISTANT)
with suppress(AwesomeVersionException, TypeError):
if version and not version_is_new_enough(
self.sys_homeassistant.version, version
):
raise AddonsNotSupportedError(
f"Add-on {self.slug} not supported on this system, requires Home Assistant version {version} or greater",
logger,
)
def _available(self, config) -> bool:
"""Return True if this add-on is available on this platform."""
try:
self._validate_availability(config)
except AddonsNotSupportedError:
version = config.get(ATTR_HOMEASSISTANT) or self.sys_homeassistant.version
if pkg_version.parse(self.sys_homeassistant.version) < pkg_version.parse(
version
):
return False
return True
@ -724,3 +548,19 @@ class AddonModel(JobGroup, ABC):
# local build
return f"{config[ATTR_REPOSITORY]}/{self.sys_arch.default}-addon-{config[ATTR_SLUG]}"
def install(self) -> Awaitable[None]:
"""Install this add-on."""
return self.sys_addons.install(self.slug)
def uninstall(self) -> Awaitable[None]:
"""Uninstall this add-on."""
return self.sys_addons.uninstall(self.slug)
def update(self) -> Awaitable[None]:
"""Update this add-on."""
return self.sys_addons.update(self.slug)
def rebuild(self) -> Awaitable[None]:
"""Rebuild this add-on."""
return self.sys_addons.rebuild(self.slug)

View File

@ -1,423 +0,0 @@
"""Add-on Options / UI rendering."""
import hashlib
import logging
from pathlib import Path
import re
from typing import Any
import voluptuous as vol
from ..coresys import CoreSys, CoreSysAttributes
from ..exceptions import HardwareNotFound
from ..hardware.const import UdevSubsystem
from ..hardware.data import Device
from ..validate import network_port
_LOGGER: logging.Logger = logging.getLogger(__name__)
_STR = "str"
_INT = "int"
_FLOAT = "float"
_BOOL = "bool"
_PASSWORD = "password"
_EMAIL = "email"
_URL = "url"
_PORT = "port"
_MATCH = "match"
_LIST = "list"
_DEVICE = "device"
RE_SCHEMA_ELEMENT = re.compile(
r"^(?:"
r"|bool"
r"|email"
r"|url"
r"|port"
r"|device(?:\((?P<filter>subsystem=[a-z]+)\))?"
r"|str(?:\((?P<s_min>\d+)?,(?P<s_max>\d+)?\))?"
r"|password(?:\((?P<p_min>\d+)?,(?P<p_max>\d+)?\))?"
r"|int(?:\((?P<i_min>\d+)?,(?P<i_max>\d+)?\))?"
r"|float(?:\((?P<f_min>[\d\.]+)?,(?P<f_max>[\d\.]+)?\))?"
r"|match\((?P<match>.*)\)"
r"|list\((?P<list>.+)\)"
r")\??$"
)
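For reference, a few option type strings this pattern accepts (my own examples, not from the diff):
# "bool"                  -> boolean
# "str(1,32)?"            -> optional string with length bounds 1..32
# "int(0,100)"            -> integer in [0, 100]
# "list(low|medium|high)" -> enumeration of the listed values
# "device(subsystem=tty)" -> device path filtered by udev subsystem
assert RE_SCHEMA_ELEMENT.match("str(1,32)?") is not None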
_SCHEMA_LENGTH_PARTS = (
"i_min",
"i_max",
"f_min",
"f_max",
"s_min",
"s_max",
"p_min",
"p_max",
)
class AddonOptions(CoreSysAttributes):
"""Validate Add-ons Options."""
def __init__(
self, coresys: CoreSys, raw_schema: dict[str, Any], name: str, slug: str
):
"""Validate schema."""
self.coresys: CoreSys = coresys
self.raw_schema: dict[str, Any] = raw_schema
self.devices: set[Device] = set()
self.pwned: set[str] = set()
self._name = name
self._slug = slug
@property
def validate(self) -> vol.Schema:
"""Create a schema for add-on options."""
return vol.Schema(vol.All(dict, self))
def __call__(self, struct):
"""Create schema validator for add-ons options."""
options = {}
# read options
for key, value in struct.items():
# Ignore unknown options / remove from list
if key not in self.raw_schema:
_LOGGER.warning(
"Option '%s' does not exist in the schema for %s (%s)",
key,
self._name,
self._slug,
)
continue
typ = self.raw_schema[key]
try:
if isinstance(typ, list):
# nested value list
options[key] = self._nested_validate_list(typ[0], value, key)
elif isinstance(typ, dict):
# nested value dict
options[key] = self._nested_validate_dict(typ, value, key)
else:
# normal value
options[key] = self._single_validate(typ, value, key)
except (IndexError, KeyError):
raise vol.Invalid(
f"Type error for option '{key}' in {self._name} ({self._slug})"
) from None
self._check_missing_options(self.raw_schema, options, "root")
return options
# pylint: disable=no-value-for-parameter
def _single_validate(self, typ: str, value: Any, key: str):
"""Validate a single element."""
# A required option is missing its value
if value is None:
raise vol.Invalid(
f"Missing required option '{key}' in {self._name} ({self._slug})"
) from None
# Lookup secret
if str(value).startswith("!secret "):
secret: str = value.partition(" ")[2]
value = self.sys_homeassistant.secrets.get(secret)
if value is None:
raise vol.Invalid(
f"Unknown secret '{secret}' in {self._name} ({self._slug})"
) from None
# Parse extended data from the type string
match = RE_SCHEMA_ELEMENT.match(typ)
if not match:
raise vol.Invalid(
f"Unknown type '{typ}' in {self._name} ({self._slug})"
) from None
# prepare range
range_args: dict[str, Any] = {}
for group_name in _SCHEMA_LENGTH_PARTS:
group_value = match.group(group_name)
if group_value:
range_args[group_name[2:]] = float(group_value)
if typ.startswith(_STR) or typ.startswith(_PASSWORD):
if typ.startswith(_PASSWORD) and value:
self.pwned.add(hashlib.sha1(str(value).encode()).hexdigest())
return vol.All(str(value), vol.Range(**range_args))(value)
elif typ.startswith(_INT):
return vol.All(vol.Coerce(int), vol.Range(**range_args))(value)
elif typ.startswith(_FLOAT):
return vol.All(vol.Coerce(float), vol.Range(**range_args))(value)
elif typ.startswith(_BOOL):
return vol.Boolean()(value)
elif typ.startswith(_EMAIL):
return vol.Email()(value)
elif typ.startswith(_URL):
return vol.Url()(value)
elif typ.startswith(_PORT):
return network_port(value)
elif typ.startswith(_MATCH):
return vol.Match(match.group("match"))(str(value))
elif typ.startswith(_LIST):
return vol.In(match.group("list").split("|"))(str(value))
elif typ.startswith(_DEVICE):
try:
device = self.sys_hardware.get_by_path(Path(value))
except HardwareNotFound:
raise vol.Invalid(
f"Device '{value}' does not exist in {self._name} ({self._slug})"
) from None
# Have filter
if match.group("filter"):
str_filter = match.group("filter")
device_filter = _create_device_filter(str_filter)
if device not in self.sys_hardware.filter_devices(**device_filter):
raise vol.Invalid(
f"Device '{value}' don't match the filter {str_filter}! in {self._name} ({self._slug})"
)
# Device valid
self.devices.add(device)
return str(device.path)
raise vol.Invalid(
f"Fatal error for option '{key}' with type '{typ}' in {self._name} ({self._slug})"
) from None
def _nested_validate_list(self, typ: Any, data_list: list[Any], key: str):
"""Validate nested items."""
options = []
# Make sure it is a list
if not isinstance(data_list, list):
raise vol.Invalid(
f"Invalid list for option '{key}' in {self._name} ({self._slug})"
) from None
# Process list
for element in data_list:
# Nested?
if isinstance(typ, dict):
c_options = self._nested_validate_dict(typ, element, key)
options.append(c_options)
else:
options.append(self._single_validate(typ, element, key))
return options
def _nested_validate_dict(
self, typ: dict[Any, Any], data_dict: dict[Any, Any], key: str
):
"""Validate nested items."""
options = {}
# Make sure it is a dict
if not isinstance(data_dict, dict):
raise vol.Invalid(
f"Invalid dict for option '{key}' in {self._name} ({self._slug})"
) from None
# Process dict
for c_key, c_value in data_dict.items():
# Ignore unknown options / remove from list
if c_key not in typ:
_LOGGER.warning(
"Unknown option '%s' for %s (%s)", c_key, self._name, self._slug
)
continue
# Nested?
if isinstance(typ[c_key], list):
options[c_key] = self._nested_validate_list(
typ[c_key][0], c_value, c_key
)
else:
options[c_key] = self._single_validate(typ[c_key], c_value, c_key)
self._check_missing_options(typ, options, key)
return options
def _check_missing_options(
self, origin: dict[Any, Any], exists: dict[Any, Any], root: str
) -> None:
"""Check if all options are exists."""
missing = set(origin) - set(exists)
for miss_opt in missing:
miss_schema = origin[miss_opt]
# If it's a list, the value inside decides whether it's optional, e.g. ["str?"]
if isinstance(miss_schema, list) and len(miss_schema) > 0:
miss_schema = miss_schema[0]
if isinstance(miss_schema, str) and miss_schema.endswith("?"):
continue
raise vol.Invalid(
f"Missing option '{miss_opt}' in {root} in {self._name} ({self._slug})"
) from None
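A hypothetical end-to-end use of this validator, assuming a `coresys` instance is at hand; the schema and values are my own example:
# Build the validator from a raw schema and apply it to user options.
options = AddonOptions(
    coresys, {"port": "port", "name": "str?"}, "Example add-on", "example"
)
validated = options.validate({"port": 8080})
# -> {"port": 8080}; "name" may be omitted because of the trailing "?".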
class UiOptions(CoreSysAttributes):
"""Render UI Add-ons Options."""
def __init__(self, coresys: CoreSys) -> None:
"""Initialize UI option render."""
self.coresys = coresys
def __call__(self, raw_schema: dict[str, Any]) -> list[dict[str, Any]]:
"""Generate UI schema."""
ui_schema: list[dict[str, Any]] = []
# read options
for key, value in raw_schema.items():
if isinstance(value, list):
# nested value list
self._nested_ui_list(ui_schema, value, key)
elif isinstance(value, dict):
# nested value dict
self._nested_ui_dict(ui_schema, value, key)
else:
# normal value
self._single_ui_option(ui_schema, value, key)
return ui_schema
def _single_ui_option(
self,
ui_schema: list[dict[str, Any]],
value: str,
key: str,
multiple: bool = False,
) -> None:
"""Validate a single element."""
ui_node: dict[str, str | bool | float | list[str]] = {"name": key}
# If multiple
if multiple:
ui_node["multiple"] = True
# Parse extended data from the type string
match = RE_SCHEMA_ELEMENT.match(value)
if not match:
return
# Prepare range
for group_name in _SCHEMA_LENGTH_PARTS:
group_value = match.group(group_name)
if not group_value:
continue
if group_name[2:] == "min":
ui_node["lengthMin"] = float(group_value)
elif group_name[2:] == "max":
ui_node["lengthMax"] = float(group_value)
# If required
if value.endswith("?"):
ui_node["optional"] = True
else:
ui_node["required"] = True
# Data types
if value.startswith(_STR):
ui_node["type"] = "string"
elif value.startswith(_PASSWORD):
ui_node["type"] = "string"
ui_node["format"] = "password"
elif value.startswith(_INT):
ui_node["type"] = "integer"
elif value.startswith(_FLOAT):
ui_node["type"] = "float"
elif value.startswith(_BOOL):
ui_node["type"] = "boolean"
elif value.startswith(_EMAIL):
ui_node["type"] = "string"
ui_node["format"] = "email"
elif value.startswith(_URL):
ui_node["type"] = "string"
ui_node["format"] = "url"
elif value.startswith(_PORT):
ui_node["type"] = "integer"
elif value.startswith(_MATCH):
ui_node["type"] = "string"
elif value.startswith(_LIST):
ui_node["type"] = "select"
ui_node["options"] = match.group("list").split("|")
elif value.startswith(_DEVICE):
ui_node["type"] = "select"
# Have filter
if match.group("filter"):
device_filter = _create_device_filter(match.group("filter"))
ui_node["options"] = [
(device.by_id or device.path).as_posix()
for device in self.sys_hardware.filter_devices(**device_filter)
]
else:
ui_node["options"] = [
(device.by_id or device.path).as_posix()
for device in self.sys_hardware.devices
]
ui_schema.append(ui_node)
def _nested_ui_list(
self,
ui_schema: list[dict[str, Any]],
option_list: list[Any],
key: str,
) -> None:
"""UI nested list items."""
try:
element = option_list[0]
except IndexError:
_LOGGER.error("Invalid schema %s", key)
return
if isinstance(element, dict):
self._nested_ui_dict(ui_schema, element, key, multiple=True)
else:
self._single_ui_option(ui_schema, element, key, multiple=True)
def _nested_ui_dict(
self,
ui_schema: list[dict[str, Any]],
option_dict: dict[str, Any],
key: str,
multiple: bool = False,
) -> None:
"""UI nested dict items."""
ui_node: dict[str, Any] = {
"name": key,
"type": "schema",
"optional": True,
"multiple": multiple,
}
nested_schema: list[dict[str, Any]] = []
for c_key, c_value in option_dict.items():
# Nested?
if isinstance(c_value, list):
self._nested_ui_list(nested_schema, c_value, c_key)
else:
self._single_ui_option(nested_schema, c_value, c_key)
ui_node["schema"] = nested_schema
ui_schema.append(ui_node)
def _create_device_filter(str_filter: str) -> dict[str, Any]:
"""Generate device Filter."""
raw_filter = dict(value.split("=") for value in str_filter.split(";"))
clean_filter: dict[str, Any] = {}
for key, value in raw_filter.items():
if key == "subsystem":
clean_filter[key] = UdevSubsystem(value)
else:
clean_filter[key] = value
return clean_filter
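For example (my own call, consistent with the filter grammar above, and assuming the enum defines a "tty" member):
# "subsystem=tty" -> {"subsystem": UdevSubsystem("tty")}, ready to be passed
# as keyword arguments to sys_hardware.filter_devices().
device_filter = _create_device_filter("subsystem=tty")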

View File

@ -1,14 +1,23 @@
"""Util add-ons functions."""
from __future__ import annotations
import asyncio
import logging
from pathlib import Path
import subprocess
from typing import TYPE_CHECKING
from ..const import ROLE_ADMIN, ROLE_MANAGER, SECURITY_DISABLE, SECURITY_PROFILE
from ..docker.const import Capabilities
from ..const import (
PRIVILEGED_DAC_READ_SEARCH,
PRIVILEGED_NET_ADMIN,
PRIVILEGED_SYS_ADMIN,
PRIVILEGED_SYS_MODULE,
PRIVILEGED_SYS_PTRACE,
PRIVILEGED_SYS_RAWIO,
ROLE_ADMIN,
ROLE_MANAGER,
SECURITY_DISABLE,
SECURITY_PROFILE,
)
if TYPE_CHECKING:
from .model import AddonModel
@ -17,10 +26,10 @@ _LOGGER: logging.Logger = logging.getLogger(__name__)
def rating_security(addon: AddonModel) -> int:
"""Return 1-8 for security rating.
"""Return 1-6 for security rating.
1 = not secure
8 = highly secure
6 = highly secure
"""
rating = 5
@ -36,28 +45,17 @@ def rating_security(addon: AddonModel) -> int:
elif addon.access_auth_api:
rating += 1
# Signed
if addon.signed:
rating += 1
# Privileged options
if (
any(
privilege in addon.privileged
for privilege in (
Capabilities.BPF,
Capabilities.CHECKPOINT_RESTORE,
Capabilities.DAC_READ_SEARCH,
Capabilities.NET_ADMIN,
Capabilities.NET_RAW,
Capabilities.PERFMON,
Capabilities.SYS_ADMIN,
Capabilities.SYS_MODULE,
Capabilities.SYS_PTRACE,
Capabilities.SYS_RAWIO,
)
if any(
privilege in addon.privileged
for privilege in (
PRIVILEGED_NET_ADMIN,
PRIVILEGED_SYS_ADMIN,
PRIVILEGED_SYS_RAWIO,
PRIVILEGED_SYS_PTRACE,
PRIVILEGED_SYS_MODULE,
PRIVILEGED_DAC_READ_SEARCH,
)
or addon.with_kernel_modules
):
rating += -1
@ -75,31 +73,29 @@ def rating_security(addon: AddonModel) -> int:
if addon.host_pid:
rating += -2
# UTS host namespace allows setting the hostname only with SYS_ADMIN
if addon.host_uts and Capabilities.SYS_ADMIN in addon.privileged:
rating += -1
# Full Access
if addon.with_full_access:
rating += -2
# Docker Access & full Access
if addon.access_docker_api or addon.with_full_access:
# Docker Access
if addon.access_docker_api:
rating = 1
return max(min(8, rating), 1)
return max(min(6, rating), 1)
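A worked toy reading of the visible rules, for a hypothetical add-on of my own: it starts at the base rating of 5, gains one point for using the auth API, loses one for a privileged capability such as NET_ADMIN, and loses two for the host PID namespace:
# 5 (base) + 1 (auth API) - 1 (NET_ADMIN) - 2 (host_pid) = 3
# Docker API access overrides everything else and pins the rating to 1.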
def remove_data(folder: Path) -> None:
"""Remove folder and reset privileged.
Must be run in executor.
"""
async def remove_data(folder: Path) -> None:
"""Remove folder and reset privileged."""
try:
subprocess.run(
["rm", "-rf", str(folder)], stdout=subprocess.DEVNULL, text=True, check=True
proc = await asyncio.create_subprocess_exec(
"rm", "-rf", str(folder), stdout=asyncio.subprocess.DEVNULL
)
_, error_msg = await proc.communicate()
except OSError as err:
error_msg = str(err)
except subprocess.CalledProcessError as procerr:
error_msg = procerr.stderr.strip()
else:
return
if proc.returncode == 0:
return
_LOGGER.error("Can't remove Add-on Data: %s", error_msg)
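Design note: both variants shell out to `rm -rf` rather than using `shutil.rmtree`, presumably so removal also succeeds on files an add-on container wrote with different ownership. Per its docstring, the new synchronous variant must be dispatched to an executor by its callers, roughly like this hypothetical call site (an assumption, not from the diff):
from pathlib import Path

# Run the blocking removal off the event loop.
async def purge_addon_data(coresys, folder: Path) -> None:
    await coresys.run_in_executor(remove_data, folder)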

View File

@ -1,9 +1,8 @@
"""Validate add-ons options schema."""
import logging
import re
import secrets
from typing import Any
from typing import Any, Dict, List
import uuid
import voluptuous as vol
@ -19,13 +18,10 @@ from ..const import (
ATTR_AUDIO_INPUT,
ATTR_AUDIO_OUTPUT,
ATTR_AUTH_API,
ATTR_AUTO_UART,
ATTR_AUTO_UPDATE,
ATTR_BACKUP_EXCLUDE,
ATTR_BACKUP_POST,
ATTR_BACKUP_PRE,
ATTR_BOOT,
ATTR_BUILD_FROM,
ATTR_CONFIGURATION,
ATTR_DESCRIPTON,
ATTR_DEVICES,
ATTR_DEVICETREE,
@ -42,20 +38,16 @@ from ..const import (
ATTR_HOST_IPC,
ATTR_HOST_NETWORK,
ATTR_HOST_PID,
ATTR_HOST_UTS,
ATTR_IMAGE,
ATTR_INGRESS,
ATTR_INGRESS_ENTRY,
ATTR_INGRESS_PANEL,
ATTR_INGRESS_PORT,
ATTR_INGRESS_STREAM,
ATTR_INGRESS_TOKEN,
ATTR_INIT,
ATTR_JOURNALD,
ATTR_KERNEL_MODULES,
ATTR_LABELS,
ATTR_LEGACY,
ATTR_LOCATION,
ATTR_LOCATON,
ATTR_MACHINE,
ATTR_MAP,
ATTR_NAME,
@ -68,271 +60,141 @@ from ..const import (
ATTR_PORTS_DESCRIPTION,
ATTR_PRIVILEGED,
ATTR_PROTECTED,
ATTR_REALTIME,
ATTR_REPOSITORY,
ATTR_SCHEMA,
ATTR_SERVICES,
ATTR_SLUG,
ATTR_SNAPSHOT_EXCLUDE,
ATTR_SQUASH,
ATTR_STAGE,
ATTR_STARTUP,
ATTR_STATE,
ATTR_STDIN,
ATTR_SYSTEM,
ATTR_SYSTEM_MANAGED,
ATTR_SYSTEM_MANAGED_CONFIG_ENTRY,
ATTR_TIMEOUT,
ATTR_TMPFS,
ATTR_TRANSLATIONS,
ATTR_TYPE,
ATTR_UART,
ATTR_UDEV,
ATTR_URL,
ATTR_USB,
ATTR_USER,
ATTR_UUID,
ATTR_VERSION,
ATTR_VIDEO,
ATTR_WATCHDOG,
ATTR_WEBUI,
BOOT_AUTO,
BOOT_MANUAL,
PRIVILEGED_ALL,
ROLE_ALL,
ROLE_DEFAULT,
AddonBoot,
AddonBootConfig,
AddonStage,
AddonStartup,
AddonState,
STARTUP_ALL,
STARTUP_APPLICATION,
STARTUP_SERVICES,
STATE_STARTED,
STATE_STOPPED,
AddonStages,
)
from ..docker.const import Capabilities
from ..coresys import CoreSys
from ..discovery.validate import valid_discovery_service
from ..validate import (
docker_image,
docker_ports,
docker_ports_description,
DOCKER_PORTS,
DOCKER_PORTS_DESCRIPTION,
network_port,
token,
uuid_match,
version_tag,
)
from .const import (
ATTR_BACKUP,
ATTR_BREAKING_VERSIONS,
ATTR_CODENOTARY,
ATTR_PATH,
ATTR_READ_ONLY,
RE_SLUG,
AddonBackupMode,
MappingType,
)
from .options import RE_SCHEMA_ELEMENT
_LOGGER: logging.Logger = logging.getLogger(__name__)
RE_VOLUME = re.compile(
r"^(data|config|ssl|addons|backup|share|media|homeassistant_config|all_addon_configs|addon_config)(?::(rw|ro))?$"
)
RE_VOLUME = re.compile(r"^(config|ssl|addons|backup|share)(?::(rw|ro))?$")
RE_SERVICE = re.compile(r"^(?P<service>mqtt|mysql):(?P<rights>provide|want|need)$")
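Some strings these two patterns accept (my own examples, not from the diff):
# RE_VOLUME:  "config:rw", "ssl" (mode defaults to "ro"), "backup:ro"
# RE_SERVICE: "mqtt:provide", "mysql:need", "mqtt:want"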
V_STR = "str"
V_INT = "int"
V_FLOAT = "float"
V_BOOL = "bool"
V_PASSWORD = "password"
V_EMAIL = "email"
V_URL = "url"
V_PORT = "port"
V_MATCH = "match"
V_LIST = "list"
RE_SCHEMA_ELEMENT = re.compile(
r"^(?:"
r"|bool|email|url|port"
r"|str(?:\((?P<s_min>\d+)?,(?P<s_max>\d+)?\))?"
r"|password(?:\((?P<p_min>\d+)?,(?P<p_max>\d+)?\))?"
r"|int(?:\((?P<i_min>\d+)?,(?P<i_max>\d+)?\))?"
r"|float(?:\((?P<f_min>[\d\.]+)?,(?P<f_max>[\d\.]+)?\))?"
r"|match\((?P<match>.*)\)"
r"|list\((?P<list>.+)\)"
r")\??$"
)
_SCHEMA_LENGTH_PARTS = (
"i_min",
"i_max",
"f_min",
"f_max",
"s_min",
"s_max",
"p_min",
"p_max",
)
RE_DOCKER_IMAGE = re.compile(r"^([a-zA-Z\-\.:\d{}]+/)*?([\-\w{}]+)/([\-\w{}]+)$")
RE_DOCKER_IMAGE_BUILD = re.compile(
r"^([a-zA-Z\-\.:\d{}]+/)*?([\-\w{}]+)/([\-\w{}]+)(:[\.\-\w{}]+)?$"
)
SCHEMA_ELEMENT = vol.Match(RE_SCHEMA_ELEMENT)
RE_MACHINE = re.compile(
r"^!?(?:"
r"|intel-nuc"
r"|generic-x86-64"
r"|odroid-c2"
r"|odroid-c4"
r"|odroid-m1"
r"|odroid-n2"
r"|odroid-xu"
r"|qemuarm-64"
r"|qemuarm"
r"|qemux86-64"
r"|qemux86"
r"|raspberrypi"
r"|raspberrypi2"
r"|raspberrypi3-64"
r"|raspberrypi3"
r"|raspberrypi4-64"
r"|raspberrypi4"
r"|raspberrypi5-64"
r"|yellow"
r"|green"
r"|tinker"
r")$"
)
RE_SLUG_FIELD = re.compile(r"^" + RE_SLUG + r"$")
MACHINE_ALL = [
"intel-nuc",
"odroid-c2",
"odroid-n2",
"odroid-xu",
"qemuarm-64",
"qemuarm",
"qemux86-64",
"qemux86",
"raspberrypi",
"raspberrypi2",
"raspberrypi3-64",
"raspberrypi3",
"raspberrypi4-64",
"raspberrypi4",
"tinker",
]
def _warn_addon_config(config: dict[str, Any]):
"""Warn about miss configs."""
name = config.get(ATTR_NAME)
if not name:
raise vol.Invalid("Invalid Add-on config!")
if config.get(ATTR_FULL_ACCESS, False) and (
config.get(ATTR_DEVICES)
or config.get(ATTR_UART)
or config.get(ATTR_USB)
or config.get(ATTR_GPIO)
):
_LOGGER.warning(
"Add-on have full device access, and selective device access in the configuration. Please report this to the maintainer of %s",
name,
)
if config.get(ATTR_BACKUP, AddonBackupMode.HOT) == AddonBackupMode.COLD and (
config.get(ATTR_BACKUP_POST) or config.get(ATTR_BACKUP_PRE)
):
_LOGGER.warning(
"Add-on which only support COLD backups trying to use post/pre commands. Please report this to the maintainer of %s",
name,
)
return config
def _migrate_addon_config(protocol=False):
"""Migrate addon config."""
def _migrate(config: dict[str, Any]):
name = config.get(ATTR_NAME)
if not name:
raise vol.Invalid("Invalid Add-on config!")
# Startup 2018-03-30
if config.get(ATTR_STARTUP) in ("before", "after"):
value = config[ATTR_STARTUP]
if protocol:
_LOGGER.warning(
"Add-on config 'startup' with '%s' is deprecated. Please report this to the maintainer of %s",
value,
name,
)
if value == "before":
config[ATTR_STARTUP] = AddonStartup.SERVICES
elif value == "after":
config[ATTR_STARTUP] = AddonStartup.APPLICATION
# UART 2021-01-20
if "auto_uart" in config:
if protocol:
_LOGGER.warning(
"Add-on config 'auto_uart' is deprecated, use 'uart'. Please report this to the maintainer of %s",
name,
)
config[ATTR_UART] = config.pop("auto_uart")
# Device 2021-01-20
if ATTR_DEVICES in config and any(":" in line for line in config[ATTR_DEVICES]):
if protocol:
_LOGGER.warning(
"Add-on config 'devices' use a deprecated format, the new format uses a list of paths only. Please report this to the maintainer of %s",
name,
)
config[ATTR_DEVICES] = [line.split(":")[0] for line in config[ATTR_DEVICES]]
# TMPFS 2021-02-01
if ATTR_TMPFS in config and not isinstance(config[ATTR_TMPFS], bool):
if protocol:
_LOGGER.warning(
"Add-on config 'tmpfs' use a deprecated format, new it's only a boolean. Please report this to the maintainer of %s",
name,
)
config[ATTR_TMPFS] = True
# 2021-06 "snapshot" renamed to "backup"
for entry in (
"snapshot_exclude",
"snapshot_post",
"snapshot_pre",
"snapshot",
):
if entry in config:
new_entry = entry.replace("snapshot", "backup")
config[new_entry] = config.pop(entry)
_LOGGER.warning(
"Add-on config '%s' is deprecated, '%s' should be used instead. Please report this to the maintainer of %s",
entry,
new_entry,
name,
)
# 2023-11 "map" entries can also be dict to allow path configuration
volumes = []
for entry in config.get(ATTR_MAP, []):
if isinstance(entry, dict):
volumes.append(entry)
if isinstance(entry, str):
result = RE_VOLUME.match(entry)
if not result:
continue
volumes.append(
{
ATTR_TYPE: result.group(1),
ATTR_READ_ONLY: result.group(2) != "rw",
}
)
if volumes:
config[ATTR_MAP] = volumes
# 2023-10 "config" became "homeassistant" so /config can be used for addon's public config
if any(volume[ATTR_TYPE] == MappingType.CONFIG for volume in volumes):
if any(
volume
and volume[ATTR_TYPE]
in {MappingType.ADDON_CONFIG, MappingType.HOMEASSISTANT_CONFIG}
for volume in volumes
):
_LOGGER.warning(
"Add-on config using incompatible map options, '%s' and '%s' are ignored if '%s' is included. Please report this to the maintainer of %s",
MappingType.ADDON_CONFIG,
MappingType.HOMEASSISTANT_CONFIG,
MappingType.CONFIG,
name,
)
else:
_LOGGER.debug(
"Add-on config using deprecated map option '%s' instead of '%s'. Please report this to the maintainer of %s",
MappingType.CONFIG,
MappingType.HOMEASSISTANT_CONFIG,
name,
)
return config
return _migrate
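An illustrative before/after for the migrations above, using my own example config (the plain dict keys stand in for the corresponding ATTR_* constants):
# Input with deprecated keys:
#   {"name": "X", "startup": "before", "auto_uart": True,
#    "snapshot_exclude": ["*.db"], "map": ["config:rw"]}
# After _migrate:
#   {"name": "X", "startup": AddonStartup.SERVICES, "uart": True,
#    "backup_exclude": ["*.db"],
#    "map": [{"type": "config", "read_only": False}]}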
def _simple_startup(value):
"""Simple startup schema."""
if value == "before":
return STARTUP_SERVICES
if value == "after":
return STARTUP_APPLICATION
return value
# pylint: disable=no-value-for-parameter
_SCHEMA_ADDON_CONFIG = vol.Schema(
SCHEMA_ADDON_CONFIG = vol.Schema(
{
vol.Required(ATTR_NAME): str,
vol.Required(ATTR_VERSION): version_tag,
vol.Required(ATTR_SLUG): vol.Match(RE_SLUG_FIELD),
vol.Required(ATTR_DESCRIPTON): str,
vol.Required(ATTR_NAME): vol.Coerce(str),
vol.Required(ATTR_VERSION): vol.Coerce(str),
vol.Required(ATTR_SLUG): vol.Coerce(str),
vol.Required(ATTR_DESCRIPTON): vol.Coerce(str),
vol.Required(ATTR_ARCH): [vol.In(ARCH_ALL)],
vol.Optional(ATTR_MACHINE): vol.All([vol.Match(RE_MACHINE)], vol.Unique()),
vol.Optional(ATTR_MACHINE): [vol.In(MACHINE_ALL)],
vol.Optional(ATTR_URL): vol.Url(),
vol.Optional(ATTR_STARTUP, default=AddonStartup.APPLICATION): vol.Coerce(
AddonStartup
),
vol.Optional(ATTR_BOOT, default=AddonBootConfig.AUTO): vol.Coerce(
AddonBootConfig
),
vol.Required(ATTR_STARTUP): vol.All(_simple_startup, vol.In(STARTUP_ALL)),
vol.Required(ATTR_BOOT): vol.In([BOOT_AUTO, BOOT_MANUAL]),
vol.Optional(ATTR_INIT, default=True): vol.Boolean(),
vol.Optional(ATTR_ADVANCED, default=False): vol.Boolean(),
vol.Optional(ATTR_STAGE, default=AddonStage.STABLE): vol.Coerce(AddonStage),
vol.Optional(ATTR_PORTS): docker_ports,
vol.Optional(ATTR_PORTS_DESCRIPTION): docker_ports_description,
vol.Optional(ATTR_WATCHDOG): vol.Match(
r"^(?:https?|\[PROTO:\w+\]|tcp):\/\/\[HOST\]:(\[PORT:\d+\]|\d+).*$"
),
vol.Optional(ATTR_STAGE, default=AddonStages.STABLE): vol.Coerce(AddonStages),
vol.Optional(ATTR_PORTS): DOCKER_PORTS,
vol.Optional(ATTR_PORTS_DESCRIPTION): DOCKER_PORTS_DESCRIPTION,
vol.Optional(ATTR_WEBUI): vol.Match(
r"^(?:https?|\[PROTO:\w+\]):\/\/\[HOST\]:\[PORT:\d+\].*$"
),
@ -340,41 +202,29 @@ _SCHEMA_ADDON_CONFIG = vol.Schema(
vol.Optional(ATTR_INGRESS_PORT, default=8099): vol.Any(
network_port, vol.Equal(0)
),
vol.Optional(ATTR_INGRESS_ENTRY): str,
vol.Optional(ATTR_INGRESS_STREAM, default=False): vol.Boolean(),
vol.Optional(ATTR_PANEL_ICON, default="mdi:puzzle"): str,
vol.Optional(ATTR_PANEL_TITLE): str,
vol.Optional(ATTR_INGRESS_ENTRY): vol.Coerce(str),
vol.Optional(ATTR_PANEL_ICON, default="mdi:puzzle"): vol.Coerce(str),
vol.Optional(ATTR_PANEL_TITLE): vol.Coerce(str),
vol.Optional(ATTR_PANEL_ADMIN, default=True): vol.Boolean(),
vol.Optional(ATTR_HOMEASSISTANT): version_tag,
vol.Optional(ATTR_HOMEASSISTANT): vol.Maybe(vol.Coerce(str)),
vol.Optional(ATTR_HOST_NETWORK, default=False): vol.Boolean(),
vol.Optional(ATTR_HOST_PID, default=False): vol.Boolean(),
vol.Optional(ATTR_HOST_IPC, default=False): vol.Boolean(),
vol.Optional(ATTR_HOST_UTS, default=False): vol.Boolean(),
vol.Optional(ATTR_HOST_DBUS, default=False): vol.Boolean(),
vol.Optional(ATTR_DEVICES): [str],
vol.Optional(ATTR_DEVICES): [vol.Match(r"^(.*):(.*):([rwm]{1,3})$")],
vol.Optional(ATTR_AUTO_UART, default=False): vol.Boolean(),
vol.Optional(ATTR_UDEV, default=False): vol.Boolean(),
vol.Optional(ATTR_TMPFS, default=False): vol.Boolean(),
vol.Optional(ATTR_MAP, default=list): [
vol.Schema(
{
vol.Required(ATTR_TYPE): vol.Coerce(MappingType),
vol.Optional(ATTR_READ_ONLY, default=True): bool,
vol.Optional(ATTR_PATH): str,
}
)
],
vol.Optional(ATTR_ENVIRONMENT): {vol.Match(r"\w*"): str},
vol.Optional(ATTR_PRIVILEGED): [vol.Coerce(Capabilities)],
vol.Optional(ATTR_TMPFS): vol.Match(r"^size=(\d)*[kmg](,uid=\d{1,4})?(,rw)?$"),
vol.Optional(ATTR_MAP, default=list): [vol.Match(RE_VOLUME)],
vol.Optional(ATTR_ENVIRONMENT): {vol.Match(r"\w*"): vol.Coerce(str)},
vol.Optional(ATTR_PRIVILEGED): [vol.In(PRIVILEGED_ALL)],
vol.Optional(ATTR_APPARMOR, default=True): vol.Boolean(),
vol.Optional(ATTR_FULL_ACCESS, default=False): vol.Boolean(),
vol.Optional(ATTR_AUDIO, default=False): vol.Boolean(),
vol.Optional(ATTR_VIDEO, default=False): vol.Boolean(),
vol.Optional(ATTR_GPIO, default=False): vol.Boolean(),
vol.Optional(ATTR_USB, default=False): vol.Boolean(),
vol.Optional(ATTR_UART, default=False): vol.Boolean(),
vol.Optional(ATTR_DEVICETREE, default=False): vol.Boolean(),
vol.Optional(ATTR_KERNEL_MODULES, default=False): vol.Boolean(),
vol.Optional(ATTR_REALTIME, default=False): vol.Boolean(),
vol.Optional(ATTR_HASSIO_API, default=False): vol.Boolean(),
vol.Optional(ATTR_HASSIO_ROLE, default=ROLE_DEFAULT): vol.In(ROLE_ALL),
vol.Optional(ATTR_HOMEASSISTANT_API, default=False): vol.Boolean(),
@ -383,74 +233,51 @@ _SCHEMA_ADDON_CONFIG = vol.Schema(
vol.Optional(ATTR_DOCKER_API, default=False): vol.Boolean(),
vol.Optional(ATTR_AUTH_API, default=False): vol.Boolean(),
vol.Optional(ATTR_SERVICES): [vol.Match(RE_SERVICE)],
vol.Optional(ATTR_DISCOVERY): [str],
vol.Optional(ATTR_BACKUP_EXCLUDE): [str],
vol.Optional(ATTR_BACKUP_PRE): str,
vol.Optional(ATTR_BACKUP_POST): str,
vol.Optional(ATTR_BACKUP, default=AddonBackupMode.HOT): vol.Coerce(
AddonBackupMode
),
vol.Optional(ATTR_CODENOTARY): vol.Email(),
vol.Optional(ATTR_OPTIONS, default={}): dict,
vol.Optional(ATTR_SCHEMA, default={}): vol.Any(
vol.Optional(ATTR_DISCOVERY): [valid_discovery_service],
vol.Optional(ATTR_SNAPSHOT_EXCLUDE): [vol.Coerce(str)],
vol.Required(ATTR_OPTIONS): dict,
vol.Required(ATTR_SCHEMA): vol.Any(
vol.Schema(
{
str: vol.Any(
vol.Coerce(str): vol.Any(
SCHEMA_ELEMENT,
[
vol.Any(
SCHEMA_ELEMENT,
{str: vol.Any(SCHEMA_ELEMENT, [SCHEMA_ELEMENT])},
{
vol.Coerce(str): vol.Any(
SCHEMA_ELEMENT, [SCHEMA_ELEMENT]
)
},
)
],
vol.Schema({str: vol.Any(SCHEMA_ELEMENT, [SCHEMA_ELEMENT])}),
vol.Schema(
{vol.Coerce(str): vol.Any(SCHEMA_ELEMENT, [SCHEMA_ELEMENT])}
),
)
}
),
False,
),
vol.Optional(ATTR_IMAGE): docker_image,
vol.Optional(ATTR_IMAGE): vol.Match(RE_DOCKER_IMAGE),
vol.Optional(ATTR_TIMEOUT, default=10): vol.All(
vol.Coerce(int), vol.Range(min=10, max=300)
),
vol.Optional(ATTR_JOURNALD, default=False): vol.Boolean(),
vol.Optional(ATTR_BREAKING_VERSIONS, default=list): [version_tag],
},
extra=vol.REMOVE_EXTRA,
)
SCHEMA_ADDON_CONFIG = vol.All(
_migrate_addon_config(True), _warn_addon_config, _SCHEMA_ADDON_CONFIG
)
# pylint: disable=no-value-for-parameter
SCHEMA_BUILD_CONFIG = vol.Schema(
{
vol.Optional(ATTR_BUILD_FROM, default=dict): vol.Any(
vol.Match(RE_DOCKER_IMAGE_BUILD),
vol.Schema({vol.In(ARCH_ALL): vol.Match(RE_DOCKER_IMAGE_BUILD)}),
vol.Optional(ATTR_BUILD_FROM, default=dict): vol.Schema(
{vol.In(ARCH_ALL): vol.Match(RE_DOCKER_IMAGE_BUILD)}
),
vol.Optional(ATTR_SQUASH, default=False): vol.Boolean(),
vol.Optional(ATTR_ARGS, default=dict): vol.Schema({str: str}),
vol.Optional(ATTR_LABELS, default=dict): vol.Schema({str: str}),
},
extra=vol.REMOVE_EXTRA,
)
SCHEMA_TRANSLATION_CONFIGURATION = vol.Schema(
{
vol.Required(ATTR_NAME): str,
vol.Optional(ATTR_DESCRIPTON): vol.Maybe(str),
},
extra=vol.REMOVE_EXTRA,
)
SCHEMA_ADDON_TRANSLATIONS = vol.Schema(
{
vol.Optional(ATTR_CONFIGURATION): {str: SCHEMA_TRANSLATION_CONFIGURATION},
vol.Optional(ATTR_NETWORK): {str: str},
vol.Optional(ATTR_ARGS, default=dict): vol.Schema(
{vol.Coerce(str): vol.Coerce(str)}
),
},
extra=vol.REMOVE_EXTRA,
)
@ -459,55 +286,289 @@ SCHEMA_ADDON_TRANSLATIONS = vol.Schema(
# pylint: disable=no-value-for-parameter
SCHEMA_ADDON_USER = vol.Schema(
{
vol.Required(ATTR_VERSION): version_tag,
vol.Optional(ATTR_IMAGE): docker_image,
vol.Required(ATTR_VERSION): vol.Coerce(str),
vol.Optional(ATTR_IMAGE): vol.Coerce(str),
vol.Optional(ATTR_UUID, default=lambda: uuid.uuid4().hex): uuid_match,
vol.Optional(ATTR_ACCESS_TOKEN): token,
vol.Optional(ATTR_INGRESS_TOKEN, default=secrets.token_urlsafe): str,
vol.Optional(ATTR_INGRESS_TOKEN, default=secrets.token_urlsafe): vol.Coerce(
str
),
vol.Optional(ATTR_OPTIONS, default=dict): dict,
vol.Optional(ATTR_AUTO_UPDATE, default=False): vol.Boolean(),
vol.Optional(ATTR_BOOT): vol.Coerce(AddonBoot),
vol.Optional(ATTR_NETWORK): docker_ports,
vol.Optional(ATTR_AUDIO_OUTPUT): vol.Maybe(str),
vol.Optional(ATTR_AUDIO_INPUT): vol.Maybe(str),
vol.Optional(ATTR_BOOT): vol.In([BOOT_AUTO, BOOT_MANUAL]),
vol.Optional(ATTR_NETWORK): DOCKER_PORTS,
vol.Optional(ATTR_AUDIO_OUTPUT): vol.Maybe(vol.Coerce(str)),
vol.Optional(ATTR_AUDIO_INPUT): vol.Maybe(vol.Coerce(str)),
vol.Optional(ATTR_PROTECTED, default=True): vol.Boolean(),
vol.Optional(ATTR_INGRESS_PANEL, default=False): vol.Boolean(),
vol.Optional(ATTR_WATCHDOG, default=False): vol.Boolean(),
vol.Optional(ATTR_SYSTEM_MANAGED, default=False): vol.Boolean(),
vol.Optional(ATTR_SYSTEM_MANAGED_CONFIG_ENTRY, default=None): vol.Maybe(str),
},
extra=vol.REMOVE_EXTRA,
)
SCHEMA_ADDON_SYSTEM = vol.All(
_migrate_addon_config(),
_SCHEMA_ADDON_CONFIG.extend(
{
vol.Required(ATTR_LOCATION): str,
vol.Required(ATTR_REPOSITORY): str,
vol.Required(ATTR_TRANSLATIONS, default=dict): {
str: SCHEMA_ADDON_TRANSLATIONS
},
}
),
SCHEMA_ADDON_SYSTEM = SCHEMA_ADDON_CONFIG.extend(
{
vol.Required(ATTR_LOCATON): vol.Coerce(str),
vol.Required(ATTR_REPOSITORY): vol.Coerce(str),
}
)
SCHEMA_ADDONS_FILE = vol.Schema(
{
vol.Optional(ATTR_USER, default=dict): {str: SCHEMA_ADDON_USER},
vol.Optional(ATTR_SYSTEM, default=dict): {str: SCHEMA_ADDON_SYSTEM},
},
extra=vol.REMOVE_EXTRA,
vol.Optional(ATTR_USER, default=dict): {vol.Coerce(str): SCHEMA_ADDON_USER},
vol.Optional(ATTR_SYSTEM, default=dict): {vol.Coerce(str): SCHEMA_ADDON_SYSTEM},
}
)
SCHEMA_ADDON_BACKUP = vol.Schema(
SCHEMA_ADDON_SNAPSHOT = vol.Schema(
{
vol.Required(ATTR_USER): SCHEMA_ADDON_USER,
vol.Required(ATTR_SYSTEM): SCHEMA_ADDON_SYSTEM,
vol.Required(ATTR_STATE): vol.Coerce(AddonState),
vol.Required(ATTR_VERSION): version_tag,
vol.Required(ATTR_STATE): vol.In([STATE_STARTED, STATE_STOPPED]),
vol.Required(ATTR_VERSION): vol.Coerce(str),
},
extra=vol.REMOVE_EXTRA,
)
def validate_options(coresys: CoreSys, raw_schema: Dict[str, Any]):
"""Validate schema."""
def validate(struct):
"""Create schema validator for add-ons options."""
options = {}
# read options
for key, value in struct.items():
# Ignore unknown options / remove from list
if key not in raw_schema:
_LOGGER.warning("Unknown options %s", key)
continue
typ = raw_schema[key]
try:
if isinstance(typ, list):
# nested value list
options[key] = _nested_validate_list(coresys, typ[0], value, key)
elif isinstance(typ, dict):
# nested value dict
options[key] = _nested_validate_dict(coresys, typ, value, key)
else:
# normal value
options[key] = _single_validate(coresys, typ, value, key)
except (IndexError, KeyError):
raise vol.Invalid(f"Type error for {key}") from None
_check_missing_options(raw_schema, options, "root")
return options
return validate
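A hypothetical use of this factory, with my own schema and values:
# Build the validator once, then validate user-supplied options.
validate = validate_options(coresys, {"port": "port", "name": "str?"})
options = validate({"port": 8080})  # -> {"port": 8080}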
# pylint: disable=no-value-for-parameter
# pylint: disable=inconsistent-return-statements
def _single_validate(coresys: CoreSys, typ: str, value: Any, key: str):
"""Validate a single element."""
# A required option is missing its value
if value is None:
raise vol.Invalid(f"Missing required option '{key}'")
# Lookup secret
if str(value).startswith("!secret "):
secret: str = value.partition(" ")[2]
value = coresys.secrets.get(secret)
if value is None:
raise vol.Invalid(f"Unknown secret {secret}")
# Parse extended data from the type string
match = RE_SCHEMA_ELEMENT.match(typ)
# prepare range
range_args = {}
for group_name in _SCHEMA_LENGTH_PARTS:
group_value = match.group(group_name)
if group_value:
range_args[group_name[2:]] = float(group_value)
if typ.startswith(V_STR) or typ.startswith(V_PASSWORD):
return vol.All(str(value), vol.Range(**range_args))(value)
elif typ.startswith(V_INT):
return vol.All(vol.Coerce(int), vol.Range(**range_args))(value)
elif typ.startswith(V_FLOAT):
return vol.All(vol.Coerce(float), vol.Range(**range_args))(value)
elif typ.startswith(V_BOOL):
return vol.Boolean()(value)
elif typ.startswith(V_EMAIL):
return vol.Email()(value)
elif typ.startswith(V_URL):
return vol.Url()(value)
elif typ.startswith(V_PORT):
return network_port(value)
elif typ.startswith(V_MATCH):
return vol.Match(match.group("match"))(str(value))
elif typ.startswith(V_LIST):
return vol.In(match.group("list").split("|"))(str(value))
raise vol.Invalid(f"Fatal error for {key} type {typ}")
def _nested_validate_list(coresys, typ, data_list, key):
"""Validate nested items."""
options = []
for element in data_list:
# Nested?
if isinstance(typ, dict):
c_options = _nested_validate_dict(coresys, typ, element, key)
options.append(c_options)
else:
options.append(_single_validate(coresys, typ, element, key))
return options
def _nested_validate_dict(coresys, typ, data_dict, key):
"""Validate nested items."""
options = {}
for c_key, c_value in data_dict.items():
# Ignore unknown options / remove from list
if c_key not in typ:
_LOGGER.warning("Unknown options %s", c_key)
continue
# Nested?
if isinstance(typ[c_key], list):
options[c_key] = _nested_validate_list(
coresys, typ[c_key][0], c_value, c_key
)
else:
options[c_key] = _single_validate(coresys, typ[c_key], c_value, c_key)
_check_missing_options(typ, options, key)
return options
def _check_missing_options(origin, exists, root):
"""Check if all options are exists."""
missing = set(origin) - set(exists)
for miss_opt in missing:
if isinstance(origin[miss_opt], str) and origin[miss_opt].endswith("?"):
continue
raise vol.Invalid(f"Missing option {miss_opt} in {root}")
def schema_ui_options(raw_schema: Dict[str, Any]) -> List[Dict[str, Any]]:
"""Generate UI schema."""
ui_schema = []
# read options
for key, value in raw_schema.items():
if isinstance(value, list):
# nested value list
_nested_ui_list(ui_schema, value, key)
elif isinstance(value, dict):
# nested value dict
_nested_ui_dict(ui_schema, value, key)
else:
# normal value
_single_ui_option(ui_schema, value, key)
return ui_schema
def _single_ui_option(
ui_schema: List[Dict[str, Any]], value: str, key: str, multiple: bool = False
) -> None:
"""Validate a single element."""
ui_node = {"name": key}
# If multiple
if multiple:
ui_node["multiple"] = True
# Parse extended data from the type string
match = RE_SCHEMA_ELEMENT.match(value)
# Prepare range
for group_name in _SCHEMA_LENGTH_PARTS:
group_value = match.group(group_name)
if not group_value:
continue
if group_name[2:] == "min":
ui_node["lengthMin"] = float(group_value)
elif group_name[2:] == "max":
ui_node["lengthMax"] = float(group_value)
# If required
if value.endswith("?"):
ui_node["optional"] = True
else:
ui_node["required"] = True
# Data types
if value.startswith(V_STR):
ui_node["type"] = "string"
elif value.startswith(V_PASSWORD):
ui_node["type"] = "string"
ui_node["format"] = "password"
elif value.startswith(V_INT):
ui_node["type"] = "integer"
elif value.startswith(V_FLOAT):
ui_node["type"] = "float"
elif value.startswith(V_BOOL):
ui_node["type"] = "boolean"
elif value.startswith(V_EMAIL):
ui_node["type"] = "string"
ui_node["format"] = "email"
elif value.startswith(V_URL):
ui_node["type"] = "string"
ui_node["format"] = "url"
elif value.startswith(V_PORT):
ui_node["type"] = "integer"
elif value.startswith(V_MATCH):
ui_node["type"] = "string"
elif value.startswith(V_LIST):
ui_node["type"] = "select"
ui_node["options"] = match.group("list").split("|")
ui_schema.append(ui_node)
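For intuition, the nodes this renderer emits for a small schema of my own:
# schema_ui_options({"password": "password", "port": "int(1,65535)?"}) ->
# [
#     {"name": "password", "required": True, "type": "string", "format": "password"},
#     {"name": "port", "lengthMin": 1.0, "lengthMax": 65535.0,
#      "optional": True, "type": "integer"},
# ]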
def _nested_ui_list(
ui_schema: List[Dict[str, Any]], option_list: List[Any], key: str
) -> None:
"""UI nested list items."""
try:
element = option_list[0]
except IndexError:
_LOGGER.error("Invalid schema %s", key)
return
if isinstance(element, dict):
_nested_ui_dict(ui_schema, element, key, multiple=True)
else:
_single_ui_option(ui_schema, element, key, multiple=True)
def _nested_ui_dict(
ui_schema: List[Dict[str, Any]],
option_dict: Dict[str, Any],
key: str,
multiple: bool = False,
) -> None:
"""UI nested dict items."""
ui_node = {"name": key, "type": "schema", "optional": True, "multiple": multiple}
nested_schema = []
for c_key, c_value in option_dict.items():
# Nested?
if isinstance(c_value, list):
_nested_ui_list(nested_schema, c_value, c_key)
else:
_single_ui_option(nested_schema, c_value, c_key)
ui_node["schema"] = nested_schema
ui_schema.append(ui_node)

View File

@ -1,59 +1,34 @@
"""Init file for Supervisor RESTful API."""
from dataclasses import dataclass
from functools import partial
import logging
from pathlib import Path
from typing import Any
from typing import Optional
from aiohttp import hdrs, web
from aiohttp import web
from ..const import SUPERVISOR_DOCKER_NAME, AddonState
from ..coresys import CoreSys, CoreSysAttributes
from ..exceptions import APIAddonNotInstalled, HostNotSupportedError
from ..utils.sentry import async_capture_exception
from .addons import APIAddons
from .audio import APIAudio
from .auth import APIAuth
from .backups import APIBackups
from .cli import APICli
from .const import CONTENT_TYPE_TEXT
from .discovery import APIDiscovery
from .dns import APICoreDNS
from .docker import APIDocker
from .hardware import APIHardware
from .os import APIOS
from .homeassistant import APIHomeAssistant
from .host import APIHost
from .info import APIInfo
from .ingress import APIIngress
from .jobs import APIJobs
from .middleware.security import SecurityMiddleware
from .mounts import APIMounts
from .multicast import APIMulticast
from .network import APINetwork
from .observer import APIObserver
from .os import APIOS
from .proxy import APIProxy
from .resolution import APIResoulution
from .root import APIRoot
from .security import APISecurity
from .security import SecurityMiddleware
from .services import APIServices
from .store import APIStore
from .snapshots import APISnapshots
from .supervisor import APISupervisor
from .utils import api_process, api_process_raw
from .multicast import APIMulticast
_LOGGER: logging.Logger = logging.getLogger(__name__)
MAX_CLIENT_SIZE: int = 1024**2 * 16
MAX_LINE_SIZE: int = 24570
@dataclass(slots=True, frozen=True)
class StaticResourceConfig:
"""Configuration for a static resource."""
prefix: str
path: Path
MAX_CLIENT_SIZE: int = 1024 ** 2 * 16
class RestAPI(CoreSysAttributes):
@@ -65,166 +40,52 @@ class RestAPI(CoreSysAttributes):
self.security: SecurityMiddleware = SecurityMiddleware(coresys)
self.webapp: web.Application = web.Application(
client_max_size=MAX_CLIENT_SIZE,
middlewares=[
self.security.block_bad_requests,
self.security.system_validation,
self.security.token_validation,
self.security.core_proxy,
],
handler_args={
"max_line_size": MAX_LINE_SIZE,
"max_field_size": MAX_LINE_SIZE,
},
middlewares=[self.security.token_validation],
)
# service stuff
self._runner: web.AppRunner = web.AppRunner(self.webapp, shutdown_timeout=5)
self._site: web.TCPSite | None = None
# share single host API handler for reuse in logging endpoints
self._api_host: APIHost = APIHost()
self._api_host.coresys = coresys
self._runner: web.AppRunner = web.AppRunner(self.webapp)
self._site: Optional[web.TCPSite] = None
async def load(self) -> None:
"""Register REST API Calls."""
static_resource_configs: list[StaticResourceConfig] = []
self._register_addons()
self._register_audio()
self._register_auth()
self._register_backups()
self._register_supervisor()
self._register_host()
self._register_os()
self._register_cli()
self._register_discovery()
self._register_dns()
self._register_docker()
self._register_multicast()
self._register_hardware()
self._register_homeassistant()
self._register_host()
self._register_jobs()
self._register_ingress()
self._register_mounts()
self._register_multicast()
self._register_network()
self._register_observer()
self._register_os()
static_resource_configs.extend(self._register_panel())
self._register_proxy()
self._register_resolution()
self._register_root()
self._register_security()
self._register_panel()
self._register_addons()
self._register_ingress()
self._register_snapshots()
self._register_discovery()
self._register_services()
self._register_store()
self._register_supervisor()
if static_resource_configs:
def process_configs() -> list[web.StaticResource]:
return [
web.StaticResource(config.prefix, config.path)
for config in static_resource_configs
]
for resource in await self.sys_run_in_executor(process_configs):
self.webapp.router.register_resource(resource)
await self.start()
def _register_advanced_logs(self, path: str, syslog_identifier: str):
"""Register logs endpoint for a given path, returning logs for single syslog identifier."""
self.webapp.add_routes(
[
web.get(
f"{path}/logs",
partial(self._api_host.advanced_logs, identifier=syslog_identifier),
),
web.get(
f"{path}/logs/follow",
partial(
self._api_host.advanced_logs,
identifier=syslog_identifier,
follow=True,
),
),
web.get(
f"{path}/logs/boots/{{bootid}}",
partial(self._api_host.advanced_logs, identifier=syslog_identifier),
),
web.get(
f"{path}/logs/boots/{{bootid}}/follow",
partial(
self._api_host.advanced_logs,
identifier=syslog_identifier,
follow=True,
),
),
]
)
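For reference, each call to this helper fans out into four journald-backed routes; e.g. (sketch derived from the registrations above):
# self._register_advanced_logs("/dns", "hassio_dns") registers:
#   GET /dns/logs
#   GET /dns/logs/follow
#   GET /dns/logs/boots/{bootid}
#   GET /dns/logs/boots/{bootid}/follow
# all handled by APIHost.advanced_logs with the syslog identifier
# pinned via functools.partial.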
self._register_info()
self._register_auth()
self._register_dns()
self._register_audio()
def _register_host(self) -> None:
"""Register hostcontrol functions."""
api_host = self._api_host
api_host = APIHost()
api_host.coresys = self.coresys
self.webapp.add_routes(
[
web.get("/host/info", api_host.info),
web.get("/host/logs", api_host.advanced_logs),
web.get(
"/host/logs/follow",
partial(api_host.advanced_logs, follow=True),
),
web.get("/host/logs/identifiers", api_host.list_identifiers),
web.get("/host/logs/identifiers/{identifier}", api_host.advanced_logs),
web.get(
"/host/logs/identifiers/{identifier}/follow",
partial(api_host.advanced_logs, follow=True),
),
web.get("/host/logs/boots", api_host.list_boots),
web.get("/host/logs/boots/{bootid}", api_host.advanced_logs),
web.get(
"/host/logs/boots/{bootid}/follow",
partial(api_host.advanced_logs, follow=True),
),
web.get(
"/host/logs/boots/{bootid}/identifiers/{identifier}",
api_host.advanced_logs,
),
web.get(
"/host/logs/boots/{bootid}/identifiers/{identifier}/follow",
partial(api_host.advanced_logs, follow=True),
),
web.get("/host/logs", api_host.logs),
web.post("/host/reboot", api_host.reboot),
web.post("/host/shutdown", api_host.shutdown),
web.post("/host/reload", api_host.reload),
web.post("/host/options", api_host.options),
web.get("/host/services", api_host.services),
]
)
def _register_network(self) -> None:
"""Register network functions."""
api_network = APINetwork()
api_network.coresys = self.coresys
self.webapp.add_routes(
[
web.get("/network/info", api_network.info),
web.post("/network/reload", api_network.reload),
web.get(
"/network/interface/{interface}/info", api_network.interface_info
),
web.post(
"/network/interface/{interface}/update",
api_network.interface_update,
),
web.get(
"/network/interface/{interface}/accesspoints",
api_network.scan_accesspoints,
),
web.post(
"/network/interface/{interface}/vlan/{vlan}",
api_network.create_vlan,
),
web.post("/host/services/{service}/stop", api_host.service_stop),
web.post("/host/services/{service}/start", api_host.service_start),
web.post("/host/services/{service}/restart", api_host.service_restart),
web.post("/host/services/{service}/reload", api_host.service_reload),
]
)
@@ -237,52 +98,7 @@ class RestAPI(CoreSysAttributes):
[
web.get("/os/info", api_os.info),
web.post("/os/update", api_os.update),
web.get("/os/config/swap", api_os.config_swap_info),
web.post("/os/config/swap", api_os.config_swap_options),
web.post("/os/config/sync", api_os.config_sync),
web.post("/os/datadisk/move", api_os.migrate_data),
web.get("/os/datadisk/list", api_os.list_data),
web.post("/os/datadisk/wipe", api_os.wipe_data),
web.post("/os/boot-slot", api_os.set_boot_slot),
]
)
# Boards endpoints
self.webapp.add_routes(
[
web.get("/os/boards/green", api_os.boards_green_info),
web.post("/os/boards/green", api_os.boards_green_options),
web.get("/os/boards/yellow", api_os.boards_yellow_info),
web.post("/os/boards/yellow", api_os.boards_yellow_options),
web.get("/os/boards/{board}", api_os.boards_other_info),
]
)
def _register_security(self) -> None:
"""Register Security functions."""
api_security = APISecurity()
api_security.coresys = self.coresys
self.webapp.add_routes(
[
web.get("/security/info", api_security.info),
web.post("/security/options", api_security.options),
web.post("/security/integrity", api_security.integrity_check),
]
)
def _register_jobs(self) -> None:
"""Register Jobs functions."""
api_jobs = APIJobs()
api_jobs.coresys = self.coresys
self.webapp.add_routes(
[
web.get("/jobs/info", api_jobs.info),
web.post("/jobs/options", api_jobs.options),
web.post("/jobs/reset", api_jobs.reset),
web.get("/jobs/{uuid}", api_jobs.job_info),
web.delete("/jobs/{uuid}", api_jobs.remove_job),
]
)
@@ -299,19 +115,6 @@ class RestAPI(CoreSysAttributes):
]
)
def _register_observer(self) -> None:
"""Register Observer functions."""
api_observer = APIObserver()
api_observer.coresys = self.coresys
self.webapp.add_routes(
[
web.get("/observer/info", api_observer.info),
web.get("/observer/stats", api_observer.stats),
web.post("/observer/update", api_observer.update),
]
)
def _register_multicast(self) -> None:
"""Register Multicast functions."""
api_multicast = APIMulticast()
@@ -321,11 +124,11 @@
[
web.get("/multicast/info", api_multicast.info),
web.get("/multicast/stats", api_multicast.stats),
web.get("/multicast/logs", api_multicast.logs),
web.post("/multicast/update", api_multicast.update),
web.post("/multicast/restart", api_multicast.restart),
]
)
self._register_advanced_logs("/multicast", "hassio_multicast")
def _register_hardware(self) -> None:
"""Register hardware functions."""
@@ -336,59 +139,16 @@
[
web.get("/hardware/info", api_hardware.info),
web.get("/hardware/audio", api_hardware.audio),
web.post("/hardware/trigger", api_hardware.trigger),
]
)
def _register_root(self) -> None:
"""Register root functions."""
api_root = APIRoot()
api_root.coresys = self.coresys
self.webapp.add_routes([web.get("/info", api_root.info)])
self.webapp.add_routes([web.post("/reload_updates", api_root.reload_updates)])
# Discouraged
self.webapp.add_routes([web.post("/refresh_updates", api_root.refresh_updates)])
self.webapp.add_routes(
[web.get("/available_updates", api_root.available_updates)]
)
# Remove: 2023
self.webapp.add_routes(
[web.get("/supervisor/available_updates", api_root.available_updates)]
)
def _register_resolution(self) -> None:
def _register_info(self) -> None:
"""Register info functions."""
api_resolution = APIResoulution()
api_resolution.coresys = self.coresys
api_info = APIInfo()
api_info.coresys = self.coresys
self.webapp.add_routes(
[
web.get("/resolution/info", api_resolution.info),
web.post(
"/resolution/check/{check}/options", api_resolution.options_check
),
web.post("/resolution/check/{check}/run", api_resolution.run_check),
web.post(
"/resolution/suggestion/{suggestion}",
api_resolution.apply_suggestion,
),
web.delete(
"/resolution/suggestion/{suggestion}",
api_resolution.dismiss_suggestion,
),
web.delete(
"/resolution/issue/{issue}",
api_resolution.dismiss_issue,
),
web.get(
"/resolution/issue/{issue}/suggestions",
api_resolution.suggestions_for_issue,
),
web.post("/resolution/healthcheck", api_resolution.healthcheck),
]
)
self.webapp.add_routes([web.get("/info", api_info.info)])
def _register_auth(self) -> None:
"""Register auth functions."""
@@ -396,13 +156,7 @@ class RestAPI(CoreSysAttributes):
api_auth.coresys = self.coresys
self.webapp.add_routes(
[
web.get("/auth", api_auth.auth),
web.post("/auth", api_auth.auth),
web.post("/auth/reset", api_auth.reset),
web.delete("/auth/cache", api_auth.cache),
web.get("/auth/list", api_auth.list_users),
]
[web.post("/auth", api_auth.auth), web.post("/auth/reset", api_auth.reset)]
)
def _register_supervisor(self) -> None:
@@ -415,47 +169,14 @@
web.get("/supervisor/ping", api_supervisor.ping),
web.get("/supervisor/info", api_supervisor.info),
web.get("/supervisor/stats", api_supervisor.stats),
web.get("/supervisor/logs", api_supervisor.logs),
web.post("/supervisor/update", api_supervisor.update),
web.post("/supervisor/reload", api_supervisor.reload),
web.post("/supervisor/restart", api_supervisor.restart),
web.post("/supervisor/options", api_supervisor.options),
web.post("/supervisor/repair", api_supervisor.repair),
]
)
async def get_supervisor_logs(*args, **kwargs):
try:
return await self._api_host.advanced_logs_handler(
*args, identifier=SUPERVISOR_DOCKER_NAME, **kwargs
)
except Exception as err: # pylint: disable=broad-exception-caught
# Supervisor logs are critical, so catch everything, log the exception
# and try to return Docker container logs as the fallback
_LOGGER.exception(
"Failed to get supervisor logs using advanced_logs API"
)
if not isinstance(err, HostNotSupportedError):
# No need to capture HostNotSupportedError to Sentry, the cause
# is known and reported to the user using the resolution center.
await async_capture_exception(err)
kwargs.pop("follow", None) # Follow is not supported for Docker logs
return await api_supervisor.logs(*args, **kwargs)
self.webapp.add_routes(
[
web.get("/supervisor/logs", get_supervisor_logs),
web.get(
"/supervisor/logs/follow",
partial(get_supervisor_logs, follow=True),
),
web.get("/supervisor/logs/boots/{bootid}", get_supervisor_logs),
web.get(
"/supervisor/logs/boots/{bootid}/follow",
partial(get_supervisor_logs, follow=True),
),
]
)
def _register_homeassistant(self) -> None:
"""Register Home Assistant functions."""
api_hass = APIHomeAssistant()
@@ -464,6 +185,7 @@ class RestAPI(CoreSysAttributes):
self.webapp.add_routes(
[
web.get("/core/info", api_hass.info),
web.get("/core/logs", api_hass.logs),
web.get("/core/stats", api_hass.stats),
web.post("/core/options", api_hass.options),
web.post("/core/update", api_hass.update),
@@ -472,28 +194,20 @@
web.post("/core/start", api_hass.start),
web.post("/core/check", api_hass.check),
web.post("/core/rebuild", api_hass.rebuild),
]
)
self._register_advanced_logs("/core", "homeassistant")
# Reroute from legacy
self.webapp.add_routes(
[
# Remove with old Supervisor fallback
web.get("/homeassistant/info", api_hass.info),
web.get("/homeassistant/logs", api_hass.logs),
web.get("/homeassistant/stats", api_hass.stats),
web.post("/homeassistant/options", api_hass.options),
web.post("/homeassistant/update", api_hass.update),
web.post("/homeassistant/restart", api_hass.restart),
web.post("/homeassistant/stop", api_hass.stop),
web.post("/homeassistant/start", api_hass.start),
web.post("/homeassistant/update", api_hass.update),
web.post("/homeassistant/rebuild", api_hass.rebuild),
web.post("/homeassistant/check", api_hass.check),
web.post("/homeassistant/rebuild", api_hass.rebuild),
]
)
self._register_advanced_logs("/homeassistant", "homeassistant")
def _register_proxy(self) -> None:
"""Register Home Assistant API Proxy."""
api_proxy = APIProxy()
@@ -507,12 +221,7 @@
web.post("/core/api/{path:.+}", api_proxy.api),
web.get("/core/api/{path:.+}", api_proxy.api),
web.get("/core/api/", api_proxy.api),
]
)
# Reroute from legacy
self.webapp.add_routes(
[
# Remove with old Supervisor fallback
web.get("/homeassistant/api/websocket", api_proxy.websocket),
web.get("/homeassistant/websocket", api_proxy.websocket),
web.get("/homeassistant/api/stream", api_proxy.stream),
@@ -529,64 +238,28 @@
self.webapp.add_routes(
[
web.get("/addons", api_addons.list_addons),
web.get("/addons", api_addons.list),
web.post("/addons/reload", api_addons.reload),
web.get("/addons/{addon}/info", api_addons.info),
web.post("/addons/{addon}/install", api_addons.install),
web.post("/addons/{addon}/uninstall", api_addons.uninstall),
web.post("/addons/{addon}/start", api_addons.start),
web.post("/addons/{addon}/stop", api_addons.stop),
web.post("/addons/{addon}/restart", api_addons.restart),
web.post("/addons/{addon}/update", api_addons.update),
web.post("/addons/{addon}/options", api_addons.options),
web.post("/addons/{addon}/sys_options", api_addons.sys_options),
web.post(
"/addons/{addon}/options/validate", api_addons.options_validate
),
web.get("/addons/{addon}/options/config", api_addons.options_config),
web.post("/addons/{addon}/rebuild", api_addons.rebuild),
web.get("/addons/{addon}/logs", api_addons.logs),
web.get("/addons/{addon}/icon", api_addons.icon),
web.get("/addons/{addon}/logo", api_addons.logo),
web.get("/addons/{addon}/changelog", api_addons.changelog),
web.get("/addons/{addon}/documentation", api_addons.documentation),
web.post("/addons/{addon}/stdin", api_addons.stdin),
web.post("/addons/{addon}/security", api_addons.security),
web.get("/addons/{addon}/stats", api_addons.stats),
]
)
@api_process_raw(CONTENT_TYPE_TEXT, error_type=CONTENT_TYPE_TEXT)
async def get_addon_logs(request, *args, **kwargs):
addon = api_addons.get_addon_for_request(request)
kwargs["identifier"] = f"addon_{addon.slug}"
return await self._api_host.advanced_logs(request, *args, **kwargs)
self.webapp.add_routes(
[
web.get("/addons/{addon}/logs", get_addon_logs),
web.get(
"/addons/{addon}/logs/follow",
partial(get_addon_logs, follow=True),
),
web.get("/addons/{addon}/logs/boots/{bootid}", get_addon_logs),
web.get(
"/addons/{addon}/logs/boots/{bootid}/follow",
partial(get_addon_logs, follow=True),
),
]
)
# Legacy routing to support requests for add-ons that are not installed
api_store = APIStore()
api_store.coresys = self.coresys
@api_process
async def addons_addon_info(request: web.Request) -> dict[str, Any]:
"""Route to store if info requested for not installed addon."""
try:
return await api_addons.info(request)
except APIAddonNotInstalled:
# Route to store/{addon}/info but add missing fields
return dict(
await api_store.addons_addon_info_wrapped(request),
state=AddonState.UNKNOWN,
options=self.sys_addons.store[request.match_info["addon"]].options,
)
self.webapp.add_routes([web.get("/addons/{addon}/info", addons_addon_info)])
def _register_ingress(self) -> None:
"""Register Ingress functions."""
api_ingress = APIIngress()
@@ -595,38 +268,33 @@
self.webapp.add_routes(
[
web.post("/ingress/session", api_ingress.create_session),
web.post("/ingress/validate_session", api_ingress.validate_session),
web.get("/ingress/panels", api_ingress.panels),
web.route(
hdrs.METH_ANY, "/ingress/{token}/{path:.*}", api_ingress.handler
),
web.view("/ingress/{token}/{path:.*}", api_ingress.handler),
]
)
def _register_backups(self) -> None:
"""Register backups functions."""
api_backups = APIBackups()
api_backups.coresys = self.coresys
def _register_snapshots(self) -> None:
"""Register snapshots functions."""
api_snapshots = APISnapshots()
api_snapshots.coresys = self.coresys
self.webapp.add_routes(
[
web.get("/backups", api_backups.list_backups),
web.get("/backups/info", api_backups.info),
web.post("/backups/options", api_backups.options),
web.post("/backups/reload", api_backups.reload),
web.post("/backups/freeze", api_backups.freeze),
web.post("/backups/thaw", api_backups.thaw),
web.post("/backups/new/full", api_backups.backup_full),
web.post("/backups/new/partial", api_backups.backup_partial),
web.post("/backups/new/upload", api_backups.upload),
web.get("/backups/{slug}/info", api_backups.backup_info),
web.delete("/backups/{slug}", api_backups.remove),
web.post("/backups/{slug}/restore/full", api_backups.restore_full),
web.get("/snapshots", api_snapshots.list),
web.post("/snapshots/reload", api_snapshots.reload),
web.post("/snapshots/new/full", api_snapshots.snapshot_full),
web.post("/snapshots/new/partial", api_snapshots.snapshot_partial),
web.post("/snapshots/new/upload", api_snapshots.upload),
web.get("/snapshots/{snapshot}/info", api_snapshots.info),
web.post("/snapshots/{snapshot}/remove", api_snapshots.remove),
web.post(
"/backups/{slug}/restore/partial",
api_backups.restore_partial,
"/snapshots/{snapshot}/restore/full", api_snapshots.restore_full
),
web.get("/backups/{slug}/download", api_backups.download),
web.post(
"/snapshots/{snapshot}/restore/partial",
api_snapshots.restore_partial,
),
web.get("/snapshots/{snapshot}/download", api_snapshots.download),
]
)
@@ -637,7 +305,7 @@ class RestAPI(CoreSysAttributes):
self.webapp.add_routes(
[
web.get("/services", api_services.list_services),
web.get("/services", api_services.list),
web.get("/services/{service}", api_services.get_service),
web.post("/services/{service}", api_services.set_service),
web.delete("/services/{service}", api_services.del_service),
@@ -651,7 +319,7 @@
self.webapp.add_routes(
[
web.get("/discovery", api_discovery.list_discovery),
web.get("/discovery", api_discovery.list),
web.get("/discovery/{uuid}", api_discovery.get_discovery),
web.delete("/discovery/{uuid}", api_discovery.del_discovery),
web.post("/discovery", api_discovery.set_discovery),
@@ -667,6 +335,7 @@
[
web.get("/dns/info", api_dns.info),
web.get("/dns/stats", api_dns.stats),
web.get("/dns/logs", api_dns.logs),
web.post("/dns/update", api_dns.update),
web.post("/dns/options", api_dns.options),
web.post("/dns/restart", api_dns.restart),
@@ -674,8 +343,6 @@
]
)
self._register_advanced_logs("/dns", "hassio_dns")
def _register_audio(self) -> None:
"""Register Audio functions."""
api_audio = APIAudio()
@@ -685,6 +352,7 @@
[
web.get("/audio/info", api_audio.info),
web.get("/audio/stats", api_audio.stats),
web.get("/audio/logs", api_audio.logs),
web.post("/audio/update", api_audio.update),
web.post("/audio/restart", api_audio.restart),
web.post("/audio/reload", api_audio.reload),
@@ -697,116 +365,24 @@
]
)
self._register_advanced_logs("/audio", "hassio_audio")
def _register_mounts(self) -> None:
"""Register mounts endpoints."""
api_mounts = APIMounts()
api_mounts.coresys = self.coresys
self.webapp.add_routes(
[
web.get("/mounts", api_mounts.info),
web.post("/mounts/options", api_mounts.options),
web.post("/mounts", api_mounts.create_mount),
web.put("/mounts/{mount}", api_mounts.update_mount),
web.delete("/mounts/{mount}", api_mounts.delete_mount),
web.post("/mounts/{mount}/reload", api_mounts.reload_mount),
]
)
def _register_store(self) -> None:
"""Register store endpoints."""
api_store = APIStore()
api_store.coresys = self.coresys
self.webapp.add_routes(
[
web.get("/store", api_store.store_info),
web.get("/store/addons", api_store.addons_list),
web.get("/store/addons/{addon}", api_store.addons_addon_info),
web.get("/store/addons/{addon}/icon", api_store.addons_addon_icon),
web.get("/store/addons/{addon}/logo", api_store.addons_addon_logo),
web.get(
"/store/addons/{addon}/changelog", api_store.addons_addon_changelog
),
web.get(
"/store/addons/{addon}/documentation",
api_store.addons_addon_documentation,
),
web.post(
"/store/addons/{addon}/install", api_store.addons_addon_install
),
web.post(
"/store/addons/{addon}/install/{version}",
api_store.addons_addon_install,
),
web.post("/store/addons/{addon}/update", api_store.addons_addon_update),
web.post(
"/store/addons/{addon}/update/{version}",
api_store.addons_addon_update,
),
# Must be below others since it has a wildcard in resource path
web.get("/store/addons/{addon}/{version}", api_store.addons_addon_info),
web.post("/store/reload", api_store.reload),
web.get("/store/repositories", api_store.repositories_list),
web.get(
"/store/repositories/{repository}",
api_store.repositories_repository_info,
),
web.post("/store/repositories", api_store.add_repository),
web.delete(
"/store/repositories/{repository}", api_store.remove_repository
),
]
)
# Reroute from legacy
self.webapp.add_routes(
[
web.post("/addons/reload", api_store.reload),
web.post("/addons/{addon}/install", api_store.addons_addon_install),
web.post("/addons/{addon}/update", api_store.addons_addon_update),
web.get("/addons/{addon}/icon", api_store.addons_addon_icon),
web.get("/addons/{addon}/logo", api_store.addons_addon_logo),
web.get("/addons/{addon}/changelog", api_store.addons_addon_changelog),
web.get(
"/addons/{addon}/documentation",
api_store.addons_addon_documentation,
),
]
)
def _register_panel(self) -> list[StaticResourceConfig]:
def _register_panel(self) -> None:
"""Register panel for Home Assistant."""
return [StaticResourceConfig("/app", Path(__file__).parent.joinpath("panel"))]
def _register_docker(self) -> None:
"""Register docker configuration functions."""
api_docker = APIDocker()
api_docker.coresys = self.coresys
self.webapp.add_routes(
[
web.get("/docker/info", api_docker.info),
web.post("/docker/options", api_docker.options),
web.get("/docker/registries", api_docker.registries),
web.post("/docker/registries", api_docker.create_registry),
web.delete("/docker/registries/{hostname}", api_docker.remove_registry),
]
)
panel_dir = Path(__file__).parent.joinpath("panel")
self.webapp.add_routes([web.static("/app", panel_dir)])
async def start(self) -> None:
"""Run RESTful API webserver."""
await self._runner.setup()
self._site = web.TCPSite(self._runner, host="0.0.0.0", port=80)
self._site = web.TCPSite(
self._runner, host="0.0.0.0", port=80, shutdown_timeout=5
)
try:
await self._site.start()
except OSError as err:
_LOGGER.critical("Failed to create HTTP server at 0.0.0.0:80 -> %s", err)
else:
_LOGGER.info("Starting API on %s", self.sys_docker.network.supervisor)
_LOGGER.info("Start API on %s", self.sys_docker.network.supervisor)
async def stop(self) -> None:
"""Stop RESTful API webserver."""
@@ -817,4 +393,4 @@ class RestAPI(CoreSysAttributes):
await self._site.stop()
await self._runner.cleanup()
_LOGGER.info("Stopping API on %s", self.sys_docker.network.supervisor)
_LOGGER.info("Stop API on %s", self.sys_docker.network.supervisor)

View File

@@ -1,14 +1,12 @@
"""Init file for Supervisor Home Assistant RESTful API."""
import asyncio
from collections.abc import Awaitable
import logging
from typing import Any, TypedDict
from typing import Any, Awaitable, Dict, List
from aiohttp import web
import voluptuous as vol
from voluptuous.humanize import humanize_error
from ..addons import AnyAddon
from ..addons.addon import Addon
from ..addons.utils import rating_security
from ..const import (
@@ -36,7 +34,6 @@ from ..const import (
ATTR_DNS,
ATTR_DOCKER_API,
ATTR_DOCUMENTATION,
ATTR_FORCE,
ATTR_FULL_ACCESS,
ATTR_GPIO,
ATTR_HASSIO_API,
@@ -47,7 +44,6 @@ from ..const import (
ATTR_HOST_IPC,
ATTR_HOST_NETWORK,
ATTR_HOST_PID,
ATTR_HOST_UTS,
ATTR_HOSTNAME,
ATTR_ICON,
ATTR_INGRESS,
@@ -55,11 +51,14 @@ from ..const import (
ATTR_INGRESS_PANEL,
ATTR_INGRESS_PORT,
ATTR_INGRESS_URL,
ATTR_INSTALLED,
ATTR_IP_ADDRESS,
ATTR_KERNEL_MODULES,
ATTR_VERSION_LATEST,
ATTR_LOGO,
ATTR_LONG_DESCRIPTION,
ATTR_MACHINE,
ATTR_MAINTAINER,
ATTR_MEMORY_LIMIT,
ATTR_MEMORY_PERCENT,
ATTR_MEMORY_USAGE,
@@ -72,93 +71,62 @@ from ..const import (
ATTR_PRIVILEGED,
ATTR_PROTECTED,
ATTR_RATING,
ATTR_REPOSITORIES,
ATTR_REPOSITORY,
ATTR_SCHEMA,
ATTR_SERVICES,
ATTR_SLUG,
ATTR_SOURCE,
ATTR_STAGE,
ATTR_STARTUP,
ATTR_STATE,
ATTR_STDIN,
ATTR_SYSTEM_MANAGED,
ATTR_SYSTEM_MANAGED_CONFIG_ENTRY,
ATTR_TRANSLATIONS,
ATTR_UART,
ATTR_UDEV,
ATTR_UPDATE_AVAILABLE,
ATTR_URL,
ATTR_USB,
ATTR_VERSION,
ATTR_VERSION_LATEST,
ATTR_VIDEO,
ATTR_WATCHDOG,
ATTR_WEBUI,
BOOT_AUTO,
BOOT_MANUAL,
CONTENT_TYPE_BINARY,
CONTENT_TYPE_PNG,
CONTENT_TYPE_TEXT,
REQUEST_FROM,
AddonBoot,
AddonBootConfig,
STATE_NONE,
)
from ..coresys import CoreSysAttributes
from ..docker.stats import DockerStats
from ..exceptions import (
APIAddonNotInstalled,
APIError,
APIForbidden,
APINotFound,
PwnedError,
PwnedSecret,
)
from ..validate import docker_ports
from .const import ATTR_BOOT_CONFIG, ATTR_REMOVE_CONFIG, ATTR_SIGNED
from .utils import api_process, api_validate, json_loads
from ..exceptions import APIError
from ..validate import DOCKER_PORTS
from .utils import api_process, api_process_raw, api_validate
_LOGGER: logging.Logger = logging.getLogger(__name__)
SCHEMA_VERSION = vol.Schema({vol.Optional(ATTR_VERSION): str})
SCHEMA_VERSION = vol.Schema({vol.Optional(ATTR_VERSION): vol.Coerce(str)})
# pylint: disable=no-value-for-parameter
SCHEMA_OPTIONS = vol.Schema(
{
vol.Optional(ATTR_BOOT): vol.Coerce(AddonBoot),
vol.Optional(ATTR_NETWORK): vol.Maybe(docker_ports),
vol.Optional(ATTR_BOOT): vol.In([BOOT_AUTO, BOOT_MANUAL]),
vol.Optional(ATTR_NETWORK): vol.Maybe(DOCKER_PORTS),
vol.Optional(ATTR_AUTO_UPDATE): vol.Boolean(),
vol.Optional(ATTR_AUDIO_OUTPUT): vol.Maybe(str),
vol.Optional(ATTR_AUDIO_INPUT): vol.Maybe(str),
vol.Optional(ATTR_AUDIO_OUTPUT): vol.Maybe(vol.Coerce(str)),
vol.Optional(ATTR_AUDIO_INPUT): vol.Maybe(vol.Coerce(str)),
vol.Optional(ATTR_INGRESS_PANEL): vol.Boolean(),
vol.Optional(ATTR_WATCHDOG): vol.Boolean(),
}
)
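For illustration, a body accepted by SCHEMA_OPTIONS could look like the following (values are invented; keys follow the ATTR_* constants above):
# Hypothetical body for POST /addons/{addon}/options
{
    "boot": "auto",               # coerced to AddonBoot
    "network": {"80/tcp": 8080},  # validated by docker_ports
    "auto_update": True,
    "ingress_panel": False,
    "watchdog": True,
}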
SCHEMA_SYS_OPTIONS = vol.Schema(
{
vol.Optional(ATTR_SYSTEM_MANAGED): vol.Boolean(),
vol.Optional(ATTR_SYSTEM_MANAGED_CONFIG_ENTRY): vol.Maybe(str),
}
)
# pylint: disable=no-value-for-parameter
SCHEMA_SECURITY = vol.Schema({vol.Optional(ATTR_PROTECTED): vol.Boolean()})
SCHEMA_UNINSTALL = vol.Schema(
{vol.Optional(ATTR_REMOVE_CONFIG, default=False): vol.Boolean()}
)
SCHEMA_REBUILD = vol.Schema({vol.Optional(ATTR_FORCE, default=False): vol.Boolean()})
# pylint: enable=no-value-for-parameter
class OptionsValidateResponse(TypedDict):
"""Response object for options validate."""
message: str
valid: bool
pwned: bool | None
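Illustrative responses from /addons/{addon}/options/validate built on this TypedDict (values invented; logic follows options_validate below):
OptionsValidateResponse(message="", valid=True, pwned=False)
OptionsValidateResponse(
    message="Add-on uses pwned secrets!", valid=False, pwned=True
)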
class APIAddons(CoreSysAttributes):
"""Handle RESTful API for add-on functions."""
def get_addon_for_request(self, request: web.Request) -> Addon:
"""Return addon, throw an exception if it doesn't exist."""
addon_slug: str = request.match_info["addon"]
def _extract_addon(
self, request: web.Request, check_installed: bool = True
) -> AnyAddon:
"""Return addon, throw an exception it it doesn't exist."""
addon_slug: str = request.match_info.get("addon")
# Lookup itself
if addon_slug == "self":
@@ -169,49 +137,60 @@ class APIAddons(CoreSysAttributes):
addon = self.sys_addons.get(addon_slug)
if not addon:
raise APINotFound(f"Addon {addon_slug} does not exist")
if not isinstance(addon, Addon) or not addon.is_installed:
raise APIAddonNotInstalled("Addon is not installed")
raise APIError("Addon does not exist")
if check_installed and not addon.is_installed:
raise APIError("Addon is not installed")
return addon
@api_process
async def list_addons(self, request: web.Request) -> dict[str, Any]:
async def list(self, request: web.Request) -> Dict[str, Any]:
"""Return all add-ons or repositories."""
data_addons = [
{
ATTR_NAME: addon.name,
ATTR_SLUG: addon.slug,
ATTR_DESCRIPTON: addon.description,
ATTR_ADVANCED: addon.advanced,
ATTR_STAGE: addon.stage,
ATTR_VERSION: addon.version,
ATTR_VERSION_LATEST: addon.latest_version,
ATTR_UPDATE_AVAILABLE: addon.need_update,
ATTR_AVAILABLE: addon.available,
ATTR_DETACHED: addon.is_detached,
ATTR_HOMEASSISTANT: addon.homeassistant_version,
ATTR_STATE: addon.state,
ATTR_REPOSITORY: addon.repository,
ATTR_BUILD: addon.need_build,
ATTR_URL: addon.url,
ATTR_ICON: addon.with_icon,
ATTR_LOGO: addon.with_logo,
ATTR_SYSTEM_MANAGED: addon.system_managed,
}
for addon in self.sys_addons.installed
]
data_addons = []
for addon in self.sys_addons.all:
data_addons.append(
{
ATTR_NAME: addon.name,
ATTR_SLUG: addon.slug,
ATTR_DESCRIPTON: addon.description,
ATTR_ADVANCED: addon.advanced,
ATTR_STAGE: addon.stage,
ATTR_VERSION: addon.latest_version,
ATTR_INSTALLED: addon.version if addon.is_installed else None,
ATTR_AVAILABLE: addon.available,
ATTR_DETACHED: addon.is_detached,
ATTR_REPOSITORY: addon.repository,
ATTR_BUILD: addon.need_build,
ATTR_URL: addon.url,
ATTR_ICON: addon.with_icon,
ATTR_LOGO: addon.with_logo,
}
)
return {ATTR_ADDONS: data_addons}
data_repositories = []
for repository in self.sys_store.all:
data_repositories.append(
{
ATTR_SLUG: repository.slug,
ATTR_NAME: repository.name,
ATTR_SOURCE: repository.source,
ATTR_URL: repository.url,
ATTR_MAINTAINER: repository.maintainer,
}
)
return {ATTR_ADDONS: data_addons, ATTR_REPOSITORIES: data_repositories}
@api_process
async def reload(self, request: web.Request) -> None:
"""Reload all add-on data from store."""
await asyncio.shield(self.sys_store.reload())
async def info(self, request: web.Request) -> dict[str, Any]:
@api_process
async def info(self, request: web.Request) -> Dict[str, Any]:
"""Return add-on information."""
addon: Addon = self.get_addon_for_request(request)
addon: AnyAddon = self._extract_addon(request, check_installed=False)
data = {
ATTR_NAME: addon.name,
@@ -219,14 +198,15 @@ class APIAddons(CoreSysAttributes):
ATTR_HOSTNAME: addon.hostname,
ATTR_DNS: addon.dns,
ATTR_DESCRIPTON: addon.description,
ATTR_LONG_DESCRIPTION: await addon.long_description(),
ATTR_LONG_DESCRIPTION: addon.long_description,
ATTR_ADVANCED: addon.advanced,
ATTR_STAGE: addon.stage,
ATTR_AUTO_UPDATE: None,
ATTR_REPOSITORY: addon.repository,
ATTR_VERSION: None,
ATTR_VERSION_LATEST: addon.latest_version,
ATTR_PROTECTED: addon.protected,
ATTR_RATING: rating_security(addon),
ATTR_BOOT_CONFIG: addon.boot_config,
ATTR_BOOT: addon.boot,
ATTR_OPTIONS: addon.options,
ATTR_SCHEMA: addon.schema_ui,
@@ -234,6 +214,7 @@ class APIAddons(CoreSysAttributes):
ATTR_MACHINE: addon.supported_machine,
ATTR_HOMEASSISTANT: addon.homeassistant_version,
ATTR_URL: addon.url,
ATTR_STATE: STATE_NONE,
ATTR_DETACHED: addon.is_detached,
ATTR_AVAILABLE: addon.available,
ATTR_BUILD: addon.need_build,
@@ -242,67 +223,70 @@ class APIAddons(CoreSysAttributes):
ATTR_HOST_NETWORK: addon.host_network,
ATTR_HOST_PID: addon.host_pid,
ATTR_HOST_IPC: addon.host_ipc,
ATTR_HOST_UTS: addon.host_uts,
ATTR_HOST_DBUS: addon.host_dbus,
ATTR_PRIVILEGED: addon.privileged,
ATTR_FULL_ACCESS: addon.with_full_access,
ATTR_APPARMOR: addon.apparmor,
ATTR_DEVICES: _pretty_devices(addon),
ATTR_ICON: addon.with_icon,
ATTR_LOGO: addon.with_logo,
ATTR_CHANGELOG: addon.with_changelog,
ATTR_DOCUMENTATION: addon.with_documentation,
ATTR_STDIN: addon.with_stdin,
ATTR_WEBUI: None,
ATTR_HASSIO_API: addon.access_hassio_api,
ATTR_HASSIO_ROLE: addon.hassio_role,
ATTR_AUTH_API: addon.access_auth_api,
ATTR_HOMEASSISTANT_API: addon.access_homeassistant_api,
ATTR_GPIO: addon.with_gpio,
ATTR_USB: addon.with_usb,
ATTR_UART: addon.with_uart,
ATTR_KERNEL_MODULES: addon.with_kernel_modules,
ATTR_DEVICETREE: addon.with_devicetree,
ATTR_UDEV: addon.with_udev,
ATTR_DOCKER_API: addon.access_docker_api,
ATTR_VIDEO: addon.with_video,
ATTR_AUDIO: addon.with_audio,
ATTR_STARTUP: addon.startup,
ATTR_AUDIO_INPUT: None,
ATTR_AUDIO_OUTPUT: None,
ATTR_SERVICES: _pretty_services(addon),
ATTR_DISCOVERY: addon.discovery,
ATTR_TRANSLATIONS: addon.translations,
ATTR_IP_ADDRESS: None,
ATTR_INGRESS: addon.with_ingress,
ATTR_SIGNED: addon.signed,
ATTR_STATE: addon.state,
ATTR_WEBUI: addon.webui,
ATTR_INGRESS_ENTRY: addon.ingress_entry,
ATTR_INGRESS_URL: addon.ingress_url,
ATTR_INGRESS_PORT: addon.ingress_port,
ATTR_INGRESS_PANEL: addon.ingress_panel,
ATTR_AUDIO_INPUT: addon.audio_input,
ATTR_AUDIO_OUTPUT: addon.audio_output,
ATTR_AUTO_UPDATE: addon.auto_update,
ATTR_IP_ADDRESS: str(addon.ip_address),
ATTR_VERSION: addon.version,
ATTR_UPDATE_AVAILABLE: addon.need_update,
ATTR_WATCHDOG: addon.watchdog,
ATTR_DEVICES: addon.static_devices
+ [device.path for device in addon.devices],
ATTR_SYSTEM_MANAGED: addon.system_managed,
ATTR_SYSTEM_MANAGED_CONFIG_ENTRY: addon.system_managed_config_entry,
ATTR_INGRESS_ENTRY: None,
ATTR_INGRESS_URL: None,
ATTR_INGRESS_PORT: None,
ATTR_INGRESS_PANEL: None,
}
if addon.is_installed:
data.update(
{
ATTR_STATE: await addon.state(),
ATTR_WEBUI: addon.webui,
ATTR_INGRESS_ENTRY: addon.ingress_entry,
ATTR_INGRESS_URL: addon.ingress_url,
ATTR_INGRESS_PORT: addon.ingress_port,
ATTR_INGRESS_PANEL: addon.ingress_panel,
ATTR_AUDIO_INPUT: addon.audio_input,
ATTR_AUDIO_OUTPUT: addon.audio_output,
ATTR_AUTO_UPDATE: addon.auto_update,
ATTR_IP_ADDRESS: str(addon.ip_address),
ATTR_VERSION: addon.version,
}
)
return data
@api_process
async def options(self, request: web.Request) -> None:
"""Store user options for add-on."""
addon = self.get_addon_for_request(request)
addon: AnyAddon = self._extract_addon(request)
# Update secrets for validation
await self.sys_homeassistant.secrets.reload()
await self.sys_secrets.reload()
# Extend schema with add-on specific validation
addon_schema = SCHEMA_OPTIONS.extend(
{vol.Optional(ATTR_OPTIONS): vol.Maybe(addon.schema)}
{vol.Optional(ATTR_OPTIONS): vol.Any(None, addon.schema)}
)
# Validate/Process Body
@@ -310,10 +294,6 @@ class APIAddons(CoreSysAttributes):
if ATTR_OPTIONS in body:
addon.options = body[ATTR_OPTIONS]
if ATTR_BOOT in body:
if addon.boot_config == AddonBootConfig.MANUAL_ONLY:
raise APIError(
f"Addon {addon.slug} boot option is set to {addon.boot_config} so it cannot be changed"
)
addon.boot = body[ATTR_BOOT]
if ATTR_AUTO_UPDATE in body:
addon.auto_update = body[ATTR_AUTO_UPDATE]
@@ -326,96 +306,25 @@ class APIAddons(CoreSysAttributes):
if ATTR_INGRESS_PANEL in body:
addon.ingress_panel = body[ATTR_INGRESS_PANEL]
await self.sys_ingress.update_hass_panel(addon)
if ATTR_WATCHDOG in body:
addon.watchdog = body[ATTR_WATCHDOG]
await addon.save_persist()
@api_process
async def sys_options(self, request: web.Request) -> None:
"""Store system options for an add-on."""
addon = self.get_addon_for_request(request)
# Validate/Process Body
body = await api_validate(SCHEMA_SYS_OPTIONS, request)
if ATTR_SYSTEM_MANAGED in body:
addon.system_managed = body[ATTR_SYSTEM_MANAGED]
if ATTR_SYSTEM_MANAGED_CONFIG_ENTRY in body:
addon.system_managed_config_entry = body[ATTR_SYSTEM_MANAGED_CONFIG_ENTRY]
await addon.save_persist()
@api_process
async def options_validate(self, request: web.Request) -> OptionsValidateResponse:
"""Validate user options for add-on."""
addon = self.get_addon_for_request(request)
data = OptionsValidateResponse(message="", valid=True, pwned=False)
options = await request.json(loads=json_loads) or addon.options
# Validate config
options_schema = addon.schema
try:
options_schema.validate(options)
except vol.Invalid as ex:
data["message"] = humanize_error(options, ex)
data["valid"] = False
if not self.sys_security.pwned:
return data
# Pwned check
for secret in options_schema.pwned:
try:
await self.sys_security.verify_secret(secret)
continue
except PwnedSecret:
data["pwned"] = True
except PwnedError:
data["pwned"] = None
break
if self.sys_security.force and data["pwned"] in (None, True):
data["valid"] = False
if data["pwned"] is None:
data["message"] = "Error happening on pwned secrets check!"
else:
data["message"] = "Add-on uses pwned secrets!"
return data
@api_process
async def options_config(self, request: web.Request) -> None:
"""Validate user options for add-on."""
slug: str = request.match_info["addon"]
if slug != "self":
raise APIForbidden("This can be only read by the Add-on itself!")
addon = self.get_addon_for_request(request)
# Lookup/reload secrets
await self.sys_homeassistant.secrets.reload()
try:
return addon.schema.validate(addon.options)
except vol.Invalid:
raise APIError("Invalid configuration data for the add-on") from None
addon.save_persist()
@api_process
async def security(self, request: web.Request) -> None:
"""Store security options for add-on."""
addon = self.get_addon_for_request(request)
body: dict[str, Any] = await api_validate(SCHEMA_SECURITY, request)
addon: AnyAddon = self._extract_addon(request)
body: Dict[str, Any] = await api_validate(SCHEMA_SECURITY, request)
if ATTR_PROTECTED in body:
_LOGGER.warning("Changing protected flag for %s!", addon.slug)
_LOGGER.warning("Protected flag changing for %s!", addon.slug)
addon.protected = body[ATTR_PROTECTED]
await addon.save_persist()
addon.save_persist()
@api_process
async def stats(self, request: web.Request) -> dict[str, Any]:
async def stats(self, request: web.Request) -> Dict[str, Any]:
"""Return resource information."""
addon = self.get_addon_for_request(request)
addon: AnyAddon = self._extract_addon(request)
stats: DockerStats = await addon.stats()
return {
@@ -430,58 +339,122 @@ class APIAddons(CoreSysAttributes):
}
@api_process
async def uninstall(self, request: web.Request) -> Awaitable[None]:
"""Uninstall add-on."""
addon = self.get_addon_for_request(request)
body: dict[str, Any] = await api_validate(SCHEMA_UNINSTALL, request)
return await asyncio.shield(
self.sys_addons.uninstall(
addon.slug, remove_config=body[ATTR_REMOVE_CONFIG]
)
)
def install(self, request: web.Request) -> Awaitable[None]:
"""Install add-on."""
addon: AnyAddon = self._extract_addon(request, check_installed=False)
return asyncio.shield(addon.install())
@api_process
async def start(self, request: web.Request) -> None:
def uninstall(self, request: web.Request) -> Awaitable[None]:
"""Uninstall add-on."""
addon: AnyAddon = self._extract_addon(request)
return asyncio.shield(addon.uninstall())
@api_process
def start(self, request: web.Request) -> Awaitable[None]:
"""Start add-on."""
addon = self.get_addon_for_request(request)
if start_task := await asyncio.shield(addon.start()):
await start_task
addon: AnyAddon = self._extract_addon(request)
return asyncio.shield(addon.start())
@api_process
def stop(self, request: web.Request) -> Awaitable[None]:
"""Stop add-on."""
addon = self.get_addon_for_request(request)
addon: AnyAddon = self._extract_addon(request)
return asyncio.shield(addon.stop())
@api_process
async def restart(self, request: web.Request) -> None:
"""Restart add-on."""
addon: Addon = self.get_addon_for_request(request)
if start_task := await asyncio.shield(addon.restart()):
await start_task
def update(self, request: web.Request) -> Awaitable[None]:
"""Update add-on."""
addon: AnyAddon = self._extract_addon(request)
if addon.latest_version == addon.version:
raise APIError("No update available!")
return asyncio.shield(addon.update())
@api_process
async def rebuild(self, request: web.Request) -> None:
"""Rebuild local build add-on."""
addon = self.get_addon_for_request(request)
body: dict[str, Any] = await api_validate(SCHEMA_REBUILD, request)
def restart(self, request: web.Request) -> Awaitable[None]:
"""Restart add-on."""
addon: AnyAddon = self._extract_addon(request)
return asyncio.shield(addon.restart())
if start_task := await asyncio.shield(
self.sys_addons.rebuild(addon.slug, force=body[ATTR_FORCE])
):
await start_task
@api_process
def rebuild(self, request: web.Request) -> Awaitable[None]:
"""Rebuild local build add-on."""
addon: AnyAddon = self._extract_addon(request)
if not addon.need_build:
raise APIError("Only local build addons are supported")
return asyncio.shield(addon.rebuild())
@api_process_raw(CONTENT_TYPE_BINARY)
def logs(self, request: web.Request) -> Awaitable[bytes]:
"""Return logs from add-on."""
addon: AnyAddon = self._extract_addon(request)
return addon.logs()
@api_process_raw(CONTENT_TYPE_PNG)
async def icon(self, request: web.Request) -> bytes:
"""Return icon from add-on."""
addon: AnyAddon = self._extract_addon(request, check_installed=False)
if not addon.with_icon:
raise APIError("No icon found!")
with addon.path_icon.open("rb") as png:
return png.read()
@api_process_raw(CONTENT_TYPE_PNG)
async def logo(self, request: web.Request) -> bytes:
"""Return logo from add-on."""
addon: AnyAddon = self._extract_addon(request, check_installed=False)
if not addon.with_logo:
raise APIError("No logo found!")
with addon.path_logo.open("rb") as png:
return png.read()
@api_process_raw(CONTENT_TYPE_TEXT)
async def changelog(self, request: web.Request) -> str:
"""Return changelog from add-on."""
addon: AnyAddon = self._extract_addon(request, check_installed=False)
if not addon.with_changelog:
raise APIError("No changelog found!")
with addon.path_changelog.open("r") as changelog:
return changelog.read()
@api_process_raw(CONTENT_TYPE_TEXT)
async def documentation(self, request: web.Request) -> str:
"""Return documentation from add-on."""
addon: AnyAddon = self._extract_addon(request, check_installed=False)
if not addon.with_documentation:
raise APIError("No documentation found!")
with addon.path_documentation.open("r") as documentation:
return documentation.read()
@api_process
async def stdin(self, request: web.Request) -> None:
"""Write to stdin of add-on."""
addon = self.get_addon_for_request(request)
addon: AnyAddon = self._extract_addon(request)
if not addon.with_stdin:
raise APIError(f"STDIN not supported the {addon.slug} add-on")
raise APIError("STDIN not supported by add-on")
data = await request.read()
await asyncio.shield(addon.write_stdin(data))
def _pretty_services(addon: Addon) -> list[str]:
def _pretty_devices(addon: AnyAddon) -> List[str]:
"""Return a simplified device list."""
dev_list = addon.devices
if not dev_list:
return None
return [row.split(":")[0] for row in dev_list]
def _pretty_services(addon: AnyAddon) -> List[str]:
"""Return a simplified services role list."""
return [f"{name}:{access}" for name, access in addon.services_role.items()]
services = []
for name, access in addon.services_role.items():
services.append(f"{name}:{access}")
return services
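A quick sketch of what both helpers return, assuming the usual "host:container:permissions" device notation (example values invented):
# _pretty_devices: ["/dev/ttyUSB0:/dev/ttyUSB0:rwm"] -> ["/dev/ttyUSB0"]
# _pretty_services: {"mysql": "rw"}                  -> ["mysql:rw"]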

View File

@@ -1,12 +1,10 @@
"""Init file for Supervisor Audio RESTful API."""
import asyncio
from collections.abc import Awaitable
from dataclasses import asdict
import logging
from typing import Any
from typing import Any, Awaitable, Dict
from aiohttp import web
import attr
import voluptuous as vol
from ..const import (
@@ -20,6 +18,7 @@ from ..const import (
ATTR_HOST,
ATTR_INDEX,
ATTR_INPUT,
ATTR_VERSION_LATEST,
ATTR_MEMORY_LIMIT,
ATTR_MEMORY_PERCENT,
ATTR_MEMORY_USAGE,
@@ -27,20 +26,18 @@ from ..const import (
ATTR_NETWORK_RX,
ATTR_NETWORK_TX,
ATTR_OUTPUT,
ATTR_UPDATE_AVAILABLE,
ATTR_VERSION,
ATTR_VERSION_LATEST,
ATTR_VOLUME,
CONTENT_TYPE_BINARY,
)
from ..coresys import CoreSysAttributes
from ..exceptions import APIError
from ..host.sound import StreamType
from ..validate import version_tag
from .utils import api_process, api_validate
from .utils import api_process, api_process_raw, api_validate
_LOGGER: logging.Logger = logging.getLogger(__name__)
SCHEMA_VERSION = vol.Schema({vol.Optional(ATTR_VERSION): version_tag})
SCHEMA_VERSION = vol.Schema({vol.Optional(ATTR_VERSION): vol.Coerce(str)})
SCHEMA_VOLUME = vol.Schema(
{
@@ -57,10 +54,10 @@ SCHEMA_MUTE = vol.Schema(
}
)
SCHEMA_DEFAULT = vol.Schema({vol.Required(ATTR_NAME): str})
SCHEMA_DEFAULT = vol.Schema({vol.Required(ATTR_NAME): vol.Coerce(str)})
SCHEMA_PROFILE = vol.Schema(
{vol.Required(ATTR_CARD): str, vol.Required(ATTR_NAME): str}
{vol.Required(ATTR_CARD): vol.Coerce(str), vol.Required(ATTR_NAME): vol.Coerce(str)}
)
@@ -68,25 +65,28 @@ class APIAudio(CoreSysAttributes):
"""Handle RESTful API for Audio functions."""
@api_process
async def info(self, request: web.Request) -> dict[str, Any]:
async def info(self, request: web.Request) -> Dict[str, Any]:
"""Return Audio information."""
return {
ATTR_VERSION: self.sys_plugins.audio.version,
ATTR_VERSION_LATEST: self.sys_plugins.audio.latest_version,
ATTR_UPDATE_AVAILABLE: self.sys_plugins.audio.need_update,
ATTR_HOST: str(self.sys_docker.network.audio),
ATTR_AUDIO: {
ATTR_CARD: [asdict(card) for card in self.sys_host.sound.cards],
ATTR_INPUT: [asdict(stream) for stream in self.sys_host.sound.inputs],
ATTR_OUTPUT: [asdict(stream) for stream in self.sys_host.sound.outputs],
ATTR_CARD: [attr.asdict(card) for card in self.sys_host.sound.cards],
ATTR_INPUT: [
attr.asdict(stream) for stream in self.sys_host.sound.inputs
],
ATTR_OUTPUT: [
attr.asdict(stream) for stream in self.sys_host.sound.outputs
],
ATTR_APPLICATION: [
asdict(stream) for stream in self.sys_host.sound.applications
attr.asdict(stream) for stream in self.sys_host.sound.applications
],
},
}
@api_process
async def stats(self, request: web.Request) -> dict[str, Any]:
async def stats(self, request: web.Request) -> Dict[str, Any]:
"""Return resource information."""
stats = await self.sys_plugins.audio.stats()
@@ -108,9 +108,14 @@ class APIAudio(CoreSysAttributes):
version = body.get(ATTR_VERSION, self.sys_plugins.audio.latest_version)
if version == self.sys_plugins.audio.version:
raise APIError(f"Version {version} is already in use")
raise APIError("Version {} is already in use".format(version))
await asyncio.shield(self.sys_plugins.audio.update(version))
@api_process_raw(CONTENT_TYPE_BINARY)
def logs(self, request: web.Request) -> Awaitable[bytes]:
"""Return Audio Docker logs."""
return self.sys_plugins.audio.logs()
@api_process
def restart(self, request: web.Request) -> Awaitable[None]:
"""Restart Audio plugin."""
@@ -124,7 +129,7 @@
@api_process
async def set_volume(self, request: web.Request) -> None:
"""Set audio volume on stream."""
source: StreamType = StreamType(request.match_info["source"])
source: StreamType = StreamType(request.match_info.get("source"))
application: bool = request.path.endswith("application")
body = await api_validate(SCHEMA_VOLUME, request)
@@ -137,7 +142,7 @@
@api_process
async def set_mute(self, request: web.Request) -> None:
"""Mute audio volume on stream."""
source: StreamType = StreamType(request.match_info["source"])
source: StreamType = StreamType(request.match_info.get("source"))
application: bool = request.path.endswith("application")
body = await api_validate(SCHEMA_MUTE, request)
@@ -150,7 +155,7 @@
@api_process
async def set_default(self, request: web.Request) -> None:
"""Set audio default stream."""
source: StreamType = StreamType(request.match_info["source"])
source: StreamType = StreamType(request.match_info.get("source"))
body = await api_validate(SCHEMA_DEFAULT, request)
await asyncio.shield(self.sys_host.sound.set_default(source, body[ATTR_NAME]))

View File

@@ -1,50 +1,39 @@
"""Init file for Supervisor auth/SSO RESTful API."""
import asyncio
from collections.abc import Awaitable
import logging
from typing import Any, cast
from typing import Dict
from aiohttp import BasicAuth, web
from aiohttp.hdrs import AUTHORIZATION, CONTENT_TYPE, WWW_AUTHENTICATE
from aiohttp.web import FileField
from aiohttp.web_exceptions import HTTPUnauthorized
from multidict import MultiDictProxy
import voluptuous as vol
from ..addons.addon import Addon
from ..const import ATTR_NAME, ATTR_PASSWORD, ATTR_USERNAME, REQUEST_FROM
from ..coresys import CoreSysAttributes
from ..exceptions import APIForbidden
from .const import (
ATTR_GROUP_IDS,
ATTR_IS_ACTIVE,
ATTR_IS_OWNER,
ATTR_LOCAL_ONLY,
ATTR_USERS,
from ..const import (
ATTR_PASSWORD,
ATTR_USERNAME,
CONTENT_TYPE_JSON,
CONTENT_TYPE_URL,
REQUEST_FROM,
)
from .utils import api_process, api_validate, json_loads
from ..coresys import CoreSysAttributes
from ..exceptions import APIForbidden
from .utils import api_process, api_validate
_LOGGER: logging.Logger = logging.getLogger(__name__)
SCHEMA_PASSWORD_RESET = vol.Schema(
{
vol.Required(ATTR_USERNAME): str,
vol.Required(ATTR_PASSWORD): str,
vol.Required(ATTR_USERNAME): vol.Coerce(str),
vol.Required(ATTR_PASSWORD): vol.Coerce(str),
}
)
REALM_HEADER: dict[str, str] = {
WWW_AUTHENTICATE: 'Basic realm="Home Assistant Authentication"'
}
class APIAuth(CoreSysAttributes):
"""Handle RESTful API for auth functions."""
def _process_basic(self, request: web.Request, addon: Addon) -> Awaitable[bool]:
def _process_basic(self, request: web.Request, addon: Addon) -> bool:
"""Process login request with basic auth.
Return a coroutine.
@@ -53,11 +42,8 @@ class APIAuth(CoreSysAttributes):
return self.sys_auth.check_login(addon, auth.login, auth.password)
def _process_dict(
self,
request: web.Request,
addon: Addon,
data: dict[str, Any] | MultiDictProxy[str | bytes | FileField],
) -> Awaitable[bool]:
self, request: web.Request, addon: Addon, data: Dict[str, str]
) -> bool:
"""Process login with dict data.
Return a coroutine.
@@ -65,74 +51,38 @@ class APIAuth(CoreSysAttributes):
username = data.get("username") or data.get("user")
password = data.get("password")
# Ensure username and password are strings and not something else; raise if not
try:
_ = username.encode and password.encode # type: ignore
except AttributeError:
raise HTTPUnauthorized(headers=REALM_HEADER) from None
return self.sys_auth.check_login(
addon, cast(str, username), cast(str, password)
)
return self.sys_auth.check_login(addon, username, password)
@api_process
async def auth(self, request: web.Request) -> bool:
"""Process login request."""
addon = request[REQUEST_FROM]
if not isinstance(addon, Addon) or not addon.access_auth_api:
if not addon.access_auth_api:
raise APIForbidden("Can't use Home Assistant auth!")
# BasicAuth
if AUTHORIZATION in request.headers:
if not await self._process_basic(request, addon):
raise HTTPUnauthorized(headers=REALM_HEADER)
return True
return await self._process_basic(request, addon)
# Json
if request.headers.get(CONTENT_TYPE) == CONTENT_TYPE_JSON:
data = await request.json(loads=json_loads)
if not await self._process_dict(request, addon, data):
raise HTTPUnauthorized()
return True
data = await request.json()
return await self._process_dict(request, addon, data)
# URL encoded
if request.headers.get(CONTENT_TYPE) == CONTENT_TYPE_URL:
data = await request.post()
if not await self._process_dict(request, addon, data):
raise HTTPUnauthorized()
return True
return await self._process_dict(request, addon, data)
# Advertise Basic authentication by default
raise HTTPUnauthorized(headers=REALM_HEADER)
raise HTTPUnauthorized(
headers={WWW_AUTHENTICATE: 'Basic realm="Home Assistant Authentication"'}
)
@api_process
async def reset(self, request: web.Request) -> None:
"""Process reset password request."""
body: dict[str, str] = await api_validate(SCHEMA_PASSWORD_RESET, request)
body: Dict[str, str] = await api_validate(SCHEMA_PASSWORD_RESET, request)
await asyncio.shield(
self.sys_auth.change_password(body[ATTR_USERNAME], body[ATTR_PASSWORD])
)
@api_process
async def cache(self, request: web.Request) -> None:
"""Process cache reset request."""
await self.sys_auth.reset_data()
@api_process
async def list_users(self, request: web.Request) -> dict[str, list[dict[str, Any]]]:
"""List users on the Home Assistant instance."""
return {
ATTR_USERS: [
{
ATTR_USERNAME: user[ATTR_USERNAME],
ATTR_NAME: user[ATTR_NAME],
ATTR_IS_OWNER: user[ATTR_IS_OWNER],
ATTR_IS_ACTIVE: user[ATTR_IS_ACTIVE],
ATTR_LOCAL_ONLY: user[ATTR_LOCAL_ONLY],
ATTR_GROUP_IDS: user[ATTR_GROUP_IDS],
}
for user in await self.sys_auth.list_users()
if user[ATTR_USERNAME]
]
}

View File

@@ -1,579 +0,0 @@
"""Backups RESTful API."""
from __future__ import annotations
import asyncio
from collections.abc import Callable
import errno
from io import IOBase
import logging
from pathlib import Path
import re
from tempfile import TemporaryDirectory
from typing import Any, cast
from aiohttp import BodyPartReader, web
from aiohttp.hdrs import CONTENT_DISPOSITION
import voluptuous as vol
from voluptuous.humanize import humanize_error
from ..backups.backup import Backup
from ..backups.const import LOCATION_CLOUD_BACKUP, LOCATION_TYPE
from ..backups.validate import ALL_FOLDERS, FOLDER_HOMEASSISTANT, days_until_stale
from ..const import (
ATTR_ADDONS,
ATTR_BACKUPS,
ATTR_COMPRESSED,
ATTR_CONTENT,
ATTR_DATE,
ATTR_DAYS_UNTIL_STALE,
ATTR_EXTRA,
ATTR_FILENAME,
ATTR_FOLDERS,
ATTR_HOMEASSISTANT,
ATTR_HOMEASSISTANT_EXCLUDE_DATABASE,
ATTR_JOB_ID,
ATTR_LOCATION,
ATTR_NAME,
ATTR_PASSWORD,
ATTR_PROTECTED,
ATTR_REPOSITORIES,
ATTR_SIZE,
ATTR_SIZE_BYTES,
ATTR_SLUG,
ATTR_SUPERVISOR_VERSION,
ATTR_TIMEOUT,
ATTR_TYPE,
ATTR_VERSION,
REQUEST_FROM,
BusEvent,
CoreState,
)
from ..coresys import CoreSysAttributes
from ..exceptions import APIError, APIForbidden, APINotFound
from ..jobs import JobSchedulerOptions, SupervisorJob
from ..mounts.const import MountUsage
from ..resolution.const import UnhealthyReason
from .const import (
ATTR_ADDITIONAL_LOCATIONS,
ATTR_BACKGROUND,
ATTR_LOCATION_ATTRIBUTES,
ATTR_LOCATIONS,
CONTENT_TYPE_TAR,
)
from .utils import api_process, api_validate
_LOGGER: logging.Logger = logging.getLogger(__name__)
ALL_ADDONS_FLAG = "ALL"
LOCATION_LOCAL = ".local"
RE_SLUGIFY_NAME = re.compile(r"[^A-Za-z0-9]+")
RE_BACKUP_FILENAME = re.compile(r"^[^\\\/]+\.tar$")
# Backwards compatible
# Remove: 2022.08
_ALL_FOLDERS = ALL_FOLDERS + [FOLDER_HOMEASSISTANT]
def _ensure_list(item: Any) -> list:
"""Ensure value is a list."""
if not isinstance(item, list):
return [item]
return item
def _convert_local_location(item: str | None) -> str | None:
"""Convert local location value."""
if item in {LOCATION_LOCAL, ""}:
return None
return item
# pylint: disable=no-value-for-parameter
SCHEMA_FOLDERS = vol.All([vol.In(_ALL_FOLDERS)], vol.Unique())
SCHEMA_LOCATION = vol.All(vol.Maybe(str), _convert_local_location)
SCHEMA_LOCATION_LIST = vol.All(_ensure_list, [SCHEMA_LOCATION], vol.Unique())
SCHEMA_RESTORE_FULL = vol.Schema(
{
vol.Optional(ATTR_PASSWORD): vol.Maybe(str),
vol.Optional(ATTR_BACKGROUND, default=False): vol.Boolean(),
vol.Optional(ATTR_LOCATION): SCHEMA_LOCATION,
}
)
SCHEMA_RESTORE_PARTIAL = SCHEMA_RESTORE_FULL.extend(
{
vol.Optional(ATTR_HOMEASSISTANT): vol.Boolean(),
vol.Optional(ATTR_ADDONS): vol.All([str], vol.Unique()),
vol.Optional(ATTR_FOLDERS): SCHEMA_FOLDERS,
}
)
SCHEMA_BACKUP_FULL = vol.Schema(
{
vol.Optional(ATTR_NAME): str,
vol.Optional(ATTR_FILENAME): vol.Match(RE_BACKUP_FILENAME),
vol.Optional(ATTR_PASSWORD): vol.Maybe(str),
vol.Optional(ATTR_COMPRESSED): vol.Maybe(vol.Boolean()),
vol.Optional(ATTR_LOCATION): SCHEMA_LOCATION_LIST,
vol.Optional(ATTR_HOMEASSISTANT_EXCLUDE_DATABASE): vol.Boolean(),
vol.Optional(ATTR_BACKGROUND, default=False): vol.Boolean(),
vol.Optional(ATTR_EXTRA): dict,
}
)
SCHEMA_BACKUP_PARTIAL = SCHEMA_BACKUP_FULL.extend(
{
vol.Optional(ATTR_ADDONS): vol.Or(
ALL_ADDONS_FLAG, vol.All([str], vol.Unique())
),
vol.Optional(ATTR_FOLDERS): SCHEMA_FOLDERS,
vol.Optional(ATTR_HOMEASSISTANT): vol.Boolean(),
}
)
SCHEMA_OPTIONS = vol.Schema({vol.Optional(ATTR_DAYS_UNTIL_STALE): days_until_stale})
SCHEMA_FREEZE = vol.Schema({vol.Optional(ATTR_TIMEOUT): vol.All(int, vol.Range(min=1))})
SCHEMA_REMOVE = vol.Schema({vol.Optional(ATTR_LOCATION): SCHEMA_LOCATION_LIST})
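As an illustration, a body accepted by SCHEMA_BACKUP_FULL might look like this (mount name "my_nas" is hypothetical):
# Hypothetical body for POST /backups/new/full
{
    "name": "Nightly",
    "filename": "nightly.tar",     # must match RE_BACKUP_FILENAME
    "compressed": True,
    "location": [None, "my_nas"],  # None / ".local" means local storage
    "background": False,
}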
class APIBackups(CoreSysAttributes):
"""Handle RESTful API for backups functions."""
def _extract_slug(self, request):
"""Return backup, throw an exception if it doesn't exist."""
backup = self.sys_backups.get(request.match_info.get("slug"))
if not backup:
raise APINotFound("Backup does not exist")
return backup
def _make_location_attributes(self, backup: Backup) -> dict[str, dict[str, Any]]:
"""Make location attributes dictionary."""
return {
loc if loc else LOCATION_LOCAL: {
ATTR_PROTECTED: backup.all_locations[loc].protected,
ATTR_SIZE_BYTES: backup.all_locations[loc].size_bytes,
}
for loc in backup.locations
}
def _list_backups(self):
"""Return list of backups."""
return [
{
ATTR_SLUG: backup.slug,
ATTR_NAME: backup.name,
ATTR_DATE: backup.date,
ATTR_TYPE: backup.sys_type,
ATTR_SIZE: backup.size,
ATTR_SIZE_BYTES: backup.size_bytes,
ATTR_LOCATION: backup.location,
ATTR_LOCATIONS: backup.locations,
ATTR_PROTECTED: backup.protected,
ATTR_LOCATION_ATTRIBUTES: self._make_location_attributes(backup),
ATTR_COMPRESSED: backup.compressed,
ATTR_CONTENT: {
ATTR_HOMEASSISTANT: backup.homeassistant_version is not None,
ATTR_ADDONS: backup.addon_list,
ATTR_FOLDERS: backup.folders,
},
}
for backup in self.sys_backups.list_backups
if backup.location != LOCATION_CLOUD_BACKUP
]
@api_process
async def list_backups(self, request):
"""Return backup list."""
data_backups = self._list_backups()
if request.path == "/snapshots":
# Kept for backwards compatibility
return {"snapshots": data_backups}
return {ATTR_BACKUPS: data_backups}
@api_process
async def info(self, request):
"""Return backup list and manager info."""
return {
ATTR_BACKUPS: self._list_backups(),
ATTR_DAYS_UNTIL_STALE: self.sys_backups.days_until_stale,
}
@api_process
async def options(self, request):
"""Set backup manager options."""
body = await api_validate(SCHEMA_OPTIONS, request)
if ATTR_DAYS_UNTIL_STALE in body:
self.sys_backups.days_until_stale = body[ATTR_DAYS_UNTIL_STALE]
await self.sys_backups.save_data()
@api_process
async def reload(self, _):
"""Reload backup list."""
await asyncio.shield(self.sys_backups.reload())
return True
@api_process
async def backup_info(self, request):
"""Return backup info."""
backup = self._extract_slug(request)
data_addons = []
for addon_data in backup.addons:
data_addons.append(
{
ATTR_SLUG: addon_data[ATTR_SLUG],
ATTR_NAME: addon_data[ATTR_NAME],
ATTR_VERSION: addon_data[ATTR_VERSION],
ATTR_SIZE: addon_data[ATTR_SIZE],
}
)
return {
ATTR_SLUG: backup.slug,
ATTR_TYPE: backup.sys_type,
ATTR_NAME: backup.name,
ATTR_DATE: backup.date,
ATTR_SIZE: backup.size,
ATTR_SIZE_BYTES: backup.size_bytes,
ATTR_COMPRESSED: backup.compressed,
ATTR_PROTECTED: backup.protected,
ATTR_LOCATION_ATTRIBUTES: self._make_location_attributes(backup),
ATTR_SUPERVISOR_VERSION: backup.supervisor_version,
ATTR_HOMEASSISTANT: backup.homeassistant_version,
ATTR_LOCATION: backup.location,
ATTR_LOCATIONS: backup.locations,
ATTR_ADDONS: data_addons,
ATTR_REPOSITORIES: backup.repositories,
ATTR_FOLDERS: backup.folders,
ATTR_HOMEASSISTANT_EXCLUDE_DATABASE: backup.homeassistant_exclude_database,
ATTR_EXTRA: backup.extra,
}
def _location_to_mount(self, location: str | None) -> LOCATION_TYPE:
"""Convert a single location to a mount if possible."""
if not location or location == LOCATION_CLOUD_BACKUP:
return cast(LOCATION_TYPE, location)
mount = self.sys_mounts.get(location)
if mount.usage != MountUsage.BACKUP:
raise APIError(
f"Mount {mount.name} is not used for backups, cannot backup to there"
)
return mount
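
`_location_to_mount` passes the local (None) and cloud-backup markers through untouched and otherwise resolves the name against the mount manager, rejecting mounts not dedicated to backups. A simplified standalone sketch, with a plain dict standing in for `sys_mounts` and ".cloud_backup" an assumed marker value:

# Simplified sketch of the resolution above (dict stands in for the mount
# manager; unknown names raise KeyError here, which the real code handles).
from enum import Enum

class MountUsage(Enum):
    BACKUP = "backup"
    MEDIA = "media"

MOUNTS = {"nas": MountUsage.BACKUP, "media_share": MountUsage.MEDIA}

def location_to_mount(location):
    if not location or location == ".cloud_backup":
        return location  # local storage and the cloud marker pass through
    if MOUNTS[location] != MountUsage.BACKUP:
        raise ValueError(f"Mount {location} is not used for backups")
    return location

assert location_to_mount(None) is None
assert location_to_mount("nas") == "nas"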
def _location_field_to_mount(self, body: dict[str, Any]) -> dict[str, Any]:
"""Change location field to mount if necessary."""
body[ATTR_LOCATION] = self._location_to_mount(body.get(ATTR_LOCATION))
return body
def _validate_cloud_backup_location(
self, request: web.Request, location: list[str | None] | str | None
) -> None:
"""Cloud backup location is only available to Home Assistant."""
if not isinstance(location, list):
location = [location]
if (
LOCATION_CLOUD_BACKUP in location
and request.get(REQUEST_FROM) != self.sys_homeassistant
):
raise APIForbidden(
f"Location {LOCATION_CLOUD_BACKUP} is only available for Home Assistant"
)
async def _background_backup_task(
self, backup_method: Callable, *args, **kwargs
) -> tuple[asyncio.Task, str]:
"""Start backup task in background and return task and job ID."""
event = asyncio.Event()
job, backup_task = cast(
tuple[SupervisorJob, asyncio.Task],
self.sys_jobs.schedule_job(
backup_method, JobSchedulerOptions(), *args, **kwargs
),
)
async def release_on_freeze(new_state: CoreState):
if new_state == CoreState.FREEZE:
event.set()
# Wait for system to get into freeze state before returning
# If the backup fails validation it will raise before getting there
listener = self.sys_bus.register_event(
BusEvent.SUPERVISOR_STATE_CHANGE, release_on_freeze
)
try:
event_task = self.sys_create_task(event.wait())
_, pending = await asyncio.wait(
(backup_task, event_task),
return_when=asyncio.FIRST_COMPLETED,
)
# It seems backup returned early (error or something), make sure to cancel
# the event task to avoid "Task was destroyed but it is pending!" errors.
if event_task in pending:
event_task.cancel()
return (backup_task, job.uuid)
finally:
self.sys_bus.remove_listener(listener)
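
The helper above races the scheduled backup task against a freeze-state event so the API can hand back a job ID as soon as the backup is actually underway, while still surfacing early validation failures. A self-contained sketch of that task-versus-event race, with plain asyncio in place of the Supervisor job scheduler and bus:

# Minimal sketch of the race used above: return as soon as either the work
# finishes (early failure) or a readiness event fires (work underway).
import asyncio

async def _work(ready: asyncio.Event) -> str:
    await asyncio.sleep(0.1)     # validation phase
    ready.set()                  # comparable to CoreState.FREEZE being reached
    await asyncio.sleep(1.0)     # long-running backup phase
    return "slug1234"

async def main() -> None:
    ready = asyncio.Event()
    work_task = asyncio.create_task(_work(ready))
    event_task = asyncio.create_task(ready.wait())
    done, pending = await asyncio.wait(
        {work_task, event_task}, return_when=asyncio.FIRST_COMPLETED
    )
    if event_task in pending:    # work returned early, clean up the waiter
        event_task.cancel()
    print("work finished early" if work_task in done else "work is underway")
    await work_task              # a real caller could await or detach here

asyncio.run(main())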
@api_process
async def backup_full(self, request: web.Request):
"""Create full backup."""
body = await api_validate(SCHEMA_BACKUP_FULL, request)
locations: list[LOCATION_TYPE] | None = None
if ATTR_LOCATION in body:
location_names: list[str | None] = body.pop(ATTR_LOCATION)
self._validate_cloud_backup_location(request, location_names)
locations = [
self._location_to_mount(location) for location in location_names
]
body[ATTR_LOCATION] = locations.pop(0)
if locations:
body[ATTR_ADDITIONAL_LOCATIONS] = locations
background = body.pop(ATTR_BACKGROUND)
backup_task, job_id = await self._background_backup_task(
self.sys_backups.do_backup_full, **body
)
if background and not backup_task.done():
return {ATTR_JOB_ID: job_id}
backup: Backup = await backup_task
if backup:
return {ATTR_JOB_ID: job_id, ATTR_SLUG: backup.slug}
raise APIError(
f"An error occurred while making backup, check job '{job_id}' or supervisor logs for details",
job_id=job_id,
)
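
From a caller's perspective, `background=true` turns the endpoint into fire-and-forget: only the job ID comes back, to be polled for progress, while a foreground call blocks until the slug is known. A hypothetical client sketch (URL, payload keys, and the response envelope are assumptions based on the handler above):

# Hypothetical caller sketch for the handler above (illustrative only).
import aiohttp

async def start_full_backup(session: aiohttp.ClientSession) -> str:
    async with session.post(
        "http://supervisor/backups/new/full",
        json={"name": "nightly", "background": True},
    ) as resp:
        data = await resp.json()
    return data["data"]["job_id"]  # poll the jobs API with this ID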
@api_process
async def backup_partial(self, request: web.Request):
"""Create a partial backup."""
body = await api_validate(SCHEMA_BACKUP_PARTIAL, request)
locations: list[LOCATION_TYPE] | None = None
if ATTR_LOCATION in body:
location_names: list[str | None] = body.pop(ATTR_LOCATION)
self._validate_cloud_backup_location(request, location_names)
locations = [
self._location_to_mount(location) for location in location_names
]
body[ATTR_LOCATION] = locations.pop(0)
if locations:
body[ATTR_ADDITIONAL_LOCATIONS] = locations
if body.get(ATTR_ADDONS) == ALL_ADDONS_FLAG:
body[ATTR_ADDONS] = list(self.sys_addons.local)
background = body.pop(ATTR_BACKGROUND)
backup_task, job_id = await self._background_backup_task(
self.sys_backups.do_backup_partial, **body
)
if background and not backup_task.done():
return {ATTR_JOB_ID: job_id}
backup: Backup = await backup_task
if backup:
return {ATTR_JOB_ID: job_id, ATTR_SLUG: backup.slug}
raise APIError(
f"An error occurred while making backup, check job '{job_id}' or supervisor logs for details",
job_id=job_id,
)
@api_process
async def restore_full(self, request: web.Request):
"""Full restore of a backup."""
backup = self._extract_slug(request)
body = await api_validate(SCHEMA_RESTORE_FULL, request)
self._validate_cloud_backup_location(
request, body.get(ATTR_LOCATION, backup.location)
)
background = body.pop(ATTR_BACKGROUND)
restore_task, job_id = await self._background_backup_task(
self.sys_backups.do_restore_full, backup, **body
)
if background and not restore_task.done() or await restore_task:
return {ATTR_JOB_ID: job_id}
raise APIError(
f"An error occurred during restore of {backup.slug}, check job '{job_id}' or supervisor logs for details",
job_id=job_id,
)
@api_process
async def restore_partial(self, request: web.Request):
"""Partial restore a backup."""
backup = self._extract_slug(request)
body = await api_validate(SCHEMA_RESTORE_PARTIAL, request)
self._validate_cloud_backup_location(
request, body.get(ATTR_LOCATION, backup.location)
)
background = body.pop(ATTR_BACKGROUND)
restore_task, job_id = await self._background_backup_task(
self.sys_backups.do_restore_partial, backup, **body
)
if background and not restore_task.done() or await restore_task:
return {ATTR_JOB_ID: job_id}
raise APIError(
f"An error occurred during restore of {backup.slug}, check job '{job_id}' or supervisor logs for details",
job_id=job_id,
)
@api_process
async def freeze(self, request: web.Request):
"""Initiate manual freeze for external backup."""
body = await api_validate(SCHEMA_FREEZE, request)
await asyncio.shield(self.sys_backups.freeze_all(**body))
@api_process
async def thaw(self, request: web.Request):
"""Begin thaw after manual freeze."""
await self.sys_backups.thaw_all()
@api_process
async def remove(self, request: web.Request):
"""Remove a backup."""
backup = self._extract_slug(request)
body = await api_validate(SCHEMA_REMOVE, request)
locations: list[LOCATION_TYPE] | None = None
if ATTR_LOCATION in body:
self._validate_cloud_backup_location(request, body[ATTR_LOCATION])
locations = [self._location_to_mount(name) for name in body[ATTR_LOCATION]]
else:
self._validate_cloud_backup_location(request, backup.location)
await self.sys_backups.remove(backup, locations=locations)
@api_process
async def download(self, request: web.Request):
"""Download a backup file."""
backup = self._extract_slug(request)
# Query will give us '' for /backups, convert value to None
location = _convert_local_location(
request.query.get(ATTR_LOCATION, backup.location)
)
self._validate_cloud_backup_location(request, location)
if location not in backup.all_locations:
raise APIError(f"Backup {backup.slug} is not in location {location}")
_LOGGER.info("Downloading backup %s", backup.slug)
filename = backup.all_locations[location].path
# If the file is missing, return 404 and trigger reload of location
if not await self.sys_run_in_executor(filename.is_file):
self.sys_create_task(self.sys_backups.reload(location))
return web.Response(status=404)
response = web.FileResponse(filename)
response.content_type = CONTENT_TYPE_TAR
download_filename = filename.name
if download_filename == f"{backup.slug}.tar":
download_filename = f"{RE_SLUGIFY_NAME.sub('_', backup.name)}.tar"
response.headers[CONTENT_DISPOSITION] = (
f"attachment; filename={download_filename}"
)
return response
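
When the stored archive is named after the slug, the download filename is rebuilt from the human-readable backup name, with every run of non-alphanumeric characters collapsed to a single underscore. A standalone sketch of that rewrite:

# Standalone sketch of the Content-Disposition filename rewrite above.
import re

RE_SLUGIFY_NAME = re.compile(r"[^A-Za-z0-9]+")

def download_name(slug: str, stored: str, name: str) -> str:
    if stored == f"{slug}.tar":
        return f"{RE_SLUGIFY_NAME.sub('_', name)}.tar"
    return stored  # user-chosen filenames are served as-is

assert download_name("a1b2c3d4", "a1b2c3d4.tar", "Nightly: core") == "Nightly_core.tar"
assert download_name("a1b2c3d4", "my-backup.tar", "Nightly: core") == "my-backup.tar"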
@api_process
async def upload(self, request: web.Request):
"""Upload a backup file."""
location: LOCATION_TYPE = None
locations: list[LOCATION_TYPE] | None = None
if ATTR_LOCATION in request.query:
location_names: list[str] = request.query.getall(ATTR_LOCATION, [])
self._validate_cloud_backup_location(
request, cast(list[str | None], location_names)
)
# Convert empty string to None if necessary
locations = [
self._location_to_mount(location)
if _convert_local_location(location)
else None
for location in location_names
]
location = locations.pop(0)
filename: str | None = None
if ATTR_FILENAME in request.query:
filename = request.query.get(ATTR_FILENAME)
try:
vol.Match(RE_BACKUP_FILENAME)(filename)
except vol.Invalid as ex:
raise APIError(humanize_error(filename, ex)) from None
tmp_path = await self.sys_backups.get_upload_path_for_location(location)
temp_dir: TemporaryDirectory | None = None
backup_file_stream: IOBase | None = None
def open_backup_file() -> Path:
nonlocal temp_dir, backup_file_stream
temp_dir = TemporaryDirectory(dir=tmp_path.as_posix())
tar_file = Path(temp_dir.name, "upload.tar")
backup_file_stream = tar_file.open("wb")
return tar_file
def close_backup_file() -> None:
if backup_file_stream:
# Make sure it got closed, in case of exception. It is safe to
# close the file stream twice.
backup_file_stream.close()
if temp_dir:
temp_dir.cleanup()
try:
reader = await request.multipart()
contents = await reader.next()
if not isinstance(contents, BodyPartReader):
raise APIError("Improperly formatted upload, could not read backup")
tar_file = await self.sys_run_in_executor(open_backup_file)
while chunk := await contents.read_chunk(size=2**16):
await self.sys_run_in_executor(
cast(IOBase, backup_file_stream).write, chunk
)
await self.sys_run_in_executor(cast(IOBase, backup_file_stream).close)
backup = await asyncio.shield(
self.sys_backups.import_backup(
tar_file,
filename,
location=location,
additional_locations=locations,
)
)
except OSError as err:
if err.errno == errno.EBADMSG and location in {
LOCATION_CLOUD_BACKUP,
None,
}:
self.sys_resolution.add_unhealthy_reason(
UnhealthyReason.OSERROR_BAD_MESSAGE
)
_LOGGER.error("Can't write new backup file: %s", err)
return False
except asyncio.CancelledError:
return False
finally:
await self.sys_run_in_executor(close_backup_file)
if backup:
return {ATTR_SLUG: backup.slug}
return False
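
The upload handler streams the multipart body in 64 KiB chunks (2**16) and pushes every blocking file operation (open, write, close) into the executor so the event loop never stalls on disk I/O. A standalone sketch of that pattern with plain asyncio (the file path and fake chunk source are illustrative):

# Standalone sketch of the chunked, executor-backed write pattern above.
import asyncio
from io import IOBase

CHUNK = 2**16  # 64 KiB, as in the handler

async def stream_to_file(read_chunk, path: str) -> None:
    loop = asyncio.get_running_loop()
    stream: IOBase = await loop.run_in_executor(None, open, path, "wb")
    try:
        while chunk := await read_chunk(CHUNK):
            await loop.run_in_executor(None, stream.write, chunk)
    finally:
        await loop.run_in_executor(None, stream.close)

async def main() -> None:
    chunks = iter([b"a" * CHUNK, b"b" * 10, b""])  # b"" ends the stream

    async def read_chunk(_size: int) -> bytes:
        return next(chunks)

    await stream_to_file(read_chunk, "/tmp/upload.tar")  # illustrative path

asyncio.run(main())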

View File

@ -1,13 +1,14 @@
"""Init file for Supervisor HA cli RESTful API."""
import asyncio
import logging
from typing import Any
from typing import Any, Dict
from aiohttp import web
import voluptuous as vol
from ..const import (
ATTR_VERSION,
ATTR_VERSION_LATEST,
ATTR_BLK_READ,
ATTR_BLK_WRITE,
ATTR_CPU_PERCENT,
@ -16,33 +17,28 @@ from ..const import (
ATTR_MEMORY_USAGE,
ATTR_NETWORK_RX,
ATTR_NETWORK_TX,
ATTR_UPDATE_AVAILABLE,
ATTR_VERSION,
ATTR_VERSION_LATEST,
)
from ..coresys import CoreSysAttributes
from ..validate import version_tag
from .utils import api_process, api_validate
_LOGGER: logging.Logger = logging.getLogger(__name__)
SCHEMA_VERSION = vol.Schema({vol.Optional(ATTR_VERSION): version_tag})
SCHEMA_VERSION = vol.Schema({vol.Optional(ATTR_VERSION): vol.Coerce(str)})
class APICli(CoreSysAttributes):
"""Handle RESTful API for HA Cli functions."""
@api_process
async def info(self, request: web.Request) -> dict[str, Any]:
async def info(self, request: web.Request) -> Dict[str, Any]:
"""Return HA cli information."""
return {
ATTR_VERSION: self.sys_plugins.cli.version,
ATTR_VERSION_LATEST: self.sys_plugins.cli.latest_version,
ATTR_UPDATE_AVAILABLE: self.sys_plugins.cli.need_update,
}
@api_process
async def stats(self, request: web.Request) -> dict[str, Any]:
async def stats(self, request: web.Request) -> Dict[str, Any]:
"""Return resource information."""
stats = await self.sys_plugins.cli.stats()

View File

@ -1,90 +0,0 @@
"""Const for API."""
from enum import StrEnum
CONTENT_TYPE_BINARY = "application/octet-stream"
CONTENT_TYPE_JSON = "application/json"
CONTENT_TYPE_PNG = "image/png"
CONTENT_TYPE_TAR = "application/tar"
CONTENT_TYPE_TEXT = "text/plain"
CONTENT_TYPE_URL = "application/x-www-form-urlencoded"
CONTENT_TYPE_X_LOG = "text/x-log"
COOKIE_INGRESS = "ingress_session"
ATTR_ADDITIONAL_LOCATIONS = "additional_locations"
ATTR_AGENT_VERSION = "agent_version"
ATTR_APPARMOR_VERSION = "apparmor_version"
ATTR_ATTRIBUTES = "attributes"
ATTR_AVAILABLE_UPDATES = "available_updates"
ATTR_BACKGROUND = "background"
ATTR_BOOT_CONFIG = "boot_config"
ATTR_BOOT_SLOT = "boot_slot"
ATTR_BOOT_SLOTS = "boot_slots"
ATTR_BOOT_TIMESTAMP = "boot_timestamp"
ATTR_BOOTS = "boots"
ATTR_BROADCAST_LLMNR = "broadcast_llmnr"
ATTR_BROADCAST_MDNS = "broadcast_mdns"
ATTR_BY_ID = "by_id"
ATTR_CHILDREN = "children"
ATTR_CONNECTION_BUS = "connection_bus"
ATTR_DATA_DISK = "data_disk"
ATTR_DEVICE = "device"
ATTR_DEV_PATH = "dev_path"
ATTR_DISKS = "disks"
ATTR_DRIVES = "drives"
ATTR_DT_SYNCHRONIZED = "dt_synchronized"
ATTR_DT_UTC = "dt_utc"
ATTR_EJECTABLE = "ejectable"
ATTR_FALLBACK = "fallback"
ATTR_FILESYSTEMS = "filesystems"
ATTR_FORCE = "force"
ATTR_GROUP_IDS = "group_ids"
ATTR_IDENTIFIERS = "identifiers"
ATTR_IS_ACTIVE = "is_active"
ATTR_IS_OWNER = "is_owner"
ATTR_JOBS = "jobs"
ATTR_LLMNR = "llmnr"
ATTR_LLMNR_HOSTNAME = "llmnr_hostname"
ATTR_LOCAL_ONLY = "local_only"
ATTR_LOCATION_ATTRIBUTES = "location_attributes"
ATTR_LOCATIONS = "locations"
ATTR_MDNS = "mdns"
ATTR_MODEL = "model"
ATTR_MOUNTS = "mounts"
ATTR_MOUNT_POINTS = "mount_points"
ATTR_PANEL_PATH = "panel_path"
ATTR_REMOVABLE = "removable"
ATTR_REMOVE_CONFIG = "remove_config"
ATTR_REVISION = "revision"
ATTR_SAFE_MODE = "safe_mode"
ATTR_SEAT = "seat"
ATTR_SIGNED = "signed"
ATTR_STARTUP_TIME = "startup_time"
ATTR_STATUS = "status"
ATTR_SUBSYSTEM = "subsystem"
ATTR_SYSFS = "sysfs"
ATTR_SYSTEM_HEALTH_LED = "system_health_led"
ATTR_TIME_DETECTED = "time_detected"
ATTR_UPDATE_TYPE = "update_type"
ATTR_USAGE = "usage"
ATTR_USE_NTP = "use_ntp"
ATTR_USERS = "users"
ATTR_USER_PATH = "user_path"
ATTR_VENDOR = "vendor"
ATTR_VIRTUALIZATION = "virtualization"
class BootSlot(StrEnum):
"""Boot slots used by HAOS."""
A = "A"
B = "B"
class DetectBlockingIO(StrEnum):
"""Enable/Disable detection for blocking I/O in event loop."""
OFF = "off"
ON = "on"
ON_AT_STARTUP = "on-at-startup"

View File

@ -1,33 +1,24 @@
"""Init file for Supervisor network RESTful API."""
import logging
from typing import Any
from aiohttp import web
import voluptuous as vol
from ..addons.addon import Addon
from .utils import api_process, api_validate
from ..const import (
ATTR_ADDON,
ATTR_UUID,
ATTR_CONFIG,
ATTR_DISCOVERY,
ATTR_SERVICE,
ATTR_SERVICES,
ATTR_UUID,
REQUEST_FROM,
AddonState,
)
from ..coresys import CoreSysAttributes
from ..discovery import Message
from ..exceptions import APIForbidden, APINotFound
from .utils import api_process, api_validate, require_home_assistant
from ..exceptions import APIError, APIForbidden
from ..discovery.validate import valid_discovery_service
_LOGGER: logging.Logger = logging.getLogger(__name__)
SCHEMA_DISCOVERY = vol.Schema(
{
vol.Required(ATTR_SERVICE): str,
vol.Required(ATTR_CONFIG): dict,
vol.Required(ATTR_SERVICE): valid_discovery_service,
vol.Optional(ATTR_CONFIG): vol.Maybe(dict),
}
)
@ -35,71 +26,59 @@ SCHEMA_DISCOVERY = vol.Schema(
class APIDiscovery(CoreSysAttributes):
"""Handle RESTful API for discovery functions."""
def _extract_message(self, request: web.Request) -> Message:
def _extract_message(self, request):
"""Extract discovery message from URL."""
message = self.sys_discovery.get(request.match_info["uuid"])
message = self.sys_discovery.get(request.match_info.get("uuid"))
if not message:
raise APINotFound("Discovery message not found")
raise APIError("Discovery message not found")
return message
def _check_permission_ha(self, request):
"""Check permission for API call / Home Assistant."""
if request[REQUEST_FROM] != self.sys_homeassistant:
raise APIForbidden("Only HomeAssistant can use this API!")
@api_process
@require_home_assistant
async def list_discovery(self, request: web.Request) -> dict[str, Any]:
"""Show registered and available services."""
# Get available discovery
discovery = [
{
ATTR_ADDON: message.addon,
ATTR_SERVICE: message.service,
ATTR_UUID: message.uuid,
ATTR_CONFIG: message.config,
}
for message in self.sys_discovery.list_messages
if (
discovered := self.sys_addons.get_local_only(
message.addon,
)
async def list(self, request):
"""Show register services."""
self._check_permission_ha(request)
discovery = []
for message in self.sys_discovery.list_messages:
discovery.append(
{
ATTR_ADDON: message.addon,
ATTR_SERVICE: message.service,
ATTR_UUID: message.uuid,
ATTR_CONFIG: message.config,
}
)
and discovered.state == AddonState.STARTED
]
# Get available services/add-ons
services: dict[str, list[str]] = {}
for addon in self.sys_addons.all:
for name in addon.discovery:
services.setdefault(name, []).append(addon.slug)
return {ATTR_DISCOVERY: discovery, ATTR_SERVICES: services}
return {ATTR_DISCOVERY: discovery}
@api_process
async def set_discovery(self, request: web.Request) -> dict[str, str]:
async def set_discovery(self, request):
"""Write data into a discovery pipeline."""
body = await api_validate(SCHEMA_DISCOVERY, request)
addon: Addon = request[REQUEST_FROM]
service = body[ATTR_SERVICE]
addon = request[REQUEST_FROM]
# Access?
if body[ATTR_SERVICE] not in addon.discovery:
_LOGGER.error(
"Add-on %s attempted to send discovery for service %s which is not listed in its config. Please report this to the maintainer of the add-on",
addon.name,
service,
)
raise APIForbidden(
"Add-ons must list services they provide via discovery in their config!"
)
raise APIForbidden("Can't use discovery!")
# Process discovery message
message = await self.sys_discovery.send(addon, **body)
message = self.sys_discovery.send(addon, **body)
return {ATTR_UUID: message.uuid}
@api_process
@require_home_assistant
async def get_discovery(self, request: web.Request) -> dict[str, Any]:
async def get_discovery(self, request):
"""Read data into a discovery message."""
message = self._extract_message(request)
# HomeAssistant?
self._check_permission_ha(request)
return {
ATTR_ADDON: message.addon,
ATTR_SERVICE: message.service,
@ -108,7 +87,7 @@ class APIDiscovery(CoreSysAttributes):
}
@api_process
async def del_discovery(self, request: web.Request) -> None:
async def del_discovery(self, request):
"""Delete data into a discovery message."""
message = self._extract_message(request)
addon = request[REQUEST_FROM]
@ -117,4 +96,5 @@ class APIDiscovery(CoreSysAttributes):
if message.addon != addon.slug:
raise APIForbidden("Can't remove discovery message")
await self.sys_discovery.remove(message)
self.sys_discovery.remove(message)
return True

View File

@ -1,9 +1,7 @@
"""Init file for Supervisor DNS RESTful API."""
import asyncio
from collections.abc import Awaitable
import logging
from typing import Any
from typing import Any, Awaitable, Dict
from aiohttp import web
import voluptuous as vol
@ -13,6 +11,7 @@ from ..const import (
ATTR_BLK_WRITE,
ATTR_CPU_PERCENT,
ATTR_HOST,
ATTR_VERSION_LATEST,
ATTR_LOCALS,
ATTR_MEMORY_LIMIT,
ATTR_MEMORY_PERCENT,
@ -20,68 +19,49 @@ from ..const import (
ATTR_NETWORK_RX,
ATTR_NETWORK_TX,
ATTR_SERVERS,
ATTR_UPDATE_AVAILABLE,
ATTR_VERSION,
ATTR_VERSION_LATEST,
CONTENT_TYPE_BINARY,
)
from ..coresys import CoreSysAttributes
from ..exceptions import APIError
from ..validate import dns_server_list, version_tag
from .const import ATTR_FALLBACK, ATTR_LLMNR, ATTR_MDNS
from .utils import api_process, api_validate
from ..validate import dns_server_list
from .utils import api_process, api_process_raw, api_validate
_LOGGER: logging.Logger = logging.getLogger(__name__)
# pylint: disable=no-value-for-parameter
SCHEMA_OPTIONS = vol.Schema(
{
vol.Optional(ATTR_SERVERS): dns_server_list,
vol.Optional(ATTR_FALLBACK): vol.Boolean(),
}
)
SCHEMA_OPTIONS = vol.Schema({vol.Optional(ATTR_SERVERS): dns_server_list})
SCHEMA_VERSION = vol.Schema({vol.Optional(ATTR_VERSION): version_tag})
SCHEMA_VERSION = vol.Schema({vol.Optional(ATTR_VERSION): vol.Coerce(str)})
class APICoreDNS(CoreSysAttributes):
"""Handle RESTful API for DNS functions."""
@api_process
async def info(self, request: web.Request) -> dict[str, Any]:
async def info(self, request: web.Request) -> Dict[str, Any]:
"""Return DNS information."""
return {
ATTR_VERSION: self.sys_plugins.dns.version,
ATTR_VERSION_LATEST: self.sys_plugins.dns.latest_version,
ATTR_UPDATE_AVAILABLE: self.sys_plugins.dns.need_update,
ATTR_HOST: str(self.sys_docker.network.dns),
ATTR_SERVERS: self.sys_plugins.dns.servers,
ATTR_LOCALS: self.sys_plugins.dns.locals,
ATTR_MDNS: self.sys_plugins.dns.mdns,
ATTR_LLMNR: self.sys_plugins.dns.llmnr,
ATTR_FALLBACK: self.sys_plugins.dns.fallback,
ATTR_LOCALS: self.sys_host.network.dns_servers,
}
@api_process
async def options(self, request: web.Request) -> None:
"""Set DNS options."""
body = await api_validate(SCHEMA_OPTIONS, request)
restart_required = False
if ATTR_SERVERS in body:
self.sys_plugins.dns.servers = body[ATTR_SERVERS]
restart_required = True
if ATTR_FALLBACK in body:
self.sys_plugins.dns.fallback = body[ATTR_FALLBACK]
restart_required = True
if restart_required:
self.sys_create_task(self.sys_plugins.dns.restart())
await self.sys_plugins.dns.save_data()
self.sys_plugins.dns.save_data()
@api_process
async def stats(self, request: web.Request) -> dict[str, Any]:
async def stats(self, request: web.Request) -> Dict[str, Any]:
"""Return resource information."""
stats = await self.sys_plugins.dns.stats()
@ -103,9 +83,14 @@ class APICoreDNS(CoreSysAttributes):
version = body.get(ATTR_VERSION, self.sys_plugins.dns.latest_version)
if version == self.sys_plugins.dns.version:
raise APIError(f"Version {version} is already in use")
raise APIError("Version {} is already in use".format(version))
await asyncio.shield(self.sys_plugins.dns.update(version))
@api_process_raw(CONTENT_TYPE_BINARY)
def logs(self, request: web.Request) -> Awaitable[bytes]:
"""Return DNS Docker logs."""
return self.sys_plugins.dns.logs()
@api_process
def restart(self, request: web.Request) -> Awaitable[None]:
"""Restart CoreDNS plugin."""

View File

@ -1,107 +0,0 @@
"""Init file for Supervisor Home Assistant RESTful API."""
import logging
from typing import Any
from aiohttp import web
import voluptuous as vol
from supervisor.resolution.const import ContextType, IssueType, SuggestionType
from ..const import (
ATTR_ENABLE_IPV6,
ATTR_HOSTNAME,
ATTR_LOGGING,
ATTR_PASSWORD,
ATTR_REGISTRIES,
ATTR_STORAGE,
ATTR_USERNAME,
ATTR_VERSION,
)
from ..coresys import CoreSysAttributes
from ..exceptions import APINotFound
from .utils import api_process, api_validate
_LOGGER: logging.Logger = logging.getLogger(__name__)
SCHEMA_DOCKER_REGISTRY = vol.Schema(
{
str: {
vol.Required(ATTR_USERNAME): str,
vol.Required(ATTR_PASSWORD): str,
}
}
)
# pylint: disable=no-value-for-parameter
SCHEMA_OPTIONS = vol.Schema({vol.Optional(ATTR_ENABLE_IPV6): vol.Maybe(vol.Boolean())})
class APIDocker(CoreSysAttributes):
"""Handle RESTful API for Docker configuration."""
@api_process
async def info(self, request: web.Request):
"""Get docker info."""
data_registries = {}
for hostname, registry in self.sys_docker.config.registries.items():
data_registries[hostname] = {
ATTR_USERNAME: registry[ATTR_USERNAME],
}
return {
ATTR_VERSION: self.sys_docker.info.version,
ATTR_ENABLE_IPV6: self.sys_docker.config.enable_ipv6,
ATTR_STORAGE: self.sys_docker.info.storage,
ATTR_LOGGING: self.sys_docker.info.logging,
ATTR_REGISTRIES: data_registries,
}
@api_process
async def options(self, request: web.Request) -> None:
"""Set docker options."""
body = await api_validate(SCHEMA_OPTIONS, request)
if (
ATTR_ENABLE_IPV6 in body
and self.sys_docker.config.enable_ipv6 != body[ATTR_ENABLE_IPV6]
):
self.sys_docker.config.enable_ipv6 = body[ATTR_ENABLE_IPV6]
_LOGGER.info("Host system reboot required to apply new IPv6 configuration")
self.sys_resolution.create_issue(
IssueType.REBOOT_REQUIRED,
ContextType.SYSTEM,
suggestions=[SuggestionType.EXECUTE_REBOOT],
)
await self.sys_docker.config.save_data()
@api_process
async def registries(self, request) -> dict[str, Any]:
"""Return the list of registries."""
data_registries = {}
for hostname, registry in self.sys_docker.config.registries.items():
data_registries[hostname] = {
ATTR_USERNAME: registry[ATTR_USERNAME],
}
return {ATTR_REGISTRIES: data_registries}
@api_process
async def create_registry(self, request: web.Request):
"""Create a new docker registry."""
body = await api_validate(SCHEMA_DOCKER_REGISTRY, request)
for hostname, registry in body.items():
self.sys_docker.config.registries[hostname] = registry
await self.sys_docker.config.save_data()
@api_process
async def remove_registry(self, request: web.Request):
"""Delete a docker registry."""
hostname = request.match_info.get(ATTR_HOSTNAME)
if hostname not in self.sys_docker.config.registries:
raise APINotFound(f"Hostname {hostname} does not exist in registries")
del self.sys_docker.config.registries[hostname]
await self.sys_docker.config.save_data()

View File

@ -1,121 +1,42 @@
"""Init file for Supervisor hardware RESTful API."""
import asyncio
import logging
from typing import Any
from typing import Any, Dict
from aiohttp import web
from .utils import api_process
from ..const import (
ATTR_AUDIO,
ATTR_DEVICES,
ATTR_ID,
ATTR_INPUT,
ATTR_NAME,
ATTR_OUTPUT,
ATTR_SERIAL,
ATTR_SIZE,
ATTR_SYSTEM,
ATTR_DISK,
ATTR_GPIO,
ATTR_AUDIO,
ATTR_INPUT,
ATTR_OUTPUT,
)
from ..coresys import CoreSysAttributes
from ..dbus.udisks2 import UDisks2Manager
from ..dbus.udisks2.block import UDisks2Block
from ..dbus.udisks2.drive import UDisks2Drive
from ..hardware.data import Device
from .const import (
ATTR_ATTRIBUTES,
ATTR_BY_ID,
ATTR_CHILDREN,
ATTR_CONNECTION_BUS,
ATTR_DEV_PATH,
ATTR_DEVICE,
ATTR_DRIVES,
ATTR_EJECTABLE,
ATTR_FILESYSTEMS,
ATTR_MODEL,
ATTR_MOUNT_POINTS,
ATTR_REMOVABLE,
ATTR_REVISION,
ATTR_SEAT,
ATTR_SUBSYSTEM,
ATTR_SYSFS,
ATTR_TIME_DETECTED,
ATTR_VENDOR,
)
from .utils import api_process
_LOGGER: logging.Logger = logging.getLogger(__name__)
def device_struct(device: Device) -> dict[str, Any]:
"""Return a dict with information of a interface to be used in the API."""
return {
ATTR_NAME: device.name,
ATTR_SYSFS: device.sysfs,
ATTR_DEV_PATH: device.path,
ATTR_SUBSYSTEM: device.subsystem,
ATTR_BY_ID: device.by_id,
ATTR_ATTRIBUTES: device.attributes,
ATTR_CHILDREN: device.children,
}
def filesystem_struct(fs_block: UDisks2Block) -> dict[str, Any]:
"""Return a dict with information of a filesystem block device to be used in the API."""
return {
ATTR_DEVICE: str(fs_block.device),
ATTR_ID: fs_block.id,
ATTR_SIZE: fs_block.size,
ATTR_NAME: fs_block.id_label,
ATTR_SYSTEM: fs_block.hint_system,
ATTR_MOUNT_POINTS: [
str(mount_point)
for mount_point in (
fs_block.filesystem.mount_points if fs_block.filesystem else []
)
],
}
def drive_struct(udisks2: UDisks2Manager, drive: UDisks2Drive) -> dict[str, Any]:
"""Return a dict with information of a disk to be used in the API."""
return {
ATTR_VENDOR: drive.vendor,
ATTR_MODEL: drive.model,
ATTR_REVISION: drive.revision,
ATTR_SERIAL: drive.serial,
ATTR_ID: drive.id,
ATTR_SIZE: drive.size,
ATTR_TIME_DETECTED: drive.time_detected.isoformat(),
ATTR_CONNECTION_BUS: drive.connection_bus,
ATTR_SEAT: drive.seat,
ATTR_REMOVABLE: drive.removable,
ATTR_EJECTABLE: drive.ejectable,
ATTR_FILESYSTEMS: [
filesystem_struct(block)
for block in udisks2.block_devices
if block.filesystem and block.drive == drive.object_path
],
}
class APIHardware(CoreSysAttributes):
"""Handle RESTful API for hardware functions."""
@api_process
async def info(self, request: web.Request) -> dict[str, Any]:
async def info(self, request: web.Request) -> Dict[str, Any]:
"""Show hardware info."""
return {
ATTR_DEVICES: [
device_struct(device) for device in self.sys_hardware.devices
],
ATTR_DRIVES: [
drive_struct(self.sys_dbus.udisks2, drive)
for drive in self.sys_dbus.udisks2.drives
],
ATTR_SERIAL: list(
self.sys_hardware.serial_devices | self.sys_hardware.serial_by_id
),
ATTR_INPUT: list(self.sys_hardware.input_devices),
ATTR_DISK: list(self.sys_hardware.disk_devices),
ATTR_GPIO: list(self.sys_hardware.gpio_devices),
ATTR_AUDIO: self.sys_hardware.audio_devices,
}
@api_process
async def audio(self, request: web.Request) -> dict[str, Any]:
async def audio(self, request: web.Request) -> Dict[str, Any]:
"""Show pulse audio profiles."""
return {
ATTR_AUDIO: {
@ -129,3 +50,8 @@ class APIHardware(CoreSysAttributes):
},
}
}
@api_process
def trigger(self, request: web.Request) -> None:
"""Trigger a udev device reload."""
return asyncio.shield(self.sys_hardware.udev_trigger())

View File

@ -1,9 +1,7 @@
"""Init file for Supervisor Home Assistant RESTful API."""
import asyncio
from collections.abc import Awaitable
import logging
from typing import Any
from typing import Any, Coroutine, Dict
from aiohttp import web
import voluptuous as vol
@ -12,8 +10,6 @@ from ..const import (
ATTR_ARCH,
ATTR_AUDIO_INPUT,
ATTR_AUDIO_OUTPUT,
ATTR_BACKUP,
ATTR_BACKUPS_EXCLUDE_DATABASE,
ATTR_BLK_READ,
ATTR_BLK_WRITE,
ATTR_BOOT,
@ -29,16 +25,16 @@ from ..const import (
ATTR_PORT,
ATTR_REFRESH_TOKEN,
ATTR_SSL,
ATTR_UPDATE_AVAILABLE,
ATTR_VERSION,
ATTR_VERSION_LATEST,
ATTR_WAIT_BOOT,
ATTR_WATCHDOG,
CONTENT_TYPE_BINARY,
)
from ..coresys import CoreSysAttributes
from ..exceptions import APIDBMigrationInProgress, APIError
from ..validate import docker_image, network_port, version_tag
from .const import ATTR_FORCE, ATTR_SAFE_MODE
from .utils import api_process, api_validate
from ..exceptions import APIError
from ..validate import docker_image, network_port
from .utils import api_process, api_process_raw, api_validate
_LOGGER: logging.Logger = logging.getLogger(__name__)
@ -46,59 +42,29 @@ _LOGGER: logging.Logger = logging.getLogger(__name__)
SCHEMA_OPTIONS = vol.Schema(
{
vol.Optional(ATTR_BOOT): vol.Boolean(),
vol.Optional(ATTR_IMAGE): vol.Maybe(docker_image),
vol.Optional(ATTR_IMAGE): docker_image,
vol.Optional(ATTR_PORT): network_port,
vol.Optional(ATTR_SSL): vol.Boolean(),
vol.Optional(ATTR_WATCHDOG): vol.Boolean(),
vol.Optional(ATTR_REFRESH_TOKEN): vol.Maybe(str),
vol.Optional(ATTR_AUDIO_OUTPUT): vol.Maybe(str),
vol.Optional(ATTR_AUDIO_INPUT): vol.Maybe(str),
vol.Optional(ATTR_BACKUPS_EXCLUDE_DATABASE): vol.Boolean(),
vol.Optional(ATTR_WAIT_BOOT): vol.All(vol.Coerce(int), vol.Range(min=60)),
vol.Optional(ATTR_REFRESH_TOKEN): vol.Maybe(vol.Coerce(str)),
vol.Optional(ATTR_AUDIO_OUTPUT): vol.Maybe(vol.Coerce(str)),
vol.Optional(ATTR_AUDIO_INPUT): vol.Maybe(vol.Coerce(str)),
}
)
SCHEMA_UPDATE = vol.Schema(
{
vol.Optional(ATTR_VERSION): version_tag,
vol.Optional(ATTR_BACKUP): bool,
}
)
SCHEMA_RESTART = vol.Schema(
{
vol.Optional(ATTR_SAFE_MODE, default=False): vol.Boolean(),
vol.Optional(ATTR_FORCE, default=False): vol.Boolean(),
}
)
SCHEMA_STOP = vol.Schema(
{
vol.Optional(ATTR_FORCE, default=False): vol.Boolean(),
}
)
SCHEMA_VERSION = vol.Schema({vol.Optional(ATTR_VERSION): vol.Coerce(str)})
class APIHomeAssistant(CoreSysAttributes):
"""Handle RESTful API for Home Assistant functions."""
async def _check_offline_migration(self, force: bool = False) -> None:
"""Check and raise if there's an offline DB migration in progress."""
if (
not force
and (state := await self.sys_homeassistant.api.get_api_state())
and state.offline_db_migration
):
raise APIDBMigrationInProgress(
"Offline database migration in progress, try again after it has completed"
)
@api_process
async def info(self, request: web.Request) -> dict[str, Any]:
async def info(self, request: web.Request) -> Dict[str, Any]:
"""Return host information."""
return {
ATTR_VERSION: self.sys_homeassistant.version,
ATTR_VERSION_LATEST: self.sys_homeassistant.latest_version,
ATTR_UPDATE_AVAILABLE: self.sys_homeassistant.need_update,
ATTR_MACHINE: self.sys_homeassistant.machine,
ATTR_IP_ADDRESS: str(self.sys_homeassistant.ip_address),
ATTR_ARCH: self.sys_homeassistant.arch,
@ -107,9 +73,11 @@ class APIHomeAssistant(CoreSysAttributes):
ATTR_PORT: self.sys_homeassistant.api_port,
ATTR_SSL: self.sys_homeassistant.api_ssl,
ATTR_WATCHDOG: self.sys_homeassistant.watchdog,
ATTR_WAIT_BOOT: self.sys_homeassistant.wait_boot,
ATTR_AUDIO_INPUT: self.sys_homeassistant.audio_input,
ATTR_AUDIO_OUTPUT: self.sys_homeassistant.audio_output,
ATTR_BACKUPS_EXCLUDE_DATABASE: self.sys_homeassistant.backups_exclude_database,
# Remove end of Q3 2020
"last_version": self.sys_homeassistant.latest_version,
}
@api_process
@ -118,10 +86,7 @@ class APIHomeAssistant(CoreSysAttributes):
body = await api_validate(SCHEMA_OPTIONS, request)
if ATTR_IMAGE in body:
self.sys_homeassistant.set_image(body[ATTR_IMAGE])
self.sys_homeassistant.override_image = (
self.sys_homeassistant.image != self.sys_homeassistant.default_image
)
self.sys_homeassistant.image = body[ATTR_IMAGE]
if ATTR_BOOT in body:
self.sys_homeassistant.boot = body[ATTR_BOOT]
@ -135,6 +100,9 @@ class APIHomeAssistant(CoreSysAttributes):
if ATTR_WATCHDOG in body:
self.sys_homeassistant.watchdog = body[ATTR_WATCHDOG]
if ATTR_WAIT_BOOT in body:
self.sys_homeassistant.wait_boot = body[ATTR_WAIT_BOOT]
if ATTR_REFRESH_TOKEN in body:
self.sys_homeassistant.refresh_token = body[ATTR_REFRESH_TOKEN]
@ -144,17 +112,12 @@ class APIHomeAssistant(CoreSysAttributes):
if ATTR_AUDIO_OUTPUT in body:
self.sys_homeassistant.audio_output = body[ATTR_AUDIO_OUTPUT]
if ATTR_BACKUPS_EXCLUDE_DATABASE in body:
self.sys_homeassistant.backups_exclude_database = body[
ATTR_BACKUPS_EXCLUDE_DATABASE
]
await self.sys_homeassistant.save_data()
self.sys_homeassistant.save_data()
@api_process
async def stats(self, request: web.Request) -> dict[Any, str]:
async def stats(self, request: web.Request) -> Dict[Any, str]:
"""Return resource information."""
stats = await self.sys_homeassistant.core.stats()
stats = await self.sys_homeassistant.stats()
if not stats:
raise APIError("No stats available")
@ -172,52 +135,39 @@ class APIHomeAssistant(CoreSysAttributes):
@api_process
async def update(self, request: web.Request) -> None:
"""Update Home Assistant."""
body = await api_validate(SCHEMA_UPDATE, request)
await self._check_offline_migration()
body = await api_validate(SCHEMA_VERSION, request)
version = body.get(ATTR_VERSION, self.sys_homeassistant.latest_version)
await asyncio.shield(
self.sys_homeassistant.core.update(
version=body.get(ATTR_VERSION, self.sys_homeassistant.latest_version),
backup=body.get(ATTR_BACKUP),
)
)
await asyncio.shield(self.sys_homeassistant.update(version))
@api_process
async def stop(self, request: web.Request) -> Awaitable[None]:
def stop(self, request: web.Request) -> Coroutine:
"""Stop Home Assistant."""
body = await api_validate(SCHEMA_STOP, request)
await self._check_offline_migration(force=body[ATTR_FORCE])
return await asyncio.shield(self.sys_homeassistant.core.stop())
return asyncio.shield(self.sys_homeassistant.stop())
@api_process
def start(self, request: web.Request) -> Awaitable[None]:
def start(self, request: web.Request) -> Coroutine:
"""Start Home Assistant."""
return asyncio.shield(self.sys_homeassistant.core.start())
return asyncio.shield(self.sys_homeassistant.start())
@api_process
async def restart(self, request: web.Request) -> None:
def restart(self, request: web.Request) -> Coroutine:
"""Restart Home Assistant."""
body = await api_validate(SCHEMA_RESTART, request)
await self._check_offline_migration(force=body[ATTR_FORCE])
await asyncio.shield(
self.sys_homeassistant.core.restart(safe_mode=body[ATTR_SAFE_MODE])
)
return asyncio.shield(self.sys_homeassistant.restart())
@api_process
async def rebuild(self, request: web.Request) -> None:
def rebuild(self, request: web.Request) -> Coroutine:
"""Rebuild Home Assistant."""
body = await api_validate(SCHEMA_RESTART, request)
await self._check_offline_migration(force=body[ATTR_FORCE])
return asyncio.shield(self.sys_homeassistant.rebuild())
await asyncio.shield(
self.sys_homeassistant.core.rebuild(safe_mode=body[ATTR_SAFE_MODE])
)
@api_process_raw(CONTENT_TYPE_BINARY)
def logs(self, request: web.Request) -> Coroutine:
"""Return Home Assistant Docker logs."""
return self.sys_homeassistant.logs()
@api_process
async def check(self, request: web.Request) -> None:
"""Check configuration of Home Assistant."""
result = await self.sys_homeassistant.core.check_config()
result = await self.sys_homeassistant.check_config()
if not result.valid:
raise APIError(result.log)

View File

@ -1,24 +1,16 @@
"""Init file for Supervisor host RESTful API."""
import asyncio
from contextlib import suppress
import logging
from typing import Any
from typing import Awaitable
from aiohttp import ClientConnectionResetError, ClientPayloadError, web
from aiohttp.hdrs import ACCEPT, RANGE
from aiohttp import web
import voluptuous as vol
from voluptuous.error import CoerceInvalid
from ..const import (
ATTR_CHASSIS,
ATTR_CPE,
ATTR_DEPLOYMENT,
ATTR_DESCRIPTON,
ATTR_DISK_FREE,
ATTR_DISK_LIFE_TIME,
ATTR_DISK_TOTAL,
ATTR_DISK_USED,
ATTR_FEATURES,
ATTR_HOSTNAME,
ATTR_KERNEL,
@ -26,97 +18,32 @@ from ..const import (
ATTR_OPERATING_SYSTEM,
ATTR_SERVICES,
ATTR_STATE,
ATTR_TIMEZONE,
CONTENT_TYPE_BINARY,
)
from ..coresys import CoreSysAttributes
from ..exceptions import APIDBMigrationInProgress, APIError, HostLogError
from ..host.const import (
PARAM_BOOT_ID,
PARAM_FOLLOW,
PARAM_SYSLOG_IDENTIFIER,
LogFormat,
LogFormatter,
)
from ..host.logs import SYSTEMD_JOURNAL_GATEWAYD_LINES_MAX
from ..utils.systemd_journal import journal_logs_reader
from .const import (
ATTR_AGENT_VERSION,
ATTR_APPARMOR_VERSION,
ATTR_BOOT_TIMESTAMP,
ATTR_BOOTS,
ATTR_BROADCAST_LLMNR,
ATTR_BROADCAST_MDNS,
ATTR_DT_SYNCHRONIZED,
ATTR_DT_UTC,
ATTR_FORCE,
ATTR_IDENTIFIERS,
ATTR_LLMNR_HOSTNAME,
ATTR_STARTUP_TIME,
ATTR_USE_NTP,
ATTR_VIRTUALIZATION,
CONTENT_TYPE_TEXT,
CONTENT_TYPE_X_LOG,
)
from .utils import api_process, api_process_raw, api_validate
_LOGGER: logging.Logger = logging.getLogger(__name__)
IDENTIFIER = "identifier"
BOOTID = "bootid"
DEFAULT_LINES = 100
SERVICE = "service"
SCHEMA_OPTIONS = vol.Schema({vol.Optional(ATTR_HOSTNAME): str})
# pylint: disable=no-value-for-parameter
SCHEMA_SHUTDOWN = vol.Schema(
{
vol.Optional(ATTR_FORCE, default=False): vol.Boolean(),
}
)
# pylint: enable=no-value-for-parameter
SCHEMA_OPTIONS = vol.Schema({vol.Optional(ATTR_HOSTNAME): vol.Coerce(str)})
class APIHost(CoreSysAttributes):
"""Handle RESTful API for host functions."""
async def _check_ha_offline_migration(self, force: bool) -> None:
"""Check if HA has an offline migration in progress and raise if not forced."""
if (
not force
and (state := await self.sys_homeassistant.api.get_api_state())
and state.offline_db_migration
):
raise APIDBMigrationInProgress(
"Home Assistant offline database migration in progress, please wait until complete before shutting down host"
)
@api_process
async def info(self, request):
"""Return host information."""
return {
ATTR_AGENT_VERSION: self.sys_dbus.agent.version,
ATTR_APPARMOR_VERSION: self.sys_host.apparmor.version,
ATTR_CHASSIS: self.sys_host.info.chassis,
ATTR_VIRTUALIZATION: self.sys_host.info.virtualization,
ATTR_CPE: self.sys_host.info.cpe,
ATTR_DEPLOYMENT: self.sys_host.info.deployment,
ATTR_DISK_FREE: await self.sys_host.info.free_space(),
ATTR_DISK_TOTAL: await self.sys_host.info.total_space(),
ATTR_DISK_USED: await self.sys_host.info.used_space(),
ATTR_DISK_LIFE_TIME: await self.sys_host.info.disk_life_time(),
ATTR_FEATURES: self.sys_host.features,
ATTR_FEATURES: self.sys_host.supperted_features,
ATTR_HOSTNAME: self.sys_host.info.hostname,
ATTR_LLMNR_HOSTNAME: self.sys_host.info.llmnr_hostname,
ATTR_KERNEL: self.sys_host.info.kernel,
ATTR_OPERATING_SYSTEM: self.sys_host.info.operating_system,
ATTR_TIMEZONE: self.sys_host.info.timezone,
ATTR_DT_UTC: self.sys_host.info.dt_utc,
ATTR_DT_SYNCHRONIZED: self.sys_host.info.dt_synchronized,
ATTR_USE_NTP: self.sys_host.info.use_ntp,
ATTR_STARTUP_TIME: self.sys_host.info.startup_time,
ATTR_BOOT_TIMESTAMP: self.sys_host.info.boot_timestamp,
ATTR_BROADCAST_LLMNR: self.sys_host.info.broadcast_llmnr,
ATTR_BROADCAST_MDNS: self.sys_host.info.broadcast_mdns,
ATTR_DEPLOYMENT: self.sys_host.info.deployment,
ATTR_KERNEL: self.sys_host.info.kernel,
}
@api_process
@ -131,20 +58,14 @@ class APIHost(CoreSysAttributes):
)
@api_process
async def reboot(self, request):
def reboot(self, request):
"""Reboot host."""
body = await api_validate(SCHEMA_SHUTDOWN, request)
await self._check_ha_offline_migration(force=body[ATTR_FORCE])
return await asyncio.shield(self.sys_host.control.reboot())
return asyncio.shield(self.sys_host.control.reboot())
@api_process
async def shutdown(self, request):
def shutdown(self, request):
"""Poweroff host."""
body = await api_validate(SCHEMA_SHUTDOWN, request)
await self._check_ha_offline_migration(force=body[ATTR_FORCE])
return await asyncio.shield(self.sys_host.control.shutdown())
return asyncio.shield(self.sys_host.control.shutdown())
@api_process
def reload(self, request):
@ -167,125 +88,30 @@ class APIHost(CoreSysAttributes):
return {ATTR_SERVICES: services}
@api_process
async def list_boots(self, _: web.Request):
"""Return a list of boot IDs."""
boot_ids = await self.sys_host.logs.get_boot_ids()
return {
ATTR_BOOTS: {
str(1 + i - len(boot_ids)): boot_id
for i, boot_id in enumerate(boot_ids)
}
}
def service_start(self, request):
"""Start a service."""
unit = request.match_info.get(SERVICE)
return asyncio.shield(self.sys_host.services.start(unit))
@api_process
async def list_identifiers(self, _: web.Request):
"""Return a list of syslog identifiers."""
return {ATTR_IDENTIFIERS: await self.sys_host.logs.get_identifiers()}
def service_stop(self, request):
"""Stop a service."""
unit = request.match_info.get(SERVICE)
return asyncio.shield(self.sys_host.services.stop(unit))
async def _get_boot_id(self, possible_offset: str) -> str:
"""Convert offset into boot ID if required."""
with suppress(CoerceInvalid):
offset = vol.Coerce(int)(possible_offset)
try:
return await self.sys_host.logs.get_boot_id(offset)
except (ValueError, HostLogError) as err:
raise APIError() from err
return possible_offset
@api_process
def service_reload(self, request):
"""Reload a service."""
unit = request.match_info.get(SERVICE)
return asyncio.shield(self.sys_host.services.reload(unit))
async def advanced_logs_handler(
self, request: web.Request, identifier: str | None = None, follow: bool = False
) -> web.StreamResponse:
"""Return systemd-journald logs."""
log_formatter = LogFormatter.PLAIN
params: dict[str, Any] = {}
if identifier:
params[PARAM_SYSLOG_IDENTIFIER] = identifier
elif IDENTIFIER in request.match_info:
params[PARAM_SYSLOG_IDENTIFIER] = request.match_info[IDENTIFIER]
else:
params[PARAM_SYSLOG_IDENTIFIER] = self.sys_host.logs.default_identifiers
# Host logs should always be verbose, no matter what Accept header is used
log_formatter = LogFormatter.VERBOSE
@api_process
def service_restart(self, request):
"""Restart a service."""
unit = request.match_info.get(SERVICE)
return asyncio.shield(self.sys_host.services.restart(unit))
if BOOTID in request.match_info:
params[PARAM_BOOT_ID] = await self._get_boot_id(request.match_info[BOOTID])
if follow:
params[PARAM_FOLLOW] = ""
if ACCEPT in request.headers and request.headers[ACCEPT] not in [
CONTENT_TYPE_TEXT,
CONTENT_TYPE_X_LOG,
"*/*",
]:
raise APIError(
"Invalid content type requested. Only text/plain and text/x-log "
"supported for now."
)
if "verbose" in request.query or request.headers[ACCEPT] == CONTENT_TYPE_X_LOG:
log_formatter = LogFormatter.VERBOSE
if "lines" in request.query:
lines = request.query.get("lines", DEFAULT_LINES)
try:
lines = int(lines)
except ValueError:
# If the user passed a non-integer value, just use the default instead of raising an error.
lines = DEFAULT_LINES
finally:
# We can't use the entries= Range header syntax to refer to the last 1 line,
# and passing 1 to the calculation below would return the 1st line of the logs
# instead. Since this is really an edge case that doesn't matter much, we'll just
# return 2 lines at minimum.
lines = max(2, lines)
# entries=cursor[[:num_skip]:num_entries]
range_header = f"entries=:-{lines - 1}:{SYSTEMD_JOURNAL_GATEWAYD_LINES_MAX if follow else lines}"
elif RANGE in request.headers:
range_header = request.headers[RANGE]
else:
range_header = f"entries=:-{DEFAULT_LINES - 1}:{SYSTEMD_JOURNAL_GATEWAYD_LINES_MAX if follow else DEFAULT_LINES}"
async with self.sys_host.logs.journald_logs(
params=params, range_header=range_header, accept=LogFormat.JOURNAL
) as resp:
try:
response = web.StreamResponse()
response.content_type = CONTENT_TYPE_TEXT
headers_returned = False
async for cursor, line in journal_logs_reader(resp, log_formatter):
try:
if not headers_returned:
if cursor:
response.headers["X-First-Cursor"] = cursor
response.headers["X-Accel-Buffering"] = "no"
await response.prepare(request)
headers_returned = True
await response.write(line.encode("utf-8") + b"\n")
except ClientConnectionResetError as err:
# When client closes the connection while reading busy logs, we
# sometimes get this exception. It should be safe to ignore it.
_LOGGER.debug(
"ClientConnectionResetError raised when returning journal logs: %s",
err,
)
break
except ConnectionError as err:
_LOGGER.warning(
"%s raised when returning journal logs: %s",
type(err).__name__,
err,
)
break
except (ConnectionResetError, ClientPayloadError) as ex:
# ClientPayloadError is most likely caused by the client closing the connection
raise APIError(
"Connection reset when trying to fetch data from systemd-journald."
) from ex
return response
@api_process_raw(CONTENT_TYPE_TEXT, error_type=CONTENT_TYPE_TEXT)
async def advanced_logs(
self, request: web.Request, identifier: str | None = None, follow: bool = False
) -> web.StreamResponse:
"""Return systemd-journald logs. Wrapped as standard API handler."""
return await self.advanced_logs_handler(request, identifier, follow)
@api_process_raw(CONTENT_TYPE_BINARY)
def logs(self, request: web.Request) -> Awaitable[bytes]:
"""Return host kernel logs."""
return self.sys_host.info.get_dmesg()
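
The Range header built in `advanced_logs_handler` above uses the systemd-journal-gatewayd syntax entries=cursor[[:num_skip]:num_entries]; the minimum of two lines works around the gateway not being able to address only the last entry. A small sketch of the computation (the LINES_MAX cap value is an assumption for illustration):

# Sketch of the journal Range header math above.
SYSTEMD_JOURNAL_GATEWAYD_LINES_MAX = 100_000  # assumed cap used when following

def journal_range(lines: int, follow: bool) -> str:
    lines = max(2, lines)  # the gateway cannot address just the last line
    num_entries = SYSTEMD_JOURNAL_GATEWAYD_LINES_MAX if follow else lines
    return f"entries=:-{lines - 1}:{num_entries}"

assert journal_range(100, follow=False) == "entries=:-99:100"
assert journal_range(1, follow=False) == "entries=:-1:2"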

44
supervisor/api/info.py Normal file
View File

@ -0,0 +1,44 @@
"""Init file for Supervisor info RESTful API."""
import logging
from typing import Any, Dict
from aiohttp import web
from ..const import (
ATTR_ARCH,
ATTR_CHANNEL,
ATTR_DOCKER,
ATTR_HASSOS,
ATTR_HOMEASSISTANT,
ATTR_HOSTNAME,
ATTR_LOGGING,
ATTR_MACHINE,
ATTR_SUPERVISOR,
ATTR_SUPPORTED_ARCH,
ATTR_TIMEZONE,
)
from ..coresys import CoreSysAttributes
from .utils import api_process
_LOGGER: logging.Logger = logging.getLogger(__name__)
class APIInfo(CoreSysAttributes):
"""Handle RESTful API for info functions."""
@api_process
async def info(self, request: web.Request) -> Dict[str, Any]:
"""Show system info."""
return {
ATTR_SUPERVISOR: self.sys_supervisor.version,
ATTR_HOMEASSISTANT: self.sys_homeassistant.version,
ATTR_HASSOS: self.sys_hassos.version,
ATTR_DOCKER: self.sys_docker.info.version,
ATTR_HOSTNAME: self.sys_host.info.hostname,
ATTR_MACHINE: self.sys_machine,
ATTR_ARCH: self.sys_arch.default,
ATTR_SUPPORTED_ARCH: self.sys_arch.supported,
ATTR_CHANNEL: self.sys_updater.channel,
ATTR_LOGGING: self.sys_config.logging,
ATTR_TIMEZONE: self.sys_timezone,
}

View File

@ -1,89 +1,43 @@
"""Supervisor Add-on ingress service."""
import asyncio
from ipaddress import ip_address
import logging
from typing import Any
from typing import Any, Dict, Union
import aiohttp
from aiohttp import ClientTimeout, hdrs, web
from aiohttp import hdrs, web
from aiohttp.web_exceptions import (
HTTPBadGateway,
HTTPServiceUnavailable,
HTTPUnauthorized,
)
from multidict import CIMultiDict, istr
import voluptuous as vol
from ..addons.addon import Addon
from ..const import (
ATTR_ADMIN,
ATTR_ENABLE,
ATTR_ICON,
ATTR_PANELS,
ATTR_SESSION,
ATTR_SESSION_DATA_USER_ID,
ATTR_TITLE,
HEADER_REMOTE_USER_DISPLAY_NAME,
HEADER_REMOTE_USER_ID,
HEADER_REMOTE_USER_NAME,
ATTR_PANELS,
ATTR_ENABLE,
COOKIE_INGRESS,
HEADER_TOKEN,
HEADER_TOKEN_OLD,
IngressSessionData,
IngressSessionDataUser,
REQUEST_FROM,
)
from ..coresys import CoreSysAttributes
from ..exceptions import HomeAssistantAPIError
from .const import COOKIE_INGRESS
from .utils import api_process, api_validate, require_home_assistant
from .utils import api_process
_LOGGER: logging.Logger = logging.getLogger(__name__)
VALIDATE_SESSION_DATA = vol.Schema({ATTR_SESSION: str})
"""Expected optional payload of create session request"""
SCHEMA_INGRESS_CREATE_SESSION_DATA = vol.Schema(
{
vol.Optional(ATTR_SESSION_DATA_USER_ID): str,
}
)
# from https://github.com/aio-libs/aiohttp/blob/8ae650bee4add9f131d49b96a0a150311ea58cd1/aiohttp/helpers.py#L1059C1-L1079C1
def must_be_empty_body(method: str, code: int) -> bool:
"""Check if a request must return an empty body."""
return (
status_code_must_be_empty_body(code)
or method_must_be_empty_body(method)
or (200 <= code < 300 and method.upper() == hdrs.METH_CONNECT)
)
def method_must_be_empty_body(method: str) -> bool:
"""Check if a method must return an empty body."""
# https://datatracker.ietf.org/doc/html/rfc9112#section-6.3-2.1
# https://datatracker.ietf.org/doc/html/rfc9112#section-6.3-2.2
return method.upper() == hdrs.METH_HEAD
def status_code_must_be_empty_body(code: int) -> bool:
"""Check if a status code must return an empty body."""
# https://datatracker.ietf.org/doc/html/rfc9112#section-6.3-2.1
return code in {204, 304} or 100 <= code < 200
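
Taken together, the three helpers above reproduce aiohttp's rule for bodyless responses: HEAD requests, 1xx/204/304 status codes, and successful CONNECT. Expected behavior, assuming the definitions above are in scope:

# Behavior of the helpers above (hdrs.METH_HEAD == "HEAD", METH_CONNECT == "CONNECT").
assert must_be_empty_body("HEAD", 200)     # HEAD never carries a body
assert must_be_empty_body("GET", 304)      # 304 Not Modified is bodyless
assert must_be_empty_body("CONNECT", 200)  # successful CONNECT switches to tunneling
assert not must_be_empty_body("GET", 200)  # ordinary response may have a body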
class APIIngress(CoreSysAttributes):
"""Ingress view to handle add-on webui routing."""
_list_of_users: list[IngressSessionDataUser]
def __init__(self) -> None:
"""Initialize APIIngress."""
self._list_of_users = []
def _extract_addon(self, request: web.Request) -> Addon:
"""Return addon, throw an exception it it doesn't exist."""
token = request.match_info["token"]
token = request.match_info.get("token")
# Find correct add-on
addon = self.sys_ingress.get(token)
@ -93,12 +47,17 @@ class APIIngress(CoreSysAttributes):
return addon
def _check_ha_access(self, request: web.Request) -> None:
if request[REQUEST_FROM] != self.sys_homeassistant:
_LOGGER.warning("Ingress is only available behind Home Assistant")
raise HTTPUnauthorized()
def _create_url(self, addon: Addon, path: str) -> str:
"""Create URL to container."""
return f"http://{addon.ip_address}:{addon.ingress_port}/{path}"
@api_process
async def panels(self, request: web.Request) -> dict[str, Any]:
async def panels(self, request: web.Request) -> Dict[str, Any]:
"""Create a list of panel data."""
addons = {}
for addon in self.sys_ingress.addons:
@ -112,69 +71,43 @@ class APIIngress(CoreSysAttributes):
return {ATTR_PANELS: addons}
@api_process
@require_home_assistant
async def create_session(self, request: web.Request) -> dict[str, Any]:
async def create_session(self, request: web.Request) -> Dict[str, Any]:
"""Create a new session."""
schema_ingress_config_session_data = await api_validate(
SCHEMA_INGRESS_CREATE_SESSION_DATA, request
)
data: IngressSessionData | None = None
self._check_ha_access(request)
if ATTR_SESSION_DATA_USER_ID in schema_ingress_config_session_data:
user = await self._find_user_by_id(
schema_ingress_config_session_data[ATTR_SESSION_DATA_USER_ID]
)
if user:
data = IngressSessionData(user)
session = self.sys_ingress.create_session(data)
session = self.sys_ingress.create_session()
return {ATTR_SESSION: session}
@api_process
@require_home_assistant
async def validate_session(self, request: web.Request) -> None:
"""Validate session and extending how long it's valid for."""
data = await api_validate(VALIDATE_SESSION_DATA, request)
# Check Ingress Session
if not self.sys_ingress.validate_session(data[ATTR_SESSION]):
_LOGGER.warning("No valid ingress session %s", data[ATTR_SESSION])
raise HTTPUnauthorized()
async def handler(
self, request: web.Request
) -> web.Response | web.StreamResponse | web.WebSocketResponse:
) -> Union[web.Response, web.StreamResponse, web.WebSocketResponse]:
"""Route data to Supervisor ingress service."""
self._check_ha_access(request)
# Check Ingress Session
session = request.cookies.get(COOKIE_INGRESS, "")
session = request.cookies.get(COOKIE_INGRESS)
if not self.sys_ingress.validate_session(session):
_LOGGER.warning("No valid ingress session %s", session)
raise HTTPUnauthorized()
# Process requests
addon = self._extract_addon(request)
path = request.match_info.get("path", "")
session_data = self.sys_ingress.get_session_data(session)
path = request.match_info.get("path")
try:
# Websocket
if _is_websocket(request):
return await self._handle_websocket(request, addon, path, session_data)
return await self._handle_websocket(request, addon, path)
# Request
return await self._handle_request(request, addon, path, session_data)
return await self._handle_request(request, addon, path)
except aiohttp.ClientError as err:
_LOGGER.error("Ingress error: %s", err)
raise HTTPBadGateway()
raise HTTPBadGateway() from None
async def _handle_websocket(
self,
request: web.Request,
addon: Addon,
path: str,
session_data: IngressSessionData | None,
self, request: web.Request, addon: Addon, path: str
) -> web.WebSocketResponse:
"""Ingress route for websocket."""
if hdrs.SEC_WEBSOCKET_PROTOCOL in request.headers:
@ -183,7 +116,7 @@ class APIIngress(CoreSysAttributes):
for proto in request.headers[hdrs.SEC_WEBSOCKET_PROTOCOL].split(",")
]
else:
req_protocols = []
req_protocols = ()
ws_server = web.WebSocketResponse(
protocols=req_protocols, autoclose=False, autoping=False
@ -192,11 +125,11 @@ class APIIngress(CoreSysAttributes):
# Preparing
url = self._create_url(addon, path)
source_header = _init_header(request, addon, session_data)
source_header = _init_header(request, addon)
# Support GET query
if request.query_string:
url = f"{url}?{request.query_string}"
url = "{}?{}".format(url, request.query_string)
# Start proxy
async with self.sys_websession.ws_connect(
@ -209,8 +142,8 @@ class APIIngress(CoreSysAttributes):
# Proxy requests
await asyncio.wait(
[
self.sys_create_task(_websocket_forward(ws_server, ws_client)),
self.sys_create_task(_websocket_forward(ws_client, ws_server)),
_websocket_forward(ws_server, ws_client),
_websocket_forward(ws_client, ws_server),
],
return_when=asyncio.FIRST_COMPLETED,
)
@ -218,25 +151,12 @@ class APIIngress(CoreSysAttributes):
return ws_server
async def _handle_request(
self,
request: web.Request,
addon: Addon,
path: str,
session_data: IngressSessionData | None,
) -> web.Response | web.StreamResponse:
self, request: web.Request, addon: Addon, path: str
) -> Union[web.Response, web.StreamResponse]:
"""Ingress route for request."""
url = self._create_url(addon, path)
source_header = _init_header(request, addon, session_data)
# Passing the raw stream breaks requests for some webservers
# since we just need it for POST requests really, for all other methods
# we read the bytes and pass that to the request to the add-on
# add-ons needs to add support with that in the configuration
data = (
request.content
if request.method == "POST" and addon.ingress_stream
else await request.read()
)
data = await request.read()
source_header = _init_header(request, addon)
async with self.sys_websession.request(
request.method,
@ -245,22 +165,12 @@ class APIIngress(CoreSysAttributes):
params=request.query,
allow_redirects=False,
data=data,
timeout=ClientTimeout(total=None),
skip_auto_headers={hdrs.CONTENT_TYPE},
) as result:
headers = _response_header(result)
# Avoid parsing content_type in simple cases for better performance
if maybe_content_type := result.headers.get(hdrs.CONTENT_TYPE):
content_type = (maybe_content_type.partition(";"))[0].strip()
else:
content_type = result.content_type
# Simple request
if (
# empty body responses should not be streamed,
# otherwise aiohttp < 3.9.0 may generate
# an invalid "0\r\n\r\n" chunk instead of an empty response.
must_be_empty_body(request.method, result.status)
or hdrs.CONTENT_LENGTH in result.headers
hdrs.CONTENT_LENGTH in result.headers
and int(result.headers.get(hdrs.CONTENT_LENGTH, 0)) < 4_194_000
):
# Return Response
@ -268,89 +178,57 @@ class APIIngress(CoreSysAttributes):
return web.Response(
headers=headers,
status=result.status,
content_type=content_type,
content_type=result.content_type,
body=body,
)
# Stream response
response = web.StreamResponse(status=result.status, headers=headers)
response.content_type = content_type
response.content_type = result.content_type
try:
response.headers["X-Accel-Buffering"] = "no"
await response.prepare(request)
async for data, _ in result.content.iter_chunks():
async for data in result.content.iter_chunked(4096):
await response.write(data)
except (
aiohttp.ClientError,
aiohttp.ClientPayloadError,
ConnectionResetError,
) as err:
except (aiohttp.ClientError, aiohttp.ClientPayloadError) as err:
_LOGGER.error("Stream error with %s: %s", url, err)
return response
async def _find_user_by_id(self, user_id: str) -> IngressSessionDataUser | None:
"""Find user object by the user's ID."""
try:
list_of_users = await self.sys_homeassistant.get_users()
except (HomeAssistantAPIError, TypeError) as err:
_LOGGER.error(
"%s error occurred while requesting list of users: %s", type(err), err
)
return None
if list_of_users is not None:
self._list_of_users = list_of_users
return next((user for user in self._list_of_users if user.id == user_id), None)
def _init_header(
request: web.Request, addon: Addon, session_data: IngressSessionData | None
) -> CIMultiDict[str]:
request: web.Request, addon: str
) -> Union[CIMultiDict, Dict[str, str]]:
"""Create initial header."""
headers = CIMultiDict[str]()
if session_data is not None:
headers[HEADER_REMOTE_USER_ID] = session_data.user.id
if session_data.user.username is not None:
headers[HEADER_REMOTE_USER_NAME] = session_data.user.username
if session_data.user.display_name is not None:
headers[HEADER_REMOTE_USER_DISPLAY_NAME] = session_data.user.display_name
headers = {}
# filter flags
for name, value in request.headers.items():
if name in (
hdrs.CONTENT_LENGTH,
hdrs.CONTENT_ENCODING,
hdrs.TRANSFER_ENCODING,
hdrs.SEC_WEBSOCKET_EXTENSIONS,
hdrs.SEC_WEBSOCKET_PROTOCOL,
hdrs.SEC_WEBSOCKET_VERSION,
hdrs.SEC_WEBSOCKET_KEY,
istr(HEADER_TOKEN),
istr(HEADER_TOKEN_OLD),
istr(HEADER_REMOTE_USER_ID),
istr(HEADER_REMOTE_USER_NAME),
istr(HEADER_REMOTE_USER_DISPLAY_NAME),
):
continue
headers.add(name, value)
headers[name] = value
# Update X-Forwarded-For
if request.transport:
forward_for = request.headers.get(hdrs.X_FORWARDED_FOR)
connected_ip = ip_address(request.transport.get_extra_info("peername")[0])
headers[hdrs.X_FORWARDED_FOR] = f"{forward_for}, {connected_ip!s}"
forward_for = request.headers.get(hdrs.X_FORWARDED_FOR)
connected_ip = ip_address(request.transport.get_extra_info("peername")[0])
headers[hdrs.X_FORWARDED_FOR] = f"{forward_for}, {connected_ip!s}"
return headers
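For illustration, the X-Forwarded-For logic above simply appends the connected peer to whatever chain the client already sent; a toy reproduction with invented values:

from ipaddress import ip_address

forward_for = "203.0.113.5"  # chain sent by the client
connected_ip = ip_address("172.30.32.2")  # peer address from the transport
assert f"{forward_for}, {connected_ip!s}" == "203.0.113.5, 172.30.32.2"

Note that when no X-Forwarded-For header was sent, request.headers.get(...) returns None and the f-string would render the literal string "None"; the snippet assumes the header is present.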
def _response_header(response: aiohttp.ClientResponse) -> CIMultiDict[str]:
def _response_header(response: aiohttp.ClientResponse) -> Dict[str, str]:
"""Create response header."""
headers = CIMultiDict[str]()
headers = {}
for name, value in response.headers.items():
if name in (
@ -360,7 +238,7 @@ def _response_header(response: aiohttp.ClientResponse) -> CIMultiDict[str]:
hdrs.CONTENT_ENCODING,
):
continue
headers.add(name, value)
headers[name] = value
return headers


@ -1,121 +0,0 @@
"""Init file for Supervisor Jobs RESTful API."""
import logging
from typing import Any
from aiohttp import web
import voluptuous as vol
from ..coresys import CoreSysAttributes
from ..exceptions import APIError, APINotFound, JobNotFound
from ..jobs import SupervisorJob
from ..jobs.const import ATTR_IGNORE_CONDITIONS, JobCondition
from .const import ATTR_JOBS
from .utils import api_process, api_validate
_LOGGER: logging.Logger = logging.getLogger(__name__)
SCHEMA_OPTIONS = vol.Schema(
{vol.Optional(ATTR_IGNORE_CONDITIONS): [vol.Coerce(JobCondition)]}
)
class APIJobs(CoreSysAttributes):
"""Handle RESTful API for OS functions."""
def _extract_job(self, request: web.Request) -> SupervisorJob:
"""Extract job from request or raise."""
try:
return self.sys_jobs.get_job(request.match_info["uuid"])
except JobNotFound:
raise APINotFound("Job does not exist") from None
def _list_jobs(self, start: SupervisorJob | None = None) -> list[dict[str, Any]]:
"""Return current job tree.
Jobs are added to the cache as they are created, so by default they run oldest to newest.
That is the right ordering for child jobs, since it makes sense to present them in the
order they occurred within the parent. For the list as a whole, sort from newest to
oldest, as it's likely any client is most interested in the newer ones.
"""
# Initially sort oldest to newest so all child lists end up in correct order
jobs_by_parent: dict[str | None, list[SupervisorJob]] = {}
for job in sorted(self.sys_jobs.jobs):
if job.internal:
continue
if job.parent_id not in jobs_by_parent:
jobs_by_parent[job.parent_id] = [job]
else:
jobs_by_parent[job.parent_id].append(job)
# After parent-child organization, sort the root jobs only from newest to oldest
job_list: list[dict[str, Any]] = []
queue: list[tuple[list[dict[str, Any]], SupervisorJob]] = (
[(job_list, start)]
if start
else [
(job_list, job)
for job in sorted(jobs_by_parent.get(None, []), reverse=True)
]
)
while queue:
(current_list, current_job) = queue.pop(0)
child_jobs: list[dict[str, Any]] = []
# We remove parent_id and instead use that info to represent jobs as a tree
job_dict = current_job.as_dict() | {"child_jobs": child_jobs}
job_dict.pop("parent_id")
current_list.append(job_dict)
if current_job.uuid in jobs_by_parent:
queue.extend(
[
(child_jobs, job)
for job in jobs_by_parent.get(current_job.uuid, [])
]
)
return job_list
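A self-contained sketch of the same group-by-parent plus breadth-first flattening, reduced to plain dicts so the ordering is easy to see (hypothetical list_jobs helper with toy data, not the API's real job objects):

from typing import Any

def list_jobs(jobs: list[dict[str, Any]]) -> list[dict[str, Any]]:
    """Group jobs by parent, then flatten breadth-first into a tree."""
    by_parent: dict[str | None, list[dict[str, Any]]] = {}
    for job in jobs:  # assumed already sorted oldest -> newest
        by_parent.setdefault(job["parent_id"], []).append(job)
    result: list[dict[str, Any]] = []
    # Root jobs newest -> oldest, as in the docstring above
    queue = [(result, job) for job in reversed(by_parent.get(None, []))]
    while queue:
        current_list, job = queue.pop(0)
        children: list[dict[str, Any]] = []
        current_list.append({"uuid": job["uuid"], "child_jobs": children})
        queue.extend((children, child) for child in by_parent.get(job["uuid"], []))
    return result

jobs = [
    {"uuid": "a", "parent_id": None},
    {"uuid": "b", "parent_id": None},
    {"uuid": "c", "parent_id": "b"},
]
assert list_jobs(jobs) == [
    {"uuid": "b", "child_jobs": [{"uuid": "c", "child_jobs": []}]},
    {"uuid": "a", "child_jobs": []},
]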
@api_process
async def info(self, request: web.Request) -> dict[str, Any]:
"""Return JobManager information."""
return {
ATTR_IGNORE_CONDITIONS: self.sys_jobs.ignore_conditions,
ATTR_JOBS: self._list_jobs(),
}
@api_process
async def options(self, request: web.Request) -> None:
"""Set options for JobManager."""
body = await api_validate(SCHEMA_OPTIONS, request)
if ATTR_IGNORE_CONDITIONS in body:
self.sys_jobs.ignore_conditions = body[ATTR_IGNORE_CONDITIONS]
await self.sys_jobs.save_data()
await self.sys_resolution.evaluate.evaluate_system()
@api_process
async def reset(self, request: web.Request) -> None:
"""Reset options for JobManager."""
await self.sys_jobs.reset_data()
@api_process
async def job_info(self, request: web.Request) -> dict[str, Any]:
"""Get details of a job by ID."""
job = self._extract_job(request)
return self._list_jobs(job)[0]
@api_process
async def remove_job(self, request: web.Request) -> None:
"""Remove a completed job."""
job = self._extract_job(request)
if not job.done:
raise APIError(f"Job {job.uuid} is not done!")
self.sys_jobs.remove_job(job)


@ -1 +0,0 @@
"""API middleware for aiohttp."""


@ -1,322 +0,0 @@
"""Handle security part of this API."""
from collections.abc import Callable
import logging
import re
from typing import Final
from urllib.parse import unquote
from aiohttp.web import Request, Response, middleware
from aiohttp.web_exceptions import HTTPBadRequest, HTTPForbidden, HTTPUnauthorized
from awesomeversion import AwesomeVersion
from supervisor.homeassistant.const import LANDINGPAGE
from ...addons.const import RE_SLUG
from ...const import (
REQUEST_FROM,
ROLE_ADMIN,
ROLE_BACKUP,
ROLE_DEFAULT,
ROLE_HOMEASSISTANT,
ROLE_MANAGER,
VALID_API_STATES,
)
from ...coresys import CoreSys, CoreSysAttributes
from ...utils import version_is_new_enough
from ..utils import api_return_error, extract_supervisor_token
_LOGGER: logging.Logger = logging.getLogger(__name__)
_CORE_VERSION: Final = AwesomeVersion("2023.3.4")
# fmt: off
_CORE_FRONTEND_PATHS: Final = (
r"|/app/.*\.(?:js|gz|json|map|woff2)"
r"|/(store/)?addons/" + RE_SLUG + r"/(logo|icon)"
)
CORE_FRONTEND: Final = re.compile(
r"^(?:" + _CORE_FRONTEND_PATHS + r")$"
)
# Block Anytime
BLACKLIST: Final = re.compile(
r"^(?:"
r"|/homeassistant/api/hassio/.*"
r"|/core/api/hassio/.*"
r")$"
)
# Free to call or have their own security concepts
NO_SECURITY_CHECK: Final = re.compile(
r"^(?:"
r"|/homeassistant/api/.*"
r"|/homeassistant/websocket"
r"|/core/api/.*"
r"|/core/websocket"
r"|/supervisor/ping"
r"|/ingress/[-_A-Za-z0-9]+/.*"
+ _CORE_FRONTEND_PATHS
+ r")$"
)
# API calls allowed for the Observer
OBSERVER_CHECK: Final = re.compile(
r"^(?:"
r"|/.+/info"
r")$"
)
# Can be called by every add-on
ADDONS_API_BYPASS: Final = re.compile(
r"^(?:"
r"|/addons/self/(?!security|update)[^/]+"
r"|/addons/self/options/config"
r"|/info"
r"|/services.*"
r"|/discovery.*"
r"|/auth"
r")$"
)
# Home Assistant only
CORE_ONLY_PATHS: Final = re.compile(
r"^(?:"
r"/addons/" + RE_SLUG + "/sys_options"
r")$"
)
# Policy role add-on API access
ADDONS_ROLE_ACCESS: dict[str, re.Pattern] = {
ROLE_DEFAULT: re.compile(
r"^(?:"
r"|/.+/info"
r")$"
),
ROLE_HOMEASSISTANT: re.compile(
r"^(?:"
r"|/.+/info"
r"|/core/.+"
r"|/homeassistant/.+"
r")$"
),
ROLE_BACKUP: re.compile(
r"^(?:"
r"|/.+/info"
r"|/backups.*"
r")$"
),
ROLE_MANAGER: re.compile(
r"^(?:"
r"|/.+/info"
r"|/addons(?:/" + RE_SLUG + r"/(?!security).+|/reload)?"
r"|/audio/.+"
r"|/auth/cache"
r"|/available_updates"
r"|/backups.*"
r"|/cli/.+"
r"|/core/.+"
r"|/dns/.+"
r"|/docker/.+"
r"|/jobs/.+"
r"|/hardware/.+"
r"|/hassos/.+"
r"|/homeassistant/.+"
r"|/host/.+"
r"|/mounts.*"
r"|/multicast/.+"
r"|/network/.+"
r"|/observer/.+"
r"|/os/(?!datadisk/wipe).+"
r"|/refresh_updates"
r"|/resolution/.+"
r"|/security/.+"
r"|/snapshots.*"
r"|/store.*"
r"|/supervisor/.+"
r")$"
),
ROLE_ADMIN: re.compile(
r".*"
),
}
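Since each role policy above is just a compiled regular expression over the request path, an access check reduces to a single match call. A toy probe against a trimmed-down manager policy (hypothetical demo name, not part of the middleware):

import re

# Trimmed version of the ROLE_MANAGER pattern above
ROLE_MANAGER_DEMO = re.compile(r"^(?:|/.+/info|/backups.*)$")

assert ROLE_MANAGER_DEMO.match("/backups/new/full")
assert ROLE_MANAGER_DEMO.match("/core/info")
assert not ROLE_MANAGER_DEMO.match("/supervisor/update")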
FILTERS: Final = re.compile(
r"(?:"
# Common exploits
r"proc/self/environ"
r"|(<|%3C).*script.*(>|%3E)"
# File Injections
r"|(\.\.//?)+" # ../../anywhere
r"|[a-zA-Z0-9_]=/([a-z0-9_.]//?)+" # .html?v=/.//test
# SQL Injections
r"|union.*select.*\("
r"|union.*all.*select.*"
r"|concat.*\("
r")",
flags=re.IGNORECASE,
)
# fmt: on
class SecurityMiddleware(CoreSysAttributes):
"""Security middleware functions."""
def __init__(self, coresys: CoreSys):
"""Initialize security middleware."""
self.coresys: CoreSys = coresys
def _recursive_unquote(self, value: str) -> str:
"""Handle values that are encoded multiple times."""
if (unquoted := unquote(value)) != value:
unquoted = self._recursive_unquote(unquoted)
return unquoted
@middleware
async def block_bad_requests(self, request: Request, handler: Callable) -> Response:
"""Process request and tblock commonly known exploit attempts."""
if FILTERS.search(self._recursive_unquote(request.path)):
_LOGGER.warning(
"Filtered a potential harmful request to: %s", request.raw_path
)
raise HTTPBadRequest
if FILTERS.search(self._recursive_unquote(request.query_string)):
_LOGGER.warning(
"Filtered a request with a potential harmful query string: %s",
request.raw_path,
)
raise HTTPBadRequest
return await handler(request)
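As a quick illustration of why the unquoting is recursive: attackers can percent-encode a payload more than once, and a single unquote pass would miss it (toy values):

from urllib.parse import unquote

def recursive_unquote(value: str) -> str:
    """Decode until the value stops changing, as in the middleware above."""
    if (unquoted := unquote(value)) != value:
        unquoted = recursive_unquote(unquoted)
    return unquoted

assert unquote("%252E%252E%252F") == "%2E%2E%2F"      # one pass is not enough
assert recursive_unquote("%252E%252E%252F") == "../"  # fully decoded

Once fully decoded, the (\.\.//?)+ branch of FILTERS above matches the "../" traversal attempt.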
@middleware
async def system_validation(self, request: Request, handler: Callable) -> Response:
"""Check if core is ready to response."""
if self.sys_core.state not in VALID_API_STATES:
return api_return_error(
message=f"System is not ready with state: {self.sys_core.state}"
)
return await handler(request)
@middleware
async def token_validation(self, request: Request, handler: Callable) -> Response:
"""Check security access of this layer."""
request_from: CoreSysAttributes | None = None
supervisor_token = extract_supervisor_token(request)
# Blacklist
if BLACKLIST.match(request.path):
_LOGGER.error("%s is blacklisted!", request.path)
raise HTTPForbidden()
# Ignore security check
if NO_SECURITY_CHECK.match(request.path):
_LOGGER.debug("Passthrough %s", request.path)
request[REQUEST_FROM] = None
return await handler(request)
# No token
if not supervisor_token:
_LOGGER.warning("No API token provided for %s", request.path)
raise HTTPUnauthorized()
# Home-Assistant
if supervisor_token == self.sys_homeassistant.supervisor_token:
_LOGGER.debug("%s access from Home Assistant", request.path)
request_from = self.sys_homeassistant
elif CORE_ONLY_PATHS.match(request.path):
_LOGGER.warning("Attempted access to %s from client besides Home Assistant")
raise HTTPForbidden()
# Host
if supervisor_token == self.sys_plugins.cli.supervisor_token:
_LOGGER.debug("%s access from Host", request.path)
request_from = self.sys_host
# Observer
if supervisor_token == self.sys_plugins.observer.supervisor_token:
if not OBSERVER_CHECK.match(request.path):
_LOGGER.warning("%s invalid Observer access", request.path)
raise HTTPForbidden()
_LOGGER.debug("%s access from Observer", request.path)
request_from = self.sys_plugins.observer
# Add-on
addon = None
if supervisor_token and not request_from:
addon = self.sys_addons.from_token(supervisor_token)
# Check Add-on API access
if addon and ADDONS_API_BYPASS.match(request.path):
_LOGGER.debug("Passthrough %s from %s", request.path, addon.slug)
request_from = addon
elif addon and addon.access_hassio_api:
# Check Role
if ADDONS_ROLE_ACCESS[addon.hassio_role].match(request.path):
_LOGGER.info("%s access from %s", request.path, addon.slug)
request_from = addon
else:
_LOGGER.warning("%s no role for %s", request.path, addon.slug)
elif addon:
_LOGGER.warning(
"%s missing API permission for %s", addon.slug, request.path
)
if request_from:
request[REQUEST_FROM] = request_from
return await handler(request)
_LOGGER.error("Invalid token for access %s", request.path)
raise HTTPForbidden()
@middleware
async def core_proxy(self, request: Request, handler: Callable) -> Response:
"""Validate user from Core API proxy."""
if (
request[REQUEST_FROM] != self.sys_homeassistant
or self.sys_homeassistant.version == LANDINGPAGE
or version_is_new_enough(self.sys_homeassistant.version, _CORE_VERSION)
):
return await handler(request)
authorization_index: int | None = None
content_type_index: int | None = None
user_request: bool = False
admin_request: bool = False
ingress_request: bool = False
for idx, (key, value) in enumerate(request.raw_headers):
if key in (b"Authorization", b"X-Hassio-Key"):
authorization_index = idx
elif key == b"Content-Type":
content_type_index = idx
elif key == b"X-Hass-User-ID":
user_request = True
elif key == b"X-Hass-Is-Admin":
admin_request = value == b"1"
elif key == b"X-Ingress-Path":
ingress_request = True
if (user_request or admin_request) and not ingress_request:
return await handler(request)
is_proxy_request = (
authorization_index is not None
and content_type_index is not None
and content_type_index - authorization_index == 1
)
if (
not CORE_FRONTEND.match(request.path) and is_proxy_request
) or ingress_request:
raise HTTPBadRequest()
return await handler(request)
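The proxy detection above leans on an ordering detail: when a request has been forwarded, the Authorization (or X-Hassio-Key) header is immediately followed by Content-Type in the raw header list. A fabricated example of that adjacency test (illustration only):

raw_headers = [
    (b"Host", b"supervisor"),
    (b"Authorization", b"Bearer abc123"),
    (b"Content-Type", b"application/json"),
]

authorization_index = content_type_index = None
for idx, (key, _value) in enumerate(raw_headers):
    if key in (b"Authorization", b"X-Hassio-Key"):
        authorization_index = idx
    elif key == b"Content-Type":
        content_type_index = idx

is_proxy_request = (
    authorization_index is not None
    and content_type_index is not None
    and content_type_index - authorization_index == 1
)
assert is_proxy_request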


@ -1,138 +0,0 @@
"""Inits file for supervisor mounts REST API."""
from typing import Any, cast
from aiohttp import web
import voluptuous as vol
from ..const import ATTR_NAME, ATTR_STATE
from ..coresys import CoreSysAttributes
from ..exceptions import APIError, APINotFound
from ..mounts.const import ATTR_DEFAULT_BACKUP_MOUNT, MountUsage
from ..mounts.mount import Mount
from ..mounts.validate import SCHEMA_MOUNT_CONFIG, MountData
from .const import ATTR_MOUNTS, ATTR_USER_PATH
from .utils import api_process, api_validate
SCHEMA_OPTIONS = vol.Schema(
{
vol.Optional(ATTR_DEFAULT_BACKUP_MOUNT): vol.Maybe(str),
}
)
class APIMounts(CoreSysAttributes):
"""Handle REST API for mounting options."""
def _extract_mount(self, request: web.Request) -> Mount:
"""Extract mount from request or raise."""
name = request.match_info["mount"]
if name not in self.sys_mounts:
raise APINotFound(f"No mount exists with name {name}")
return self.sys_mounts.get(name)
@api_process
async def info(self, request: web.Request) -> dict[str, Any]:
"""Return MountManager info."""
return {
ATTR_DEFAULT_BACKUP_MOUNT: self.sys_mounts.default_backup_mount.name
if self.sys_mounts.default_backup_mount
else None,
ATTR_MOUNTS: [
mount.to_dict()
| {
ATTR_STATE: mount.state,
ATTR_USER_PATH: mount.container_where.as_posix()
if mount.container_where
else None,
}
for mount in self.sys_mounts.mounts
],
}
@api_process
async def options(self, request: web.Request) -> None:
"""Set Mount Manager options."""
body = await api_validate(SCHEMA_OPTIONS, request)
if ATTR_DEFAULT_BACKUP_MOUNT in body:
name: str | None = body[ATTR_DEFAULT_BACKUP_MOUNT]
if name is None:
self.sys_mounts.default_backup_mount = None
elif (mount := self.sys_mounts.get(name)).usage != MountUsage.BACKUP:
raise APIError(
f"Mount {name} is not used for backups, cannot use it as default backup mount"
)
else:
self.sys_mounts.default_backup_mount = mount
await self.sys_mounts.save_data()
@api_process
async def create_mount(self, request: web.Request) -> None:
"""Create a new mount in supervisor."""
body = cast(MountData, await api_validate(SCHEMA_MOUNT_CONFIG, request))
if body["name"] in self.sys_mounts:
raise APIError(f"A mount already exists with name {body['name']}")
mount = Mount.from_dict(self.coresys, body)
await self.sys_mounts.create_mount(mount)
# If it's a backup mount, reload backups
if mount.usage == MountUsage.BACKUP:
self.sys_create_task(self.sys_backups.reload())
# If there's no default backup mount, set it to the new mount
if not self.sys_mounts.default_backup_mount:
self.sys_mounts.default_backup_mount = mount
await self.sys_mounts.save_data()
@api_process
async def update_mount(self, request: web.Request) -> None:
"""Update an existing mount in supervisor."""
current = self._extract_mount(request)
name_schema = vol.Schema(
{vol.Optional(ATTR_NAME, default=current.name): current.name},
extra=vol.ALLOW_EXTRA,
)
body = cast(
MountData,
await api_validate(vol.All(name_schema, SCHEMA_MOUNT_CONFIG), request),
)
mount = Mount.from_dict(self.coresys, body)
await self.sys_mounts.create_mount(mount)
# If it's a backup mount, reload backups
if mount.usage == MountUsage.BACKUP:
self.sys_create_task(self.sys_backups.reload())
# If this mount was the default backup mount and isn't for backups any more, remove it
elif self.sys_mounts.default_backup_mount == mount:
self.sys_mounts.default_backup_mount = None
await self.sys_mounts.save_data()
@api_process
async def delete_mount(self, request: web.Request) -> None:
"""Delete an existing mount in supervisor."""
current = self._extract_mount(request)
mount = await self.sys_mounts.remove_mount(current.name)
# If it was a backup mount, reload backups
if mount.usage == MountUsage.BACKUP:
self.sys_create_task(self.sys_backups.reload())
await self.sys_mounts.save_data()
@api_process
async def reload_mount(self, request: web.Request) -> None:
"""Reload an existing mount in supervisor."""
mount = self._extract_mount(request)
await self.sys_mounts.reload_mount(mount.name)
# If it's a backup mount, reload backups
if mount.usage == MountUsage.BACKUP:
self.sys_create_task(self.sys_backups.reload())


@ -1,9 +1,7 @@
"""Init file for Supervisor Multicast RESTful API."""
import asyncio
from collections.abc import Awaitable
import logging
from typing import Any
from typing import Any, Awaitable, Dict
from aiohttp import web
import voluptuous as vol
@ -12,39 +10,37 @@ from ..const import (
ATTR_BLK_READ,
ATTR_BLK_WRITE,
ATTR_CPU_PERCENT,
ATTR_VERSION_LATEST,
ATTR_MEMORY_LIMIT,
ATTR_MEMORY_PERCENT,
ATTR_MEMORY_USAGE,
ATTR_NETWORK_RX,
ATTR_NETWORK_TX,
ATTR_UPDATE_AVAILABLE,
ATTR_VERSION,
ATTR_VERSION_LATEST,
CONTENT_TYPE_BINARY,
)
from ..coresys import CoreSysAttributes
from ..exceptions import APIError
from ..validate import version_tag
from .utils import api_process, api_validate
from .utils import api_process, api_process_raw, api_validate
_LOGGER: logging.Logger = logging.getLogger(__name__)
SCHEMA_VERSION = vol.Schema({vol.Optional(ATTR_VERSION): version_tag})
SCHEMA_VERSION = vol.Schema({vol.Optional(ATTR_VERSION): vol.Coerce(str)})
class APIMulticast(CoreSysAttributes):
"""Handle RESTful API for Multicast functions."""
@api_process
async def info(self, request: web.Request) -> dict[str, Any]:
async def info(self, request: web.Request) -> Dict[str, Any]:
"""Return Multicast information."""
return {
ATTR_VERSION: self.sys_plugins.multicast.version,
ATTR_VERSION_LATEST: self.sys_plugins.multicast.latest_version,
ATTR_UPDATE_AVAILABLE: self.sys_plugins.multicast.need_update,
}
@api_process
async def stats(self, request: web.Request) -> dict[str, Any]:
async def stats(self, request: web.Request) -> Dict[str, Any]:
"""Return resource information."""
stats = await self.sys_plugins.multicast.stats()
@ -66,9 +62,14 @@ class APIMulticast(CoreSysAttributes):
version = body.get(ATTR_VERSION, self.sys_plugins.multicast.latest_version)
if version == self.sys_plugins.multicast.version:
raise APIError(f"Version {version} is already in use")
raise APIError("Version {} is already in use".format(version))
await asyncio.shield(self.sys_plugins.multicast.update(version))
@api_process_raw(CONTENT_TYPE_BINARY)
def logs(self, request: web.Request) -> Awaitable[bytes]:
"""Return Multicast Docker logs."""
return self.sys_plugins.multicast.logs()
@api_process
def restart(self, request: web.Request) -> Awaitable[None]:
"""Restart Multicast plugin."""


@ -1,342 +0,0 @@
"""REST API for network."""
import asyncio
from collections.abc import Awaitable
from ipaddress import IPv4Address, IPv4Interface, IPv6Address, IPv6Interface
from typing import Any
from aiohttp import web
import voluptuous as vol
from ..const import (
ATTR_ACCESSPOINTS,
ATTR_ADDR_GEN_MODE,
ATTR_ADDRESS,
ATTR_AUTH,
ATTR_CONNECTED,
ATTR_DNS,
ATTR_DOCKER,
ATTR_ENABLED,
ATTR_FREQUENCY,
ATTR_GATEWAY,
ATTR_HOST_INTERNET,
ATTR_ID,
ATTR_INTERFACE,
ATTR_INTERFACES,
ATTR_IP6_PRIVACY,
ATTR_IPV4,
ATTR_IPV6,
ATTR_MAC,
ATTR_METHOD,
ATTR_MODE,
ATTR_NAMESERVERS,
ATTR_PARENT,
ATTR_PRIMARY,
ATTR_PSK,
ATTR_READY,
ATTR_SIGNAL,
ATTR_SSID,
ATTR_SUPERVISOR_INTERNET,
ATTR_TYPE,
ATTR_VLAN,
ATTR_WIFI,
DOCKER_IPV4_NETWORK_MASK,
DOCKER_NETWORK,
)
from ..coresys import CoreSysAttributes
from ..exceptions import APIError, APINotFound, HostNetworkNotFound
from ..host.configuration import (
AccessPoint,
Interface,
InterfaceAddrGenMode,
InterfaceIp6Privacy,
InterfaceMethod,
Ip6Setting,
IpConfig,
IpSetting,
VlanConfig,
WifiConfig,
)
from ..host.const import AuthMethod, InterfaceType, WifiMode
from .utils import api_process, api_validate
_SCHEMA_IPV4_CONFIG = vol.Schema(
{
vol.Optional(ATTR_ADDRESS): [vol.Coerce(IPv4Interface)],
vol.Optional(ATTR_METHOD): vol.Coerce(InterfaceMethod),
vol.Optional(ATTR_GATEWAY): vol.Coerce(IPv4Address),
vol.Optional(ATTR_NAMESERVERS): [vol.Coerce(IPv4Address)],
}
)
_SCHEMA_IPV6_CONFIG = vol.Schema(
{
vol.Optional(ATTR_ADDRESS): [vol.Coerce(IPv6Interface)],
vol.Optional(ATTR_METHOD): vol.Coerce(InterfaceMethod),
vol.Optional(ATTR_ADDR_GEN_MODE): vol.Coerce(InterfaceAddrGenMode),
vol.Optional(ATTR_IP6_PRIVACY): vol.Coerce(InterfaceIp6Privacy),
vol.Optional(ATTR_GATEWAY): vol.Coerce(IPv6Address),
vol.Optional(ATTR_NAMESERVERS): [vol.Coerce(IPv6Address)],
}
)
_SCHEMA_WIFI_CONFIG = vol.Schema(
{
vol.Optional(ATTR_MODE): vol.Coerce(WifiMode),
vol.Optional(ATTR_AUTH): vol.Coerce(AuthMethod),
vol.Optional(ATTR_SSID): str,
vol.Optional(ATTR_PSK): str,
}
)
# pylint: disable=no-value-for-parameter
SCHEMA_UPDATE = vol.Schema(
{
vol.Optional(ATTR_IPV4): _SCHEMA_IPV4_CONFIG,
vol.Optional(ATTR_IPV6): _SCHEMA_IPV6_CONFIG,
vol.Optional(ATTR_WIFI): _SCHEMA_WIFI_CONFIG,
vol.Optional(ATTR_ENABLED): vol.Boolean(),
}
)
def ip4config_struct(config: IpConfig, setting: IpSetting) -> dict[str, Any]:
"""Return a dict with information about IPv4 configuration."""
return {
ATTR_METHOD: setting.method,
ATTR_ADDRESS: [address.with_prefixlen for address in config.address],
ATTR_NAMESERVERS: [str(address) for address in config.nameservers],
ATTR_GATEWAY: str(config.gateway) if config.gateway else None,
ATTR_READY: config.ready,
}
def ip6config_struct(config: IpConfig, setting: Ip6Setting) -> dict[str, Any]:
"""Return a dict with information about IPv6 configuration."""
return {
ATTR_METHOD: setting.method,
ATTR_ADDR_GEN_MODE: setting.addr_gen_mode,
ATTR_IP6_PRIVACY: setting.ip6_privacy,
ATTR_ADDRESS: [address.with_prefixlen for address in config.address],
ATTR_NAMESERVERS: [str(address) for address in config.nameservers],
ATTR_GATEWAY: str(config.gateway) if config.gateway else None,
ATTR_READY: config.ready,
}
def wifi_struct(config: WifiConfig) -> dict[str, Any]:
"""Return a dict with information about wifi configuration."""
return {
ATTR_MODE: config.mode,
ATTR_AUTH: config.auth,
ATTR_SSID: config.ssid,
ATTR_SIGNAL: config.signal,
}
def vlan_struct(config: VlanConfig) -> dict[str, Any]:
"""Return a dict with information about VLAN configuration."""
return {
ATTR_ID: config.id,
ATTR_PARENT: config.interface,
}
def interface_struct(interface: Interface) -> dict[str, Any]:
"""Return a dict with information of a interface to be used in th API."""
return {
ATTR_INTERFACE: interface.name,
ATTR_TYPE: interface.type,
ATTR_ENABLED: interface.enabled,
ATTR_CONNECTED: interface.connected,
ATTR_PRIMARY: interface.primary,
ATTR_MAC: interface.mac,
ATTR_IPV4: ip4config_struct(interface.ipv4, interface.ipv4setting)
if interface.ipv4 and interface.ipv4setting
else None,
ATTR_IPV6: ip6config_struct(interface.ipv6, interface.ipv6setting)
if interface.ipv6 and interface.ipv6setting
else None,
ATTR_WIFI: wifi_struct(interface.wifi) if interface.wifi else None,
ATTR_VLAN: vlan_struct(interface.vlan) if interface.vlan else None,
}
def accesspoint_struct(accesspoint: AccessPoint) -> dict[str, Any]:
"""Return a dict for AccessPoint."""
return {
ATTR_MODE: accesspoint.mode,
ATTR_SSID: accesspoint.ssid,
ATTR_FREQUENCY: accesspoint.frequency,
ATTR_SIGNAL: accesspoint.signal,
ATTR_MAC: accesspoint.mac,
}
class APINetwork(CoreSysAttributes):
"""Handle REST API for network."""
def _get_interface(self, name: str) -> Interface:
"""Get Interface by name or default."""
if name.lower() == "default":
for interface in self.sys_host.network.interfaces:
if not interface.primary:
continue
return interface
else:
try:
return self.sys_host.network.get(name)
except HostNetworkNotFound:
pass
raise APINotFound(f"Interface {name} does not exist") from None
@api_process
async def info(self, request: web.Request) -> dict[str, Any]:
"""Return network information."""
return {
ATTR_INTERFACES: [
interface_struct(interface)
for interface in self.sys_host.network.interfaces
],
ATTR_DOCKER: {
ATTR_INTERFACE: DOCKER_NETWORK,
ATTR_ADDRESS: str(DOCKER_IPV4_NETWORK_MASK),
ATTR_GATEWAY: str(self.sys_docker.network.gateway),
ATTR_DNS: str(self.sys_docker.network.dns),
},
ATTR_HOST_INTERNET: self.sys_host.network.connectivity,
ATTR_SUPERVISOR_INTERNET: self.sys_supervisor.connectivity,
}
@api_process
async def interface_info(self, request: web.Request) -> dict[str, Any]:
"""Return network information for a interface."""
interface = self._get_interface(request.match_info[ATTR_INTERFACE])
return interface_struct(interface)
@api_process
async def interface_update(self, request: web.Request) -> None:
"""Update the configuration of an interface."""
interface = self._get_interface(request.match_info[ATTR_INTERFACE])
# Validate data
body = await api_validate(SCHEMA_UPDATE, request)
if not body:
raise APIError("You need to supply at least one option to update")
# Apply config
for key, config in body.items():
if key == ATTR_IPV4:
interface.ipv4setting = IpSetting(
method=config.get(ATTR_METHOD, InterfaceMethod.STATIC),
address=config.get(ATTR_ADDRESS, []),
gateway=config.get(ATTR_GATEWAY),
nameservers=config.get(ATTR_NAMESERVERS, []),
)
elif key == ATTR_IPV6:
interface.ipv6setting = Ip6Setting(
method=config.get(ATTR_METHOD, InterfaceMethod.STATIC),
addr_gen_mode=config.get(
ATTR_ADDR_GEN_MODE, InterfaceAddrGenMode.DEFAULT
),
ip6_privacy=config.get(
ATTR_IP6_PRIVACY, InterfaceIp6Privacy.DEFAULT
),
address=config.get(ATTR_ADDRESS, []),
gateway=config.get(ATTR_GATEWAY),
nameservers=config.get(ATTR_NAMESERVERS, []),
)
elif key == ATTR_WIFI:
interface.wifi = WifiConfig(
mode=config.get(ATTR_MODE, WifiMode.INFRASTRUCTURE),
ssid=config.get(ATTR_SSID, ""),
auth=config.get(ATTR_AUTH, AuthMethod.OPEN),
psk=config.get(ATTR_PSK, None),
signal=None,
)
elif key == ATTR_ENABLED:
interface.enabled = config
await asyncio.shield(self.sys_host.network.apply_changes(interface))
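Going by SCHEMA_UPDATE above, a static-IPv4 reconfiguration could be posted as a body like the following (illustrative values; this assumes the ATTR_* constants resolve to the obvious lowercase keys):

body = {
    "ipv4": {
        "method": "static",
        "address": ["192.168.1.50/24"],
        "gateway": "192.168.1.1",
        "nameservers": ["192.168.1.1", "1.1.1.1"],
    },
    "enabled": True,
}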
@api_process
def reload(self, request: web.Request) -> Awaitable[None]:
"""Reload network data."""
return asyncio.shield(
self.sys_host.network.update(force_connectivity_check=True)
)
@api_process
async def scan_accesspoints(self, request: web.Request) -> dict[str, Any]:
"""Scan and return a list of available networks."""
interface = self._get_interface(request.match_info[ATTR_INTERFACE])
# Only wlan is supported
if interface.type != InterfaceType.WIRELESS:
raise APIError(f"Interface {interface.name} is not a valid wireless card!")
ap_list = await self.sys_host.network.scan_wifi(interface)
return {ATTR_ACCESSPOINTS: [accesspoint_struct(ap) for ap in ap_list]}
@api_process
async def create_vlan(self, request: web.Request) -> None:
"""Create a new vlan."""
interface = self._get_interface(request.match_info[ATTR_INTERFACE])
vlan = int(request.match_info.get(ATTR_VLAN, -1))
if vlan < 0:
raise APIError(f"Invalid vlan specified: {vlan}")
# Only ethernet is supported
if interface.type != InterfaceType.ETHERNET:
raise APIError(
f"Interface {interface.name} is not a valid ethernet card for vlan!"
)
body = await api_validate(SCHEMA_UPDATE, request)
vlan_config = VlanConfig(vlan, interface.name)
ipv4_setting = None
if ATTR_IPV4 in body:
ipv4_setting = IpSetting(
method=body[ATTR_IPV4].get(ATTR_METHOD, InterfaceMethod.AUTO),
address=body[ATTR_IPV4].get(ATTR_ADDRESS, []),
gateway=body[ATTR_IPV4].get(ATTR_GATEWAY, None),
nameservers=body[ATTR_IPV4].get(ATTR_NAMESERVERS, []),
)
ipv6_setting = None
if ATTR_IPV6 in body:
ipv6_setting = Ip6Setting(
method=body[ATTR_IPV6].get(ATTR_METHOD, InterfaceMethod.AUTO),
addr_gen_mode=body[ATTR_IPV6].get(
ATTR_ADDR_GEN_MODE, InterfaceAddrGenMode.DEFAULT
),
ip6_privacy=body[ATTR_IPV6].get(
ATTR_IP6_PRIVACY, InterfaceIp6Privacy.DEFAULT
),
address=body[ATTR_IPV6].get(ATTR_ADDRESS, []),
gateway=body[ATTR_IPV6].get(ATTR_GATEWAY, None),
nameservers=body[ATTR_IPV6].get(ATTR_NAMESERVERS, []),
)
vlan_interface = Interface(
"",
"",
"",
True,
True,
False,
InterfaceType.VLAN,
None,
ipv4_setting,
None,
ipv6_setting,
None,
vlan_config,
)
await asyncio.shield(self.sys_host.network.apply_changes(vlan_interface))


@ -1,68 +0,0 @@
"""Init file for Supervisor Observer RESTful API."""
import asyncio
import logging
from typing import Any
from aiohttp import web
import voluptuous as vol
from ..const import (
ATTR_BLK_READ,
ATTR_BLK_WRITE,
ATTR_CPU_PERCENT,
ATTR_HOST,
ATTR_MEMORY_LIMIT,
ATTR_MEMORY_PERCENT,
ATTR_MEMORY_USAGE,
ATTR_NETWORK_RX,
ATTR_NETWORK_TX,
ATTR_UPDATE_AVAILABLE,
ATTR_VERSION,
ATTR_VERSION_LATEST,
)
from ..coresys import CoreSysAttributes
from ..validate import version_tag
from .utils import api_process, api_validate
_LOGGER: logging.Logger = logging.getLogger(__name__)
SCHEMA_VERSION = vol.Schema({vol.Optional(ATTR_VERSION): version_tag})
class APIObserver(CoreSysAttributes):
"""Handle RESTful API for Observer functions."""
@api_process
async def info(self, request: web.Request) -> dict[str, Any]:
"""Return HA Observer information."""
return {
ATTR_HOST: str(self.sys_docker.network.observer),
ATTR_VERSION: self.sys_plugins.observer.version,
ATTR_VERSION_LATEST: self.sys_plugins.observer.latest_version,
ATTR_UPDATE_AVAILABLE: self.sys_plugins.observer.need_update,
}
@api_process
async def stats(self, request: web.Request) -> dict[str, Any]:
"""Return resource information."""
stats = await self.sys_plugins.observer.stats()
return {
ATTR_CPU_PERCENT: stats.cpu_percent,
ATTR_MEMORY_USAGE: stats.memory_usage,
ATTR_MEMORY_LIMIT: stats.memory_limit,
ATTR_MEMORY_PERCENT: stats.memory_percent,
ATTR_NETWORK_RX: stats.network_rx,
ATTR_NETWORK_TX: stats.network_tx,
ATTR_BLK_READ: stats.blk_read,
ATTR_BLK_WRITE: stats.blk_write,
}
@api_process
async def update(self, request: web.Request) -> None:
"""Update HA observer."""
body = await api_validate(SCHEMA_VERSION, request)
version = body.get(ATTR_VERSION, self.sys_plugins.observer.latest_version)
await asyncio.shield(self.sys_plugins.observer.update(version))


@ -1,276 +1,47 @@
"""Init file for Supervisor HassOS RESTful API."""
import asyncio
from collections.abc import Awaitable
import logging
import re
from typing import Any
from typing import Any, Awaitable, Dict
from aiohttp import web
import voluptuous as vol
from ..const import (
ATTR_ACTIVITY_LED,
ATTR_BOARD,
ATTR_BOOT,
ATTR_DEVICES,
ATTR_DISK_LED,
ATTR_HEARTBEAT_LED,
ATTR_ID,
ATTR_NAME,
ATTR_POWER_LED,
ATTR_SERIAL,
ATTR_SIZE,
ATTR_STATE,
ATTR_SWAP_SIZE,
ATTR_SWAPPINESS,
ATTR_UPDATE_AVAILABLE,
ATTR_VERSION,
ATTR_VERSION_LATEST,
)
from ..coresys import CoreSysAttributes
from ..exceptions import APINotFound, BoardInvalidError
from ..resolution.const import ContextType, IssueType, SuggestionType
from ..validate import version_tag
from .const import (
ATTR_BOOT_SLOT,
ATTR_BOOT_SLOTS,
ATTR_DATA_DISK,
ATTR_DEV_PATH,
ATTR_DEVICE,
ATTR_DISKS,
ATTR_MODEL,
ATTR_STATUS,
ATTR_SYSTEM_HEALTH_LED,
ATTR_VENDOR,
BootSlot,
)
from .utils import api_process, api_validate
_LOGGER: logging.Logger = logging.getLogger(__name__)
# pylint: disable=no-value-for-parameter
SCHEMA_VERSION = vol.Schema({vol.Optional(ATTR_VERSION): version_tag})
SCHEMA_SET_BOOT_SLOT = vol.Schema({vol.Required(ATTR_BOOT_SLOT): vol.Coerce(BootSlot)})
SCHEMA_DISK = vol.Schema({vol.Required(ATTR_DEVICE): str})
SCHEMA_YELLOW_OPTIONS = vol.Schema(
{
vol.Optional(ATTR_DISK_LED): vol.Boolean(),
vol.Optional(ATTR_HEARTBEAT_LED): vol.Boolean(),
vol.Optional(ATTR_POWER_LED): vol.Boolean(),
}
)
SCHEMA_GREEN_OPTIONS = vol.Schema(
{
vol.Optional(ATTR_ACTIVITY_LED): vol.Boolean(),
vol.Optional(ATTR_POWER_LED): vol.Boolean(),
vol.Optional(ATTR_SYSTEM_HEALTH_LED): vol.Boolean(),
}
)
RE_SWAP_SIZE = re.compile(r"^\d+([KMG](i?B)?|B)?$", re.IGNORECASE)
SCHEMA_SWAP_OPTIONS = vol.Schema(
{
vol.Optional(ATTR_SWAP_SIZE): vol.Match(RE_SWAP_SIZE),
vol.Optional(ATTR_SWAPPINESS): vol.All(int, vol.Range(min=0, max=200)),
}
)
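RE_SWAP_SIZE accepts a bare byte count or a size with a K/M/G, KB/MB/GB, or KiB/MiB/GiB suffix, case-insensitively; a few probes of the pattern as defined above (illustrative):

import re

RE_SWAP_SIZE = re.compile(r"^\d+([KMG](i?B)?|B)?$", re.IGNORECASE)

for ok in ("33554432", "32MiB", "1G", "512kb"):
    assert RE_SWAP_SIZE.match(ok)
for bad in ("1.5G", "2TB", "-1M"):
    assert not RE_SWAP_SIZE.match(bad)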
# pylint: enable=no-value-for-parameter
SCHEMA_VERSION = vol.Schema({vol.Optional(ATTR_VERSION): vol.Coerce(str)})
class APIOS(CoreSysAttributes):
"""Handle RESTful API for OS functions."""
@api_process
async def info(self, request: web.Request) -> dict[str, Any]:
async def info(self, request: web.Request) -> Dict[str, Any]:
"""Return OS information."""
return {
ATTR_VERSION: self.sys_os.version,
ATTR_VERSION_LATEST: self.sys_os.latest_version,
ATTR_UPDATE_AVAILABLE: self.sys_os.need_update,
ATTR_BOARD: self.sys_os.board,
ATTR_VERSION: self.sys_hassos.version,
ATTR_VERSION_LATEST: self.sys_hassos.latest_version,
ATTR_BOARD: self.sys_hassos.board,
ATTR_BOOT: self.sys_dbus.rauc.boot_slot,
ATTR_DATA_DISK: self.sys_os.datadisk.disk_used_id,
ATTR_BOOT_SLOTS: {
slot.bootname: {
ATTR_STATE: slot.state,
ATTR_STATUS: slot.boot_status,
ATTR_VERSION: slot.bundle_version,
}
for slot in self.sys_os.slots
if slot.bootname
},
}
@api_process
async def update(self, request: web.Request) -> None:
"""Update OS."""
body = await api_validate(SCHEMA_VERSION, request)
version = body.get(ATTR_VERSION, self.sys_os.latest_version)
version = body.get(ATTR_VERSION, self.sys_hassos.latest_version)
await asyncio.shield(self.sys_os.update(version))
await asyncio.shield(self.sys_hassos.update(version))
@api_process
def config_sync(self, request: web.Request) -> Awaitable[None]:
"""Trigger config reload on OS."""
return asyncio.shield(self.sys_os.config_sync())
@api_process
async def migrate_data(self, request: web.Request) -> None:
"""Trigger data disk migration on Host."""
body = await api_validate(SCHEMA_DISK, request)
await asyncio.shield(self.sys_os.datadisk.migrate_disk(body[ATTR_DEVICE]))
@api_process
def wipe_data(self, request: web.Request) -> Awaitable[None]:
"""Trigger data disk wipe on Host."""
return asyncio.shield(self.sys_os.datadisk.wipe_disk())
@api_process
async def set_boot_slot(self, request: web.Request) -> None:
"""Change the active boot slot and reboot into it."""
body = await api_validate(SCHEMA_SET_BOOT_SLOT, request)
await asyncio.shield(self.sys_os.set_boot_slot(body[ATTR_BOOT_SLOT]))
@api_process
async def list_data(self, request: web.Request) -> dict[str, Any]:
"""Return possible data targets."""
return {
ATTR_DEVICES: [disk.id for disk in self.sys_os.datadisk.available_disks],
ATTR_DISKS: [
{
ATTR_NAME: disk.name,
ATTR_VENDOR: disk.vendor,
ATTR_MODEL: disk.model,
ATTR_SERIAL: disk.serial,
ATTR_SIZE: disk.size,
ATTR_ID: disk.id,
ATTR_DEV_PATH: disk.device_path.as_posix(),
}
for disk in self.sys_os.datadisk.available_disks
],
}
@api_process
async def boards_green_info(self, request: web.Request) -> dict[str, Any]:
"""Get green board settings."""
return {
ATTR_ACTIVITY_LED: self.sys_dbus.agent.board.green.activity_led,
ATTR_POWER_LED: self.sys_dbus.agent.board.green.power_led,
ATTR_SYSTEM_HEALTH_LED: self.sys_dbus.agent.board.green.user_led,
}
@api_process
async def boards_green_options(self, request: web.Request) -> None:
"""Update green board settings."""
body = await api_validate(SCHEMA_GREEN_OPTIONS, request)
if ATTR_ACTIVITY_LED in body:
await self.sys_dbus.agent.board.green.set_activity_led(
body[ATTR_ACTIVITY_LED]
)
if ATTR_POWER_LED in body:
await self.sys_dbus.agent.board.green.set_power_led(body[ATTR_POWER_LED])
if ATTR_SYSTEM_HEALTH_LED in body:
await self.sys_dbus.agent.board.green.set_user_led(
body[ATTR_SYSTEM_HEALTH_LED]
)
await self.sys_dbus.agent.board.green.save_data()
@api_process
async def boards_yellow_info(self, request: web.Request) -> dict[str, Any]:
"""Get yellow board settings."""
return {
ATTR_DISK_LED: self.sys_dbus.agent.board.yellow.disk_led,
ATTR_HEARTBEAT_LED: self.sys_dbus.agent.board.yellow.heartbeat_led,
ATTR_POWER_LED: self.sys_dbus.agent.board.yellow.power_led,
}
@api_process
async def boards_yellow_options(self, request: web.Request) -> None:
"""Update yellow board settings."""
body = await api_validate(SCHEMA_YELLOW_OPTIONS, request)
if ATTR_DISK_LED in body:
await self.sys_dbus.agent.board.yellow.set_disk_led(body[ATTR_DISK_LED])
if ATTR_HEARTBEAT_LED in body:
await self.sys_dbus.agent.board.yellow.set_heartbeat_led(
body[ATTR_HEARTBEAT_LED]
)
if ATTR_POWER_LED in body:
await self.sys_dbus.agent.board.yellow.set_power_led(body[ATTR_POWER_LED])
await self.sys_dbus.agent.board.yellow.save_data()
self.sys_resolution.create_issue(
IssueType.REBOOT_REQUIRED,
ContextType.SYSTEM,
suggestions=[SuggestionType.EXECUTE_REBOOT],
)
@api_process
async def boards_other_info(self, request: web.Request) -> dict[str, Any]:
"""Empty success return if board is in use, error otherwise."""
if request.match_info["board"] != self.sys_os.board:
raise BoardInvalidError(
f"{request.match_info['board']} board is not in use", _LOGGER.error
)
return {}
@api_process
async def config_swap_info(self, request: web.Request) -> dict[str, Any]:
"""Get swap settings."""
if (
not self.coresys.os.available
or not self.coresys.os.version
or self.coresys.os.version < "15.0"
):
raise APINotFound(
"Home Assistant OS 15.0 or newer required for swap settings"
)
return {
ATTR_SWAP_SIZE: self.sys_dbus.agent.swap.swap_size,
ATTR_SWAPPINESS: self.sys_dbus.agent.swap.swappiness,
}
@api_process
async def config_swap_options(self, request: web.Request) -> None:
"""Update swap settings."""
if (
not self.coresys.os.available
or not self.coresys.os.version
or self.coresys.os.version < "15.0"
):
raise APINotFound(
"Home Assistant OS 15.0 or newer required for swap settings"
)
body = await api_validate(SCHEMA_SWAP_OPTIONS, request)
reboot_required = False
if ATTR_SWAP_SIZE in body:
old_size = self.sys_dbus.agent.swap.swap_size
await self.sys_dbus.agent.swap.set_swap_size(body[ATTR_SWAP_SIZE])
reboot_required = reboot_required or old_size != body[ATTR_SWAP_SIZE]
if ATTR_SWAPPINESS in body:
old_swappiness = self.sys_dbus.agent.swap.swappiness
await self.sys_dbus.agent.swap.set_swappiness(body[ATTR_SWAPPINESS])
reboot_required = reboot_required or old_swappiness != body[ATTR_SWAPPINESS]
if reboot_required:
self.sys_resolution.create_issue(
IssueType.REBOOT_REQUIRED,
ContextType.SYSTEM,
suggestions=[SuggestionType.EXECUTE_REBOOT],
)
return asyncio.shield(self.sys_hassos.config_sync())

File diff suppressed because one or more lines are too long

Binary file not shown.

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

Some files were not shown because too many files have changed in this diff