Mirror of https://github.com/home-assistant/supervisor.git, synced 2025-07-25 18:16:32 +00:00
Compare commits
170 Commits
3b1b03c8a7
680428f304
f34128c37e
2ed0682b34
fbb0915ef8
780ae1e15c
c617358855
b679c4f4d8
c946c421f2
aeabf7ea25
365b838abf
99c040520e
eefe2f2e06
a366e36b37
27a2fde9e1
9a0f530a2f
baf9695cf7
7873c457d5
cbc48c381f
11e37011bd
cfda559a90
806bd9f52c
953f7d01d7
381e719a0e
296071067d
8336537f51
5c90a00263
1f2bf77784
9aa4f381b8
ae036ceffe
f0ea0d4a44
abc44946bb
3e20a0937d
6cebf52249
bc57deb474
38750d74a8
d1c1a2d418
cf32f036c0
b8852872fe
779f47e25d
be8b36b560
8378d434d4
0b79e09bc0
d747a59696
3ee7c082ec
3f921e50b3
0370320f75
1e19e26ef3
e1a18eeba8
b030879efd
dfa1602ac6
bbda943583
aea15b65b7
5c04249e41
456cec7ed1
52a519e55c
fcb20d0ae8
9b3f2b17bd
3d026b9534
0e8ace949a
1fe6f8ad99
9ef2352d12
2543bcae29
ad9de9f73c
a5556651ae
ac28deff6d
82ee4bc441
bdbd09733a
d5b5a328d7
52b24e177f
e10c58c424
9682870c2c
fd0b894d6a
697515b81f
d912c234fa
e8445ae8f2
6710439ce5
95eec03c91
9b686a2d9a
063d69da90
baaf04981f
bdb25a7ff8
ad2d6a3156
42f885595e
2a88cb9339
4d1a5e2dc2
705e76abe3
7f54383147
63fde3b410
5285e60cd3
2a1e32bb36
a2251e0729
1efee641ba
bbb8fa0b92
7593f857e8
87232cf1e4
9e6a4d65cd
c80fbd77c8
a452969ffe
89fa5c9c7a
73069b628e
8251b6c61c
1faf529b42
86c016b35d
4f35759fe3
3b575eedba
6e6fe5ba39
b5a7e521ae
bac7c21fe8
2eb9ec20d6
406348c068
5e3f4e8ff3
31a67bc642
d0d11db7b1
cbf4b4e27e
c855eaab52
6bac751c4c
da0ae75e8e
154aeaee87
b9bbb99f37
ff849ce692
24456efb6b
0cd9d04e63
39bd20c0e7
481bbc5be8
36da382af3
85f8107b60
2e44e6494f
cd1cc66c77
b76a1f58ea
3fcd254d25
3dff2abe65
ba91be1367
25f93cd338
9b0044edd6
9915c21243
657cb56fb9
1b384cebc9
61089c3507
bc9e3eb95b
c1b45406d6
8e714072c2
88087046de
53393afe8d
4b5bcece64
0e7e4f8b42
9470f44840
0e55e6e67b
6116425265
de497cdc19
88b41e80bb
876afdb26e
9d062c8ed0
122b73202b
5d07dd2c42
adfb433f57
198af54d1e
c3e63a5669
8f27958e20
6fad7d14e1
f7317134e3
9d8db27701
7da3a34304
d413e0dcb9
542ab0411c
999789f7ce
de105f8cb7
b37b0ff744
db330ab58a
4a00caa2e8
.github/ISSUE_TEMPLATE.md (vendored, 69 lines deleted)

@@ -1,69 +0,0 @@
---
name: Report a bug with the Supervisor on a supported System
about: Report an issue related to the Home Assistant Supervisor.
labels: bug
---

<!-- READ THIS FIRST:
- If you need additional help with this template please refer to https://www.home-assistant.io/help/reporting_issues/
- This is for bugs only. Feature and enhancement requests should go in our community forum: https://community.home-assistant.io/c/feature-requests
- Provide as many details as possible. Paste logs, configuration sample and code into the backticks. Do not delete any text from this template!
- If you have a problem with an add-on, make an issue in its repository.
-->

<!--
Important: You can only file a bug report for a supported system! If you run an unsupported installation, this report will be closed without comment.
-->

### Describe the issue

<!-- Provide as many details as possible. -->

### Steps to reproduce

<!-- What do you do to encounter the issue. -->

1. ...
2. ...
3. ...

### Environment details

<!-- You can find these details in the system tab of the supervisor panel, or by using the `ha` CLI. -->

- **Operating System**: xxx
- **Supervisor version**: xxx
- **Home Assistant version**: xxx

### Supervisor logs

<details>
<summary>Supervisor logs</summary>
<!--
- Frontend -> Supervisor -> System
- Or use this command: ha supervisor logs
- Logs are more than just errors, even if you don't think it's important, it is.
-->

```
Paste supervisor logs here
```

</details>

### System Information

<details>
<summary>System Information</summary>
<!--
- Use this command: ha info
-->

```
Paste system info here
```

</details>
.github/ISSUE_TEMPLATE/bug_report.yml (vendored, 9 changed lines)

@@ -1,6 +1,5 @@
name: Bug Report Form
name: Report an issue with Home Assistant Supervisor
description: Report an issue related to the Home Assistant Supervisor.
labels: bug
body:
  - type: markdown
    attributes:
@@ -9,7 +8,7 @@ body:

        If you have a feature or enhancement request, please use the [feature request][fr] section of our [Community Forum][fr].

        [fr]: https://community.home-assistant.io/c/feature-requests
        [fr]: https://github.com/orgs/home-assistant/discussions
  - type: textarea
    validations:
      required: true
@@ -76,7 +75,7 @@ body:
      description: >
        The System information can be found in [Settings -> System -> Repairs -> (three dot menu) -> System Information](https://my.home-assistant.io/redirect/system_health/).
        Click the copy button at the bottom of the pop-up and paste it here.

        [](https://my.home-assistant.io/redirect/system_health/)
  - type: textarea
    attributes:
@@ -86,7 +85,7 @@ body:
        Supervisor diagnostics can be found in [Settings -> Devices & services](https://my.home-assistant.io/redirect/integrations/).
        Find the card that says `Home Assistant Supervisor`, open it, and select the three dot menu of the Supervisor integration entry
        and select 'Download diagnostics'.

        **Please drag-and-drop the downloaded file into the textbox below. Do not copy and paste its contents.**
  - type: textarea
    attributes:
.github/ISSUE_TEMPLATE/config.yml (vendored, 2 changed lines)

@@ -13,7 +13,7 @@ contact_links:
    about: Our documentation has its own issue tracker. Please report issues with the website there.

  - name: Request a feature for the Supervisor
    url: https://community.home-assistant.io/c/feature-requests
    url: https://github.com/orgs/home-assistant/discussions
    about: Request a new feature for the Supervisor.

  - name: I have a question or need support
.github/ISSUE_TEMPLATE/task.yml (vendored, new file, 53 lines)

@@ -0,0 +1,53 @@
name: Task
description: For staff only - Create a task
type: Task
body:
  - type: markdown
    attributes:
      value: |
        ## ⚠️ RESTRICTED ACCESS

        **This form is restricted to Open Home Foundation staff and authorized contributors only.**

        If you are a community member wanting to contribute, please:
        - For bug reports: Use the [bug report form](https://github.com/home-assistant/supervisor/issues/new?template=bug_report.yml)
        - For feature requests: Submit to [Feature Requests](https://github.com/orgs/home-assistant/discussions)

        ---

        ### For authorized contributors

        Use this form to create tasks for development work, improvements, or other actionable items that need to be tracked.
  - type: textarea
    id: description
    attributes:
      label: Description
      description: |
        Provide a clear and detailed description of the task that needs to be accomplished.

        Be specific about what needs to be done, why it's important, and any constraints or requirements.
      placeholder: |
        Describe the task, including:
        - What needs to be done
        - Why this task is needed
        - Expected outcome
        - Any constraints or requirements
    validations:
      required: true
  - type: textarea
    id: additional_context
    attributes:
      label: Additional context
      description: |
        Any additional information, links, research, or context that would be helpful.

        Include links to related issues, research, prototypes, roadmap opportunities etc.
      placeholder: |
        - Roadmap opportunity: [link]
        - Epic: [link]
        - Feature request: [link]
        - Technical design documents: [link]
        - Prototype/mockup: [link]
        - Dependencies: [links]
    validations:
      required: false
.github/copilot-instructions.md (vendored, new file, 288 lines)

@@ -0,0 +1,288 @@
# GitHub Copilot & Claude Code Instructions

This repository contains the Home Assistant Supervisor, a Python 3 based container
orchestration and management system for Home Assistant.

## Supervisor Capabilities & Features

### Architecture Overview

Home Assistant Supervisor is a Python-based container orchestration system that
communicates with the Docker daemon to manage containerized components. It is tightly
integrated with the underlying Operating System and core Operating System components
through D-Bus.

**Managed Components:**
- **Home Assistant Core**: The main home automation application running in its own
  container (also provides the web interface)
- **Add-ons**: Third-party applications and services (each add-on runs in its own
  container)
- **Plugins**: Built-in system services like DNS, Audio, CLI, Multicast, and Observer
- **Host System Integration**: OS-level operations and hardware access via D-Bus
- **Container Networking**: Internal Docker network management and external
  connectivity
- **Storage & Backup**: Data persistence and backup management across all containers

**Key Dependencies:**
- **Docker Engine**: Required for all container operations
- **D-Bus**: System-level communication with the host OS
- **systemd**: Service management for host system operations
- **NetworkManager**: Network configuration and management

### Add-on System

**Add-on Architecture**: Add-ons are containerized applications available through
add-on stores. Each store contains multiple add-ons, and each add-on includes metadata
that tells Supervisor the version, startup configuration (permissions), and available
user configurable options. Add-on metadata typically references a container image that
Supervisor fetches during installation. If not, the Supervisor builds the container
image from a Dockerfile.
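To make that concrete, here is a rough sketch of the kind of metadata an add-on ships, written as a Python dict; the field names are illustrative picks, and the authoritative schema lives in Supervisor's add-on validation code:

```python
# Illustrative sketch of add-on metadata; field names are assumptions for
# illustration, not the authoritative schema.
addon_metadata = {
    "name": "Example Add-on",            # hypothetical add-on
    "version": "1.2.3",                  # compared against the store for updates
    "startup": "services",               # startup configuration
    "options": {"log_level": "info"},    # user-configurable defaults
    "schema": {"log_level": "str"},      # validation schema for the options
    # If "image" is set, Supervisor pulls this prebuilt container image;
    # if it is absent, Supervisor builds the image from a Dockerfile.
    "image": "ghcr.io/example/{arch}-addon-example",
}
```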
**Built-in Stores**: Supervisor comes with several pre-configured stores:
- **Core Add-ons**: Official add-ons maintained by the Home Assistant team
- **Community Add-ons**: Popular third-party add-ons repository
- **ESPHome**: Add-ons for ESPHome ecosystem integration
- **Music Assistant**: Audio and music-related add-ons
- **Local Development**: Local folder for testing custom add-ons during development

**Store Management**: Stores are Git-based repositories that are periodically updated.
When updates are available, users receive notifications.

**Add-on Lifecycle**:
- **Installation**: Supervisor fetches or builds container images based on add-on
  metadata
- **Configuration**: Schema-validated options with integrated UI management
- **Runtime**: Full container lifecycle management, health monitoring
- **Updates**: Automatic or manual version management

### Update System

**Core Components**: Supervisor, Home Assistant Core, HAOS, and built-in plugins
receive version information from a central JSON file fetched from
`https://version.home-assistant.io/{channel}.json`. The `Updater` class handles
fetching this data, validating signatures, and updating internal version tracking.

**Update Channels**: Three channels (`stable`/`beta`/`dev`) determine which version
JSON file is fetched, allowing users to opt into different release streams.
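A minimal sketch of that fetch step, assuming the payload carries keys such as `supervisor` and `homeassistant`; this is not the actual `Updater` implementation, and the signature-validation step is deliberately omitted:

```python
import asyncio

import aiohttp

VERSION_URL = "https://version.home-assistant.io/{channel}.json"


async def fetch_versions(channel: str = "stable") -> dict:
    """Fetch the central version file for a channel (sketch only).

    The real Updater also validates a signature on this data before
    trusting it; that step is omitted here.
    """
    async with aiohttp.ClientSession() as session:
        async with session.get(VERSION_URL.format(channel=channel)) as resp:
            resp.raise_for_status()
            return await resp.json()


async def main() -> None:
    data = await fetch_versions("stable")
    # Key names below are assumptions based on the components listed above.
    print("Supervisor:", data.get("supervisor"))
    print("Home Assistant:", data.get("homeassistant"))


if __name__ == "__main__":
    asyncio.run(main())
```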
**Add-on Updates**: Add-on version information comes from store repository updates, not
the central JSON file. When repositories are refreshed via the store system, add-ons
compare their local versions against repository versions to determine update
availability.

### Backup & Recovery System

**Backup Capabilities**:
- **Full Backups**: Complete system state capture including all add-ons,
  configuration, and data
- **Partial Backups**: Selective backup of specific components (Home Assistant,
  add-ons, folders)
- **Encrypted Backups**: Optional backup encryption with user-provided passwords
- **Multiple Storage Locations**: Local storage and remote backup destinations

**Recovery Features**:
- **One-click Restore**: Simple restoration from backup files
- **Selective Restore**: Choose specific components to restore
- **Automatic Recovery**: Self-healing for common system issues

---

## Supervisor Development

### Python Requirements

- **Compatibility**: Python 3.13+
- **Language Features**: Use modern Python features:
  - Type hints with `typing` module
  - f-strings (preferred over `%` or `.format()`)
  - Dataclasses and enum classes
  - Async/await patterns
  - Pattern matching where appropriate

### Code Quality Standards

- **Formatting**: Ruff
- **Linting**: PyLint and Ruff
- **Type Checking**: MyPy
- **Testing**: pytest with asyncio support
- **Language**: American English for all code, comments, and documentation

### Code Organization

**Core Structure**:

```
supervisor/
├── __init__.py       # Package initialization
├── const.py          # Constants and enums
├── coresys.py        # Core system management
├── bootstrap.py      # System initialization
├── exceptions.py     # Custom exception classes
├── api/              # REST API endpoints
├── addons/           # Add-on management
├── backups/          # Backup system
├── docker/           # Docker integration
├── host/             # Host system interface
├── homeassistant/    # Home Assistant Core management
├── dbus/             # D-Bus system integration
├── hardware/         # Hardware detection and management
├── plugins/          # Plugin system
├── resolution/       # Issue detection and resolution
├── security/         # Security management
├── services/         # Service discovery and management
├── store/            # Add-on store management
└── utils/            # Utility functions
```

**Shared Constants**: Use constants from `supervisor/const.py` instead of hardcoding
values. Define new constants following existing patterns and group related constants
together.

### Supervisor Architecture Patterns

**CoreSysAttributes Inheritance Pattern**: Nearly all major classes in Supervisor
inherit from `CoreSysAttributes`, providing access to the centralized system state
via `self.coresys` and convenient `sys_*` properties.

```python
# Standard Supervisor class pattern
class MyManager(CoreSysAttributes):
    """Manage my functionality."""

    def __init__(self, coresys: CoreSys):
        """Initialize manager."""
        self.coresys: CoreSys = coresys
        self._component: MyComponent = MyComponent(coresys)

    @property
    def component(self) -> MyComponent:
        """Return component handler."""
        return self._component

    # Access system components via inherited properties
    async def do_something(self):
        await self.sys_docker.containers.get("my_container")
        self.sys_bus.fire_event(BusEvent.MY_EVENT, {"data": "value"})
```

**Key Inherited Properties from CoreSysAttributes**:
- `self.sys_docker` - Docker API access
- `self.sys_run_in_executor()` - Execute blocking operations
- `self.sys_create_task()` - Create async tasks
- `self.sys_bus` - Event bus for system events
- `self.sys_config` - System configuration
- `self.sys_homeassistant` - Home Assistant Core management
- `self.sys_addons` - Add-on management
- `self.sys_host` - Host system access
- `self.sys_dbus` - D-Bus system interface

**Load Pattern**: Many components implement a `load()` method which effectively
initializes the component from external sources (containers, files, D-Bus services).
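A minimal sketch of that load pattern, with a hypothetical manager and state file:

```python
import json

from supervisor.coresys import CoreSys, CoreSysAttributes


class MyManager(CoreSysAttributes):
    """Hypothetical manager following the load pattern."""

    def __init__(self, coresys: CoreSys) -> None:
        """Synchronous setup only; no I/O here."""
        self.coresys: CoreSys = coresys
        self._items: dict[str, str] = {}

    async def load(self) -> None:
        """Initialize state from an external source.

        The blocking file read is pushed to the executor so the event
        loop is never blocked.
        """
        self._items = await self.sys_run_in_executor(self._read_state_file)

    def _read_state_file(self) -> dict[str, str]:
        """Blocking helper executed in a thread."""
        path = self.sys_config.path_supervisor / "my_state.json"  # hypothetical file
        if not path.exists():
            return {}
        return json.loads(path.read_text(encoding="utf-8"))
```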
### API Development

**REST API Structure**:
- **Base Path**: `/api/` for all endpoints
- **Authentication**: Bearer token authentication
- **Consistent Response Format**: `{"result": "ok", "data": {...}}` or
  `{"result": "error", "message": "..."}`
- **Validation**: Use voluptuous schemas with `api_validate()`

**Use `@api_process` Decorator**: This decorator handles all standard error handling
and response formatting automatically. The decorator catches `APIError`, `HassioError`,
and other exceptions, returning appropriate HTTP responses.

```python
from ..api.utils import api_process, api_validate

@api_process
async def backup_full(self, request: web.Request) -> dict[str, Any]:
    """Create full backup."""
    body = await api_validate(SCHEMA_BACKUP_FULL, request)
    job = await self.sys_backups.do_backup_full(**body)
    return {ATTR_JOB_ID: job.uuid}
```

### Docker Integration

- **Container Management**: Use Supervisor's Docker manager instead of direct
  Docker API
- **Networking**: Supervisor manages internal Docker networks with predefined IP
  ranges
- **Security**: AppArmor profiles, capability restrictions, and user namespace
  isolation
- **Health Checks**: Implement health monitoring for all managed containers

### D-Bus Integration

- **Use dbus-fast**: Async D-Bus library for system integration
- **Service Management**: systemd, NetworkManager, hostname management
- **Error Handling**: Wrap D-Bus exceptions in Supervisor-specific exceptions
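For orientation, a small standalone dbus-fast sketch that reads the host name from `org.freedesktop.hostname1`. Supervisor wraps such calls in its own interface classes under `supervisor/dbus/`, so treat this strictly as an illustration of the library, not of Supervisor's API:

```python
import asyncio

from dbus_fast import BusType, DBusError
from dbus_fast.aio import MessageBus


async def read_hostname() -> str:
    """Read the Hostname property from systemd-hostnamed (sketch only)."""
    bus = await MessageBus(bus_type=BusType.SYSTEM).connect()
    try:
        introspection = await bus.introspect(
            "org.freedesktop.hostname1", "/org/freedesktop/hostname1"
        )
        proxy = bus.get_proxy_object(
            "org.freedesktop.hostname1", "/org/freedesktop/hostname1", introspection
        )
        iface = proxy.get_interface("org.freedesktop.hostname1")
        # dbus-fast exposes the "Hostname" property as a get_hostname() coroutine.
        return await iface.get_hostname()
    except DBusError as err:
        # Supervisor would wrap this in one of its own exception types.
        raise RuntimeError(f"D-Bus call failed: {err}") from err
    finally:
        bus.disconnect()


if __name__ == "__main__":
    print(asyncio.run(read_hostname()))
```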
### Async Programming

- **All I/O operations must be async**: File operations, network calls, subprocess
  execution
- **Use asyncio patterns**: Prefer `asyncio.gather()` over sequential awaits
- **Executor jobs**: Use `self.sys_run_in_executor()` for blocking operations
- **Two-phase initialization**: `__init__` for sync setup, `post_init()` for async
  initialization
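A small self-contained illustration of why `asyncio.gather()` is preferred over sequential awaits:

```python
import asyncio
import time


async def fetch(name: str, delay: float) -> str:
    """Stand-in for an async I/O call."""
    await asyncio.sleep(delay)
    return name


async def main() -> None:
    start = time.perf_counter()

    # Sequential awaits: total time is the sum of the delays (~0.3s).
    await fetch("a", 0.1)
    await fetch("b", 0.2)

    # asyncio.gather: the same calls run concurrently (~0.2s).
    results = await asyncio.gather(fetch("a", 0.1), fetch("b", 0.2))

    print(results, f"{time.perf_counter() - start:.2f}s total")


if __name__ == "__main__":
    asyncio.run(main())
```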
### Testing

- **Location**: `tests/` directory with module mirroring
- **Fixtures**: Extensive use of pytest fixtures for CoreSys setup
- **Mocking**: Mock external dependencies (Docker, D-Bus, network calls)
- **Coverage**: Minimum 90% test coverage, 100% for security-sensitive code
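A sketch of what such a test can look like; the `coresys` fixture follows the convention above (provided by the suite's conftest), and the assertion is illustrative:

```python
# Illustrative async Supervisor-style test; the coresys fixture and mocks
# come from the test suite's conftest, not from this snippet.
import pytest


@pytest.mark.asyncio
async def test_coresys_is_set_up(coresys):
    """Exercise a component through the mocked CoreSys."""
    # External dependencies (Docker, D-Bus, network) are mocked by the
    # fixtures, so this assertion runs without a real system.
    assert coresys.config is not None
```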
### Error Handling

- **Custom Exceptions**: Defined in `exceptions.py` with clear inheritance hierarchy
- **Error Propagation**: Use `from` clause for exception chaining
- **API Errors**: Use `APIError` with appropriate HTTP status codes
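The chaining rule in practice, using the same `AddonsError(message, logger)` style that appears in the add-on backup code in this changeset:

```python
import logging
import tarfile

from supervisor.exceptions import AddonsError

_LOGGER = logging.getLogger(__name__)


def store_backup(tar_file: str) -> None:
    """Chain the low-level error onto the Supervisor-specific exception."""
    try:
        with tarfile.open(tar_file, "w") as backup:
            backup.add(".")  # illustrative payload
    except (tarfile.TarError, OSError) as err:
        # `from err` preserves the original exception as __cause__.
        raise AddonsError(f"Can't write tarfile: {err}", _LOGGER.error) from err
```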
### Security Considerations

- **Container Security**: AppArmor profiles mandatory for add-ons, minimal
  capabilities
- **Authentication**: Token-based API authentication with role-based access
- **Data Protection**: Backup encryption, secure secret management, comprehensive
  input validation

### Development Commands

```bash
# Run tests, adjust paths as necessary
pytest -qsx tests/

# Linting and formatting
ruff check supervisor/
ruff format supervisor/

# Type checking
mypy --ignore-missing-imports supervisor/

# Pre-commit hooks
pre-commit run --all-files
```

Always run the pre-commit hooks at the end of code editing.

### Common Patterns to Follow

**✅ Use These Patterns**:
- Inherit from `CoreSysAttributes` for system access
- Use `@api_process` decorator for API endpoints
- Use `self.sys_run_in_executor()` for blocking operations
- Access Docker via `self.sys_docker`, not the direct Docker API
- Use constants from `const.py` instead of hardcoding
- Store types in (per-module) `const.py` (e.g. supervisor/store/const.py)

**❌ Avoid These Patterns**:
- Direct Docker API usage - use Supervisor's Docker manager
- Blocking operations in async context (use asyncio alternatives)
- Hardcoded values - use constants from `const.py`
- Manual error handling in API endpoints - let `@api_process` handle it

This guide provides the foundation for contributing to Home Assistant Supervisor.
Follow these patterns and guidelines to ensure code quality, security, and
maintainability.
.github/workflows/builder.yml (vendored, 6 changed lines)

@@ -125,15 +125,15 @@ jobs:

      - name: Set up Python ${{ env.DEFAULT_PYTHON }}
        if: needs.init.outputs.publish == 'true'
        uses: actions/setup-python@v5.5.0
        uses: actions/setup-python@v5.6.0
        with:
          python-version: ${{ env.DEFAULT_PYTHON }}

      - name: Install Cosign
        if: needs.init.outputs.publish == 'true'
        uses: sigstore/cosign-installer@v3.8.1
        uses: sigstore/cosign-installer@v3.9.2
        with:
          cosign-release: "v2.4.0"
          cosign-release: "v2.4.3"

      - name: Install dirhash and calc hash
        if: needs.init.outputs.publish == 'true'
.github/workflows/ci.yaml (vendored, 71 changed lines)

@@ -10,6 +10,7 @@ on:
env:
  DEFAULT_PYTHON: "3.13"
  PRE_COMMIT_CACHE: ~/.cache/pre-commit
  MYPY_CACHE_VERSION: 1

concurrency:
  group: "${{ github.workflow }}-${{ github.ref }}"
@@ -28,7 +29,7 @@ jobs:
        uses: actions/checkout@v4.2.2
      - name: Set up Python
        id: python
        uses: actions/setup-python@v5.5.0
        uses: actions/setup-python@v5.6.0
        with:
          python-version: ${{ env.DEFAULT_PYTHON }}
      - name: Restore Python virtual environment
@@ -69,7 +70,7 @@ jobs:
      - name: Check out code from GitHub
        uses: actions/checkout@v4.2.2
      - name: Set up Python ${{ needs.prepare.outputs.python-version }}
        uses: actions/setup-python@v5.5.0
        uses: actions/setup-python@v5.6.0
        id: python
        with:
          python-version: ${{ needs.prepare.outputs.python-version }}
@@ -112,7 +113,7 @@ jobs:
      - name: Check out code from GitHub
        uses: actions/checkout@v4.2.2
      - name: Set up Python ${{ needs.prepare.outputs.python-version }}
        uses: actions/setup-python@v5.5.0
        uses: actions/setup-python@v5.6.0
        id: python
        with:
          python-version: ${{ needs.prepare.outputs.python-version }}
@@ -170,7 +171,7 @@ jobs:
      - name: Check out code from GitHub
        uses: actions/checkout@v4.2.2
      - name: Set up Python ${{ needs.prepare.outputs.python-version }}
        uses: actions/setup-python@v5.5.0
        uses: actions/setup-python@v5.6.0
        id: python
        with:
          python-version: ${{ needs.prepare.outputs.python-version }}
@@ -214,7 +215,7 @@ jobs:
      - name: Check out code from GitHub
        uses: actions/checkout@v4.2.2
      - name: Set up Python ${{ needs.prepare.outputs.python-version }}
        uses: actions/setup-python@v5.5.0
        uses: actions/setup-python@v5.6.0
        id: python
        with:
          python-version: ${{ needs.prepare.outputs.python-version }}
@@ -258,7 +259,7 @@ jobs:
      - name: Check out code from GitHub
        uses: actions/checkout@v4.2.2
      - name: Set up Python ${{ needs.prepare.outputs.python-version }}
        uses: actions/setup-python@v5.5.0
        uses: actions/setup-python@v5.6.0
        id: python
        with:
          python-version: ${{ needs.prepare.outputs.python-version }}
@@ -286,6 +287,52 @@ jobs:
          . venv/bin/activate
          pylint supervisor tests

  mypy:
    name: Check mypy
    runs-on: ubuntu-latest
    needs: prepare
    steps:
      - name: Check out code from GitHub
        uses: actions/checkout@v4.2.2
      - name: Set up Python ${{ needs.prepare.outputs.python-version }}
        uses: actions/setup-python@v5.6.0
        id: python
        with:
          python-version: ${{ needs.prepare.outputs.python-version }}
      - name: Generate partial mypy restore key
        id: generate-mypy-key
        run: |
          mypy_version=$(cat requirements_test.txt | grep mypy | cut -d '=' -f 3)
          echo "version=$mypy_version" >> $GITHUB_OUTPUT
          echo "key=mypy-${{ env.MYPY_CACHE_VERSION }}-$mypy_version-$(date -u '+%Y-%m-%dT%H:%M:%s')" >> $GITHUB_OUTPUT
      - name: Restore Python virtual environment
        id: cache-venv
        uses: actions/cache@v4.2.3
        with:
          path: venv
          key: >-
            ${{ runner.os }}-venv-${{ needs.prepare.outputs.python-version }}-${{ hashFiles('requirements.txt') }}-${{ hashFiles('requirements_tests.txt') }}
      - name: Fail job if Python cache restore failed
        if: steps.cache-venv.outputs.cache-hit != 'true'
        run: |
          echo "Failed to restore Python virtual environment from cache"
          exit 1
      - name: Restore mypy cache
        uses: actions/cache@v4.2.3
        with:
          path: .mypy_cache
          key: >-
            ${{ runner.os }}-mypy-${{ needs.prepare.outputs.python-version }}-${{ steps.generate-mypy-key.outputs.key }}
          restore-keys: >-
            ${{ runner.os }}-venv-${{ needs.prepare.outputs.python-version }}-mypy-${{ env.MYPY_CACHE_VERSION }}-${{ steps.generate-mypy-key.outputs.version }}
      - name: Register mypy problem matcher
        run: |
          echo "::add-matcher::.github/workflows/matchers/mypy.json"
      - name: Run mypy
        run: |
          . venv/bin/activate
          mypy --ignore-missing-imports supervisor

  pytest:
    runs-on: ubuntu-latest
    needs: prepare
@@ -294,14 +341,14 @@ jobs:
      - name: Check out code from GitHub
        uses: actions/checkout@v4.2.2
      - name: Set up Python ${{ needs.prepare.outputs.python-version }}
        uses: actions/setup-python@v5.5.0
        uses: actions/setup-python@v5.6.0
        id: python
        with:
          python-version: ${{ needs.prepare.outputs.python-version }}
      - name: Install Cosign
        uses: sigstore/cosign-installer@v3.8.1
        uses: sigstore/cosign-installer@v3.9.2
        with:
          cosign-release: "v2.4.0"
          cosign-release: "v2.4.3"
      - name: Restore Python virtual environment
        id: cache-venv
        uses: actions/cache@v4.2.3
@@ -353,7 +400,7 @@ jobs:
      - name: Check out code from GitHub
        uses: actions/checkout@v4.2.2
      - name: Set up Python ${{ needs.prepare.outputs.python-version }}
        uses: actions/setup-python@v5.5.0
        uses: actions/setup-python@v5.6.0
        id: python
        with:
          python-version: ${{ needs.prepare.outputs.python-version }}
@@ -370,7 +417,7 @@ jobs:
          echo "Failed to restore Python virtual environment from cache"
          exit 1
      - name: Download all coverage artifacts
        uses: actions/download-artifact@v4.2.1
        uses: actions/download-artifact@v4.3.0
      - name: Combine coverage results
        run: |
          . venv/bin/activate
@@ -378,4 +425,4 @@ jobs:
          coverage report
          coverage xml
      - name: Upload coverage to Codecov
        uses: codecov/codecov-action@v5.4.0
        uses: codecov/codecov-action@v5.4.3
.github/workflows/matchers/mypy.json (vendored, new file, 16 lines)

@@ -0,0 +1,16 @@
{
  "problemMatcher": [
    {
      "owner": "mypy",
      "pattern": [
        {
          "regexp": "^(.+):(\\d+):\\s(error|warning):\\s(.+)$",
          "file": 1,
          "line": 2,
          "severity": 3,
          "message": 4
        }
      ]
    }
  ]
}
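To see what this matcher captures, here is a quick standalone check of the same regular expression against a representative mypy output line (the sample line is made up):

```python
import re

# Same pattern as in the problem matcher above.
MATCHER = re.compile(r"^(.+):(\d+):\s(error|warning):\s(.+)$")

# A representative mypy output line (illustrative).
sample = "supervisor/addons/addon.py:42: error: Incompatible return value type"

if match := MATCHER.match(sample):
    file, line, severity, message = match.groups()
    print(file, line, severity, message, sep=" | ")
# -> supervisor/addons/addon.py | 42 | error | Incompatible return value type
```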
.github/workflows/restrict-task-creation.yml (vendored, new file, 58 lines)

@@ -0,0 +1,58 @@
name: Restrict task creation

# yamllint disable-line rule:truthy
on:
  issues:
    types: [opened]

jobs:
  check-authorization:
    runs-on: ubuntu-latest
    # Only run if this is a Task issue type (from the issue form)
    if: github.event.issue.issue_type == 'Task'
    steps:
      - name: Check if user is authorized
        uses: actions/github-script@v7
        with:
          script: |
            const issueAuthor = context.payload.issue.user.login;

            // Check if user is an organization member
            try {
              await github.rest.orgs.checkMembershipForUser({
                org: 'home-assistant',
                username: issueAuthor
              });
              console.log(`✅ ${issueAuthor} is an organization member`);
              return; // Authorized
            } catch (error) {
              console.log(`❌ ${issueAuthor} is not authorized to create Task issues`);
            }

            // Close the issue with a comment
            await github.rest.issues.createComment({
              owner: context.repo.owner,
              repo: context.repo.repo,
              issue_number: context.issue.number,
              body: `Hi @${issueAuthor}, thank you for your contribution!\n\n` +
                    `Task issues are restricted to Open Home Foundation staff and authorized contributors.\n\n` +
                    `If you would like to:\n` +
                    `- Report a bug: Please use the [bug report form](https://github.com/home-assistant/supervisor/issues/new?template=bug_report.yml)\n` +
                    `- Request a feature: Please submit to [Feature Requests](https://github.com/orgs/home-assistant/discussions)\n\n` +
                    `If you believe you should have access to create Task issues, please contact the maintainers.`
            });

            await github.rest.issues.update({
              owner: context.repo.owner,
              repo: context.repo.repo,
              issue_number: context.issue.number,
              state: 'closed'
            });

            // Add a label to indicate this was auto-closed
            await github.rest.issues.addLabels({
              owner: context.repo.owner,
              repo: context.repo.repo,
              issue_number: context.issue.number,
              labels: ['auto-closed']
            });
.github/workflows/sentry.yaml (vendored, 2 changed lines)

@@ -12,7 +12,7 @@ jobs:
      - name: Check out code from GitHub
        uses: actions/checkout@v4.2.2
      - name: Sentry Release
        uses: getsentry/action-release@v3.1.1
        uses: getsentry/action-release@v3.2.0
        env:
          SENTRY_AUTH_TOKEN: ${{ secrets.SENTRY_AUTH_TOKEN }}
          SENTRY_ORG: ${{ secrets.SENTRY_ORG }}
.pre-commit-config.yaml

@@ -1,6 +1,6 @@
repos:
  - repo: https://github.com/astral-sh/ruff-pre-commit
    rev: v0.9.1
    rev: v0.11.10
    hooks:
      - id: ruff
        args:
@@ -13,3 +13,15 @@ repos:
      - id: check-executables-have-shebangs
        stages: [manual]
      - id: check-json
  - repo: local
    hooks:
      # Run mypy through our wrapper script in order to get the possible
      # pyenv and/or virtualenv activated; it may not have been e.g. if
      # committing from a GUI tool that was not launched from an activated
      # shell.
      - id: mypy
        name: mypy
        entry: script/run-in-env.sh mypy --ignore-missing-imports
        language: script
        types_or: [python, pyi]
        files: ^supervisor/.+\.(py|pyi)$
Dockerfile

@@ -29,7 +29,7 @@ RUN \
    \
    && curl -Lso /usr/bin/cosign "https://github.com/home-assistant/cosign/releases/download/${COSIGN_VERSION}/cosign_${BUILD_ARCH}" \
    && chmod a+x /usr/bin/cosign \
    && pip3 install uv==0.6.1
    && pip3 install uv==0.6.17

# Install requirements
COPY requirements.txt .
build.yaml

@@ -12,7 +12,7 @@ cosign:
  base_identity: https://github.com/home-assistant/docker-base/.*
  identity: https://github.com/home-assistant/supervisor/.*
args:
  COSIGN_VERSION: 2.4.0
  COSIGN_VERSION: 2.4.3
labels:
  io.hass.type: supervisor
  org.opencontainers.image.title: Home Assistant Supervisor
pyproject.toml

@@ -1,5 +1,5 @@
[build-system]
requires = ["setuptools~=78.1.0", "wheel~=0.45.0"]
requires = ["setuptools~=80.9.0", "wheel~=0.46.1"]
build-backend = "setuptools.build_meta"

[project]
@@ -230,6 +230,9 @@ filterwarnings = [
    "ignore:pkg_resources is deprecated as an API:DeprecationWarning:dirhash",
    "ignore::pytest.PytestUnraisableExceptionWarning",
]
markers = [
    "no_mock_init_websession: disable the autouse mock of init_websession for this test",
]

[tool.ruff]
lint.select = [
@@ -272,7 +275,6 @@ lint.select = [
    "S317", # suspicious-xml-sax-usage
    "S318", # suspicious-xml-mini-dom-usage
    "S319", # suspicious-xml-pull-dom-usage
    "S320", # suspicious-xmle-tree-usage
    "S601", # paramiko-call
    "S602", # subprocess-popen-with-shell-equals-true
    "S604", # call-with-shell-equals-true
requirements.txt

@@ -1,15 +1,15 @@
aiodns==3.2.0
aiohttp==3.11.16
aiodns==3.5.0
aiohttp==3.12.14
atomicwrites-homeassistant==1.4.1
attrs==25.3.0
awesomeversion==24.6.0
blockbuster==1.5.24
awesomeversion==25.5.0
blockbuster==1.5.25
brotli==1.1.0
ciso8601==2.3.2
colorlog==6.9.0
cpe==1.3.1
cryptography==44.0.2
debugpy==1.8.13
cryptography==45.0.5
debugpy==1.8.15
deepmerge==2.0
dirhash==0.5.0
docker==7.1.0
@@ -17,15 +17,14 @@ faust-cchardet==2.1.19
gitpython==3.1.44
jinja2==3.1.6
log-rate-limit==1.4.2
orjson==3.10.16
orjson==3.11.0
pulsectl==24.12.0
pyudev==0.24.3
PyYAML==6.0.2
requests==2.32.3
requests==2.32.4
securetar==2025.2.1
sentry-sdk==2.25.1
setuptools==78.1.0
sentry-sdk==2.33.2
setuptools==80.9.0
voluptuous==0.15.2
dbus-fast==2.44.1
typing_extensions==4.13.1
dbus-fast==2.44.2
zlib-fast==0.2.1
requirements_tests.txt

@@ -1,13 +1,16 @@
astroid==3.3.9
coverage==7.8.0
astroid==3.3.11
coverage==7.9.2
mypy==1.17.0
pre-commit==4.2.0
pylint==3.3.6
pylint==3.3.7
pytest-aiohttp==1.1.0
pytest-asyncio==0.25.2
pytest-cov==6.1.1
pytest-timeout==2.3.1
pytest==8.3.5
ruff==0.11.4
pytest-cov==6.2.1
pytest-timeout==2.4.0
pytest==8.4.1
ruff==0.12.4
time-machine==2.16.0
typing_extensions==4.13.1
urllib3==2.3.0
types-docker==7.1.0.20250705
types-pyyaml==6.0.12.20250516
types-requests==2.32.4.20250611
urllib3==2.5.0
script/run-in-env.sh (new executable file, 30 lines)

@@ -0,0 +1,30 @@
#!/usr/bin/env sh
set -eu

# Used in venv activate script.
# Would be an error if undefined.
OSTYPE="${OSTYPE-}"

# Activate pyenv and virtualenv if present, then run the specified command

# pyenv, pyenv-virtualenv
if [ -s .python-version ]; then
    PYENV_VERSION=$(head -n 1 .python-version)
    export PYENV_VERSION
fi

if [ -n "${VIRTUAL_ENV-}" ] && [ -f "${VIRTUAL_ENV}/bin/activate" ]; then
    . "${VIRTUAL_ENV}/bin/activate"
else
    # other common virtualenvs
    my_path=$(git rev-parse --show-toplevel)

    for venv in venv .venv .; do
        if [ -f "${my_path}/${venv}/bin/activate" ]; then
            . "${my_path}/${venv}/bin/activate"
            break
        fi
    done
fi

exec "$@"
supervisor/__main__.py

@@ -13,7 +13,7 @@ zlib_fast.enable()

# pylint: disable=wrong-import-position
from supervisor import bootstrap  # noqa: E402
from supervisor.utils.blockbuster import activate_blockbuster  # noqa: E402
from supervisor.utils.blockbuster import BlockBusterManager  # noqa: E402
from supervisor.utils.logging import activate_log_queue_handler  # noqa: E402

# pylint: enable=wrong-import-position
@@ -55,7 +55,7 @@ if __name__ == "__main__":
    coresys = loop.run_until_complete(bootstrap.initialize_coresys())
    loop.set_debug(coresys.config.debug)
    if coresys.config.detect_blocking_io:
        activate_blockbuster()
        BlockBusterManager.activate()
    loop.run_until_complete(coresys.core.connect())

    loop.run_until_complete(bootstrap.supervisor_debugger(coresys))
@@ -66,8 +66,15 @@ if __name__ == "__main__":
    _LOGGER.info("Setting up Supervisor")
    loop.run_until_complete(coresys.core.setup())

    loop.call_soon_threadsafe(loop.create_task, coresys.core.start())
    loop.call_soon_threadsafe(bootstrap.reg_signal, loop, coresys)
    bootstrap.register_signal_handlers(loop, coresys)

    try:
        loop.run_until_complete(coresys.core.start())
    except Exception as err:  # pylint: disable=broad-except
        # Supervisor itself is running at this point, just something didn't
        # start as expected. Log with traceback to get more insights for
        # such cases.
        _LOGGER.critical("Supervisor start failed: %s", err, exc_info=True)

    try:
        _LOGGER.info("Running Supervisor")
supervisor/addons/addon.py

@@ -33,8 +33,6 @@ from ..const import (
    ATTR_AUDIO_OUTPUT,
    ATTR_AUTO_UPDATE,
    ATTR_BOOT,
    ATTR_DATA,
    ATTR_EVENT,
    ATTR_IMAGE,
    ATTR_INGRESS_ENTRY,
    ATTR_INGRESS_PANEL,
@@ -50,7 +48,6 @@ from ..const import (
    ATTR_SYSTEM,
    ATTR_SYSTEM_MANAGED,
    ATTR_SYSTEM_MANAGED_CONFIG_ENTRY,
    ATTR_TYPE,
    ATTR_USER,
    ATTR_UUID,
    ATTR_VERSION,
@@ -79,7 +76,7 @@ from ..exceptions import (
    HostAppArmorError,
)
from ..hardware.data import Device
from ..homeassistant.const import WSEvent, WSType
from ..homeassistant.const import WSEvent
from ..jobs.const import JobExecutionLimit
from ..jobs.decorator import Job
from ..resolution.const import ContextType, IssueType, UnhealthyReason
@@ -196,15 +193,12 @@ class Addon(AddonModel):
        ):
            self.sys_resolution.dismiss_issue(self.device_access_missing_issue)

        self.sys_homeassistant.websocket.send_message(
        self.sys_homeassistant.websocket.supervisor_event_custom(
            WSEvent.ADDON,
            {
                ATTR_TYPE: WSType.SUPERVISOR_EVENT,
                ATTR_DATA: {
                    ATTR_EVENT: WSEvent.ADDON,
                    ATTR_SLUG: self.slug,
                    ATTR_STATE: new_state,
                },
            }
                ATTR_SLUG: self.slug,
                ATTR_STATE: new_state,
            },
        )

    @property
@@ -366,7 +360,7 @@ class Addon(AddonModel):
    @property
    def auto_update(self) -> bool:
        """Return if auto update is enable."""
        return self.persist.get(ATTR_AUTO_UPDATE, super().auto_update)
        return self.persist.get(ATTR_AUTO_UPDATE, False)

    @auto_update.setter
    def auto_update(self, value: bool) -> None:
@@ -852,9 +846,10 @@ class Addon(AddonModel):
            await self.sys_ingress.update_hass_panel(self)

        # Cleanup Ingress dynamic port assignment
        need_ingress_token_cleanup = False
        if self.with_ingress:
            need_ingress_token_cleanup = True
            await self.sys_ingress.del_dynamic_port(self.slug)
            self.sys_create_task(self.sys_ingress.reload())

        # Cleanup discovery data
        for message in self.sys_discovery.list_messages:
@@ -869,8 +864,12 @@ class Addon(AddonModel):
            await service.del_service_data(self)

        # Remove from addon manager
        await self.sys_addons.data.uninstall(self)
        self.sys_addons.local.pop(self.slug)
        await self.sys_addons.data.uninstall(self)

        # Cleanup Ingress tokens
        if need_ingress_token_cleanup:
            await self.sys_ingress.reload()

    @Job(
        name="addon_update",
@@ -1323,8 +1322,8 @@ class Addon(AddonModel):
                arcname="data",
            )

            # Backup config
            if addon_config_used:
            # Backup config (if used and existing, restore handles this gracefully)
            if addon_config_used and self.path_config.is_dir():
                atomic_contents_add(
                    backup,
                    self.path_config,
@@ -1360,9 +1359,7 @@ class Addon(AddonModel):
            )
            _LOGGER.info("Finish backup for addon %s", self.slug)
        except (tarfile.TarError, OSError, AddFileError) as err:
            raise AddonsError(
                f"Can't write tarfile {tar_file}: {err}", _LOGGER.error
            ) from err
            raise AddonsError(f"Can't write tarfile: {err}", _LOGGER.error) from err
        finally:
            if was_running:
                wait_for_start = await self.end_backup()
supervisor/addons/build.py

@@ -15,6 +15,7 @@ from ..const import (
    ATTR_SQUASH,
    FILE_SUFFIX_CONFIGURATION,
    META_ADDON,
    SOCKET_DOCKER,
)
from ..coresys import CoreSys, CoreSysAttributes
from ..docker.interface import MAP_ARCH
@@ -121,39 +122,64 @@ class AddonBuild(FileConfiguration, CoreSysAttributes):
        except HassioArchNotFound:
            return False

    def get_docker_args(self, version: AwesomeVersion, image: str | None = None):
        """Create a dict with Docker build arguments.
    def get_docker_args(
        self, version: AwesomeVersion, image_tag: str
    ) -> dict[str, Any]:
        """Create a dict with Docker run args."""
        dockerfile_path = self.get_dockerfile().relative_to(self.addon.path_location)

        Must be run in executor.
        """
        args: dict[str, Any] = {
            "path": str(self.addon.path_location),
            "tag": f"{image or self.addon.image}:{version!s}",
            "dockerfile": str(self.get_dockerfile()),
            "pull": True,
            "forcerm": not self.sys_dev,
            "squash": self.squash,
            "platform": MAP_ARCH[self.arch],
            "labels": {
                "io.hass.version": version,
                "io.hass.arch": self.arch,
                "io.hass.type": META_ADDON,
                "io.hass.name": self._fix_label("name"),
                "io.hass.description": self._fix_label("description"),
                **self.additional_labels,
            },
            "buildargs": {
                "BUILD_FROM": self.base_image,
                "BUILD_VERSION": version,
                "BUILD_ARCH": self.sys_arch.default,
                **self.additional_args,
            },
        build_cmd = [
            "docker",
            "buildx",
            "build",
            ".",
            "--tag",
            image_tag,
            "--file",
            str(dockerfile_path),
            "--platform",
            MAP_ARCH[self.arch],
            "--pull",
        ]

        labels = {
            "io.hass.version": version,
            "io.hass.arch": self.arch,
            "io.hass.type": META_ADDON,
            "io.hass.name": self._fix_label("name"),
            "io.hass.description": self._fix_label("description"),
            **self.additional_labels,
        }

        if self.addon.url:
            args["labels"]["io.hass.url"] = self.addon.url
            labels["io.hass.url"] = self.addon.url

        return args
        for key, value in labels.items():
            build_cmd.extend(["--label", f"{key}={value}"])

        build_args = {
            "BUILD_FROM": self.base_image,
            "BUILD_VERSION": version,
            "BUILD_ARCH": self.sys_arch.default,
            **self.additional_args,
        }

        for key, value in build_args.items():
            build_cmd.extend(["--build-arg", f"{key}={value}"])

        # The addon path will be mounted from the host system
        addon_extern_path = self.sys_config.local_to_extern_path(
            self.addon.path_location
        )

        return {
            "command": build_cmd,
            "volumes": {
                SOCKET_DOCKER: {"bind": "/var/run/docker.sock", "mode": "rw"},
                addon_extern_path: {"bind": "/addon", "mode": "ro"},
            },
            "working_dir": "/addon",
        }

    def _fix_label(self, label_name: str) -> str:
        """Remove characters they are not supported."""
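For a sense of the shape of the new return value, here is roughly what `get_docker_args()` now produces for a hypothetical local add-on; every path and name below is illustrative, not taken from a real system:

```python
# Illustrative shape of the new get_docker_args() result for a hypothetical
# add-on. Values here are made up for the sketch.
docker_run_args = {
    "command": [
        "docker", "buildx", "build", ".",
        "--tag", "local/aarch64-addon-example:1.0.0",
        "--file", "Dockerfile",
        "--platform", "linux/arm64",
        "--pull",
        "--label", "io.hass.version=1.0.0",
        "--build-arg", "BUILD_ARCH=aarch64",
    ],
    "volumes": {
        # Docker socket mounted read-write, add-on sources read-only.
        "/run/docker.sock": {"bind": "/var/run/docker.sock", "mode": "rw"},
        "/mnt/data/supervisor/addons/local/example": {"bind": "/addon", "mode": "ro"},
    },
    "working_dir": "/addon",
}
```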
supervisor/addons/manager.py

@@ -67,6 +67,10 @@ class AddonManager(CoreSysAttributes):
            return self.store.get(addon_slug)
        return None

    def get_local_only(self, addon_slug: str) -> Addon | None:
        """Return an installed add-on from slug."""
        return self.local.get(addon_slug)

    def from_token(self, token: str) -> Addon | None:
        """Return an add-on from Supervisor token."""
        for addon in self.installed:
@@ -262,7 +266,7 @@ class AddonManager(CoreSysAttributes):
        ],
        on_condition=AddonsJobError,
    )
    async def rebuild(self, slug: str) -> asyncio.Task | None:
    async def rebuild(self, slug: str, *, force: bool = False) -> asyncio.Task | None:
        """Perform a rebuild of local build add-on.

        Returns a Task that completes when addon has state 'started' (see addon.start)
@@ -285,7 +289,7 @@ class AddonManager(CoreSysAttributes):
            raise AddonsError(
                "Version changed, use Update instead Rebuild", _LOGGER.error
            )
        if not addon.need_build:
        if not force and not addon.need_build:
            raise AddonsNotSupportedError(
                "Can't rebuild a image based add-on", _LOGGER.error
            )
supervisor/addons/model.py

@@ -664,12 +664,16 @@ class AddonModel(JobGroup, ABC):
        """Validate if addon is available for current system."""
        return self._validate_availability(self.data, logger=_LOGGER.error)

    def __eq__(self, other):
        """Compaired add-on objects."""
    def __eq__(self, other: Any) -> bool:
        """Compare add-on objects."""
        if not isinstance(other, AddonModel):
            return False
        return self.slug == other.slug

    def __hash__(self) -> int:
        """Hash for add-on objects."""
        return hash(self.slug)

    def _validate_availability(
        self, config, *, logger: Callable[..., None] | None = None
    ) -> None:
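A side note on why `__hash__` is added together with `__eq__` here: defining `__eq__` on a class sets its `__hash__` to `None`, so instances would otherwise stop working as set members and dict keys. A minimal demonstration:

```python
class WithoutHash:
    """Defines __eq__ only; Python then sets __hash__ to None."""

    def __init__(self, slug: str) -> None:
        self.slug = slug

    def __eq__(self, other: object) -> bool:
        return isinstance(other, WithoutHash) and self.slug == other.slug


class WithHash(WithoutHash):
    """Restores hashability, consistent with __eq__."""

    def __hash__(self) -> int:
        return hash(self.slug)


print(len({WithHash("a"), WithHash("a")}))  # 1: duplicates collapse as intended
try:
    {WithoutHash("a")}
except TypeError as err:
    print(err)  # unhashable type: 'WithoutHash'
```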
supervisor/api/__init__.py

@@ -8,7 +8,7 @@ from typing import Any

from aiohttp import hdrs, web

from ..const import AddonState
from ..const import SUPERVISOR_DOCKER_NAME, AddonState
from ..coresys import CoreSys, CoreSysAttributes
from ..exceptions import APIAddonNotInstalled, HostNotSupportedError
from ..utils.sentry import async_capture_exception
@@ -345,6 +345,9 @@ class RestAPI(CoreSysAttributes):
        api_root.coresys = self.coresys

        self.webapp.add_routes([web.get("/info", api_root.info)])
        self.webapp.add_routes([web.post("/reload_updates", api_root.reload_updates)])

        # Discouraged
        self.webapp.add_routes([web.post("/refresh_updates", api_root.refresh_updates)])
        self.webapp.add_routes(
            [web.get("/available_updates", api_root.available_updates)]
@@ -423,7 +426,7 @@ class RestAPI(CoreSysAttributes):
        async def get_supervisor_logs(*args, **kwargs):
            try:
                return await self._api_host.advanced_logs_handler(
                    *args, identifier="hassio_supervisor", **kwargs
                    *args, identifier=SUPERVISOR_DOCKER_NAME, **kwargs
                )
            except Exception as err:  # pylint: disable=broad-exception-caught
                # Supervisor logs are critical, so catch everything, log the exception
@@ -786,6 +789,7 @@ class RestAPI(CoreSysAttributes):
        self.webapp.add_routes(
            [
                web.get("/docker/info", api_docker.info),
                web.post("/docker/options", api_docker.options),
                web.get("/docker/registries", api_docker.registries),
                web.post("/docker/registries", api_docker.create_registry),
                web.delete("/docker/registries/{hostname}", api_docker.remove_registry),
supervisor/api/addons.py

@@ -36,6 +36,7 @@ from ..const import (
    ATTR_DNS,
    ATTR_DOCKER_API,
    ATTR_DOCUMENTATION,
    ATTR_FORCE,
    ATTR_FULL_ACCESS,
    ATTR_GPIO,
    ATTR_HASSIO_API,
@@ -139,6 +140,8 @@ SCHEMA_SECURITY = vol.Schema({vol.Optional(ATTR_PROTECTED): vol.Boolean()})
SCHEMA_UNINSTALL = vol.Schema(
    {vol.Optional(ATTR_REMOVE_CONFIG, default=False): vol.Boolean()}
)

SCHEMA_REBUILD = vol.Schema({vol.Optional(ATTR_FORCE, default=False): vol.Boolean()})
# pylint: enable=no-value-for-parameter


@@ -461,7 +464,11 @@ class APIAddons(CoreSysAttributes):
    async def rebuild(self, request: web.Request) -> None:
        """Rebuild local build add-on."""
        addon = self.get_addon_for_request(request)
        if start_task := await asyncio.shield(self.sys_addons.rebuild(addon.slug)):
        body: dict[str, Any] = await api_validate(SCHEMA_REBUILD, request)

        if start_task := await asyncio.shield(
            self.sys_addons.rebuild(addon.slug, force=body[ATTR_FORCE])
        ):
            await start_task

    @api_process
supervisor/api/auth.py

@@ -3,18 +3,19 @@
import asyncio
from collections.abc import Awaitable
import logging
from typing import Any
from typing import Any, cast

from aiohttp import BasicAuth, web
from aiohttp.hdrs import AUTHORIZATION, CONTENT_TYPE, WWW_AUTHENTICATE
from aiohttp.web import FileField
from aiohttp.web_exceptions import HTTPUnauthorized
from multidict import MultiDictProxy
import voluptuous as vol

from ..addons.addon import Addon
from ..const import ATTR_NAME, ATTR_PASSWORD, ATTR_USERNAME, REQUEST_FROM
from ..coresys import CoreSysAttributes
from ..exceptions import APIForbidden
from ..utils.json import json_loads
from .const import (
    ATTR_GROUP_IDS,
    ATTR_IS_ACTIVE,
@@ -24,7 +25,7 @@ from .const import (
    CONTENT_TYPE_JSON,
    CONTENT_TYPE_URL,
)
from .utils import api_process, api_validate
from .utils import api_process, api_validate, json_loads

_LOGGER: logging.Logger = logging.getLogger(__name__)

@@ -52,7 +53,10 @@ class APIAuth(CoreSysAttributes):
        return self.sys_auth.check_login(addon, auth.login, auth.password)

    def _process_dict(
        self, request: web.Request, addon: Addon, data: dict[str, str]
        self,
        request: web.Request,
        addon: Addon,
        data: dict[str, Any] | MultiDictProxy[str | bytes | FileField],
    ) -> Awaitable[bool]:
        """Process login with dict data.

@@ -61,14 +65,22 @@ class APIAuth(CoreSysAttributes):
        username = data.get("username") or data.get("user")
        password = data.get("password")

        return self.sys_auth.check_login(addon, username, password)
        # Test that we did receive strings and not something else, raise if so
        try:
            _ = username.encode and password.encode  # type: ignore
        except AttributeError:
            raise HTTPUnauthorized(headers=REALM_HEADER) from None

        return self.sys_auth.check_login(
            addon, cast(str, username), cast(str, password)
        )

    @api_process
    async def auth(self, request: web.Request) -> bool:
        """Process login request."""
        addon = request[REQUEST_FROM]

        if not addon.access_auth_api:
        if not isinstance(addon, Addon) or not addon.access_auth_api:
            raise APIForbidden("Can't use Home Assistant auth!")

        # BasicAuth
@@ -80,13 +92,18 @@ class APIAuth(CoreSysAttributes):
        # Json
        if request.headers.get(CONTENT_TYPE) == CONTENT_TYPE_JSON:
            data = await request.json(loads=json_loads)
            return await self._process_dict(request, addon, data)
            if not await self._process_dict(request, addon, data):
                raise HTTPUnauthorized()
            return True

        # URL encoded
        if request.headers.get(CONTENT_TYPE) == CONTENT_TYPE_URL:
            data = await request.post()
            return await self._process_dict(request, addon, data)
            if not await self._process_dict(request, addon, data):
                raise HTTPUnauthorized()
            return True

        # Advertise Basic authentication by default
        raise HTTPUnauthorized(headers=REALM_HEADER)

    @api_process
supervisor/api/backups.py

@@ -53,7 +53,6 @@ from ..coresys import CoreSysAttributes
from ..exceptions import APIError, APIForbidden, APINotFound
from ..jobs import JobSchedulerOptions, SupervisorJob
from ..mounts.const import MountUsage
from ..mounts.mount import Mount
from ..resolution.const import UnhealthyReason
from .const import (
    ATTR_ADDITIONAL_LOCATIONS,
@@ -495,7 +494,7 @@ class APIBackups(CoreSysAttributes):
        """Upload a backup file."""
        location: LOCATION_TYPE = None
        locations: list[LOCATION_TYPE] | None = None
        tmp_path = self.sys_config.path_tmp

        if ATTR_LOCATION in request.query:
            location_names: list[str] = request.query.getall(ATTR_LOCATION, [])
            self._validate_cloud_backup_location(
@@ -510,9 +509,6 @@ class APIBackups(CoreSysAttributes):
            ]
            location = locations.pop(0)

        if location and location != LOCATION_CLOUD_BACKUP:
            tmp_path = cast(Mount, location).local_where

        filename: str | None = None
        if ATTR_FILENAME in request.query:
            filename = request.query.get(ATTR_FILENAME)
@@ -521,13 +517,14 @@ class APIBackups(CoreSysAttributes):
            except vol.Invalid as ex:
                raise APIError(humanize_error(filename, ex)) from None

        tmp_path = await self.sys_backups.get_upload_path_for_location(location)
        temp_dir: TemporaryDirectory | None = None
        backup_file_stream: IOBase | None = None

        def open_backup_file() -> Path:
            nonlocal temp_dir, backup_file_stream
            temp_dir = TemporaryDirectory(dir=tmp_path.as_posix())
            tar_file = Path(temp_dir.name, "backup.tar")
            tar_file = Path(temp_dir.name, "upload.tar")
            backup_file_stream = tar_file.open("wb")
            return tar_file
@@ -87,4 +87,4 @@ class DetectBlockingIO(StrEnum):

     OFF = "off"
     ON = "on"
-    ON_AT_STARTUP = "on_at_startup"
+    ON_AT_STARTUP = "on-at-startup"

@@ -1,7 +1,7 @@
 """Init file for Supervisor network RESTful API."""

 import logging
-from typing import Any, cast
+from typing import Any

 from aiohttp import web
 import voluptuous as vol
@@ -56,8 +56,8 @@ class APIDiscovery(CoreSysAttributes):
                 }
                 for message in self.sys_discovery.list_messages
                 if (
-                    discovered := cast(
-                        Addon, self.sys_addons.get(message.addon, local_only=True)
+                    discovered := self.sys_addons.get_local_only(
+                        message.addon,
                     )
                 )
                 and discovered.state == AddonState.STARTED

@@ -7,6 +7,7 @@ from aiohttp import web
 import voluptuous as vol

 from ..const import (
+    ATTR_ENABLE_IPV6,
     ATTR_HOSTNAME,
     ATTR_LOGGING,
     ATTR_PASSWORD,
@@ -30,10 +31,39 @@ SCHEMA_DOCKER_REGISTRY = vol.Schema(
     }
 )

+# pylint: disable=no-value-for-parameter
+SCHEMA_OPTIONS = vol.Schema({vol.Optional(ATTR_ENABLE_IPV6): vol.Boolean()})
+

 class APIDocker(CoreSysAttributes):
     """Handle RESTful API for Docker configuration."""

     @api_process
+    async def info(self, request: web.Request):
+        """Get docker info."""
+        data_registries = {}
+        for hostname, registry in self.sys_docker.config.registries.items():
+            data_registries[hostname] = {
+                ATTR_USERNAME: registry[ATTR_USERNAME],
+            }
+        return {
+            ATTR_VERSION: self.sys_docker.info.version,
+            ATTR_ENABLE_IPV6: self.sys_docker.config.enable_ipv6,
+            ATTR_STORAGE: self.sys_docker.info.storage,
+            ATTR_LOGGING: self.sys_docker.info.logging,
+            ATTR_REGISTRIES: data_registries,
+        }
+
+    @api_process
+    async def options(self, request: web.Request) -> None:
+        """Set docker options."""
+        body = await api_validate(SCHEMA_OPTIONS, request)
+
+        if ATTR_ENABLE_IPV6 in body:
+            self.sys_docker.config.enable_ipv6 = body[ATTR_ENABLE_IPV6]
+
+        await self.sys_docker.config.save_data()
+
+    @api_process
     async def registries(self, request) -> dict[str, Any]:
         """Return the list of registries."""
@@ -64,18 +94,3 @@ class APIDocker(CoreSysAttributes):

         del self.sys_docker.config.registries[hostname]
         await self.sys_docker.config.save_data()
-
-    @api_process
-    async def info(self, request: web.Request):
-        """Get docker info."""
-        data_registries = {}
-        for hostname, registry in self.sys_docker.config.registries.items():
-            data_registries[hostname] = {
-                ATTR_USERNAME: registry[ATTR_USERNAME],
-            }
-        return {
-            ATTR_VERSION: self.sys_docker.info.version,
-            ATTR_STORAGE: self.sys_docker.info.storage,
-            ATTR_LOGGING: self.sys_docker.info.logging,
-            ATTR_REGISTRIES: data_registries,
-        }

@@ -118,7 +118,7 @@ class APIHomeAssistant(CoreSysAttributes):
         body = await api_validate(SCHEMA_OPTIONS, request)

         if ATTR_IMAGE in body:
-            self.sys_homeassistant.image = body[ATTR_IMAGE]
+            self.sys_homeassistant.set_image(body[ATTR_IMAGE])
             self.sys_homeassistant.override_image = (
                 self.sys_homeassistant.image != self.sys_homeassistant.default_image
             )

@@ -5,7 +5,7 @@ from contextlib import suppress
 import logging
 from typing import Any

-from aiohttp import ClientConnectionResetError, web
+from aiohttp import ClientConnectionResetError, ClientPayloadError, web
 from aiohttp.hdrs import ACCEPT, RANGE
 import voluptuous as vol
 from voluptuous.error import CoerceInvalid
@@ -37,6 +37,7 @@ from ..host.const import (
     LogFormat,
     LogFormatter,
 )
+from ..host.logs import SYSTEMD_JOURNAL_GATEWAYD_LINES_MAX
 from ..utils.systemd_journal import journal_logs_reader
 from .const import (
     ATTR_AGENT_VERSION,
@@ -238,13 +239,11 @@ class APIHost(CoreSysAttributes):
             # return 2 lines at minimum.
             lines = max(2, lines)
             # entries=cursor[[:num_skip]:num_entries]
-            range_header = f"entries=:-{lines - 1}:{'' if follow else lines}"
+            range_header = f"entries=:-{lines - 1}:{SYSTEMD_JOURNAL_GATEWAYD_LINES_MAX if follow else lines}"
         elif RANGE in request.headers:
             range_header = request.headers[RANGE]
         else:
-            range_header = (
-                f"entries=:-{DEFAULT_LINES - 1}:{'' if follow else DEFAULT_LINES}"
-            )
+            range_header = f"entries=:-{DEFAULT_LINES - 1}:{SYSTEMD_JOURNAL_GATEWAYD_LINES_MAX if follow else DEFAULT_LINES}"

         async with self.sys_host.logs.journald_logs(
             params=params, range_header=range_header, accept=LogFormat.JOURNAL
@@ -270,7 +269,15 @@ class APIHost(CoreSysAttributes):
                         err,
                     )
                     break
-        except ConnectionResetError as ex:
+                except ConnectionError as err:
+                    _LOGGER.warning(
+                        "%s raised when returning journal logs: %s",
+                        type(err).__name__,
+                        err,
+                    )
+                    break
+        except (ConnectionResetError, ClientPayloadError) as ex:
+            # ClientPayloadError is most likely caused by the closing of the connection
             raise APIError(
                 "Connection reset when trying to fetch data from systemd-journald."
             ) from ex

@@ -309,9 +309,9 @@ class APIIngress(CoreSysAttributes):

 def _init_header(
     request: web.Request, addon: Addon, session_data: IngressSessionData | None
-) -> CIMultiDict | dict[str, str]:
+) -> CIMultiDict[str]:
     """Create initial header."""
-    headers = {}
+    headers = CIMultiDict[str]()

     if session_data is not None:
         headers[HEADER_REMOTE_USER_ID] = session_data.user.id
@@ -337,7 +337,7 @@ def _init_header(
             istr(HEADER_REMOTE_USER_DISPLAY_NAME),
         ):
             continue
-        headers[name] = value
+        headers.add(name, value)

     # Update X-Forwarded-For
    if request.transport:
@@ -348,9 +348,9 @@ def _init_header(
     return headers


-def _response_header(response: aiohttp.ClientResponse) -> dict[str, str]:
+def _response_header(response: aiohttp.ClientResponse) -> CIMultiDict[str]:
     """Create response header."""
-    headers = {}
+    headers = CIMultiDict[str]()

     for name, value in response.headers.items():
         if name in (
@@ -360,7 +360,7 @@ def _response_header(response: aiohttp.ClientResponse) -> CIMultiDict[str]:
             hdrs.CONTENT_ENCODING,
         ):
             continue
-        headers[name] = value
+        headers.add(name, value)

     return headers

@@ -20,7 +20,7 @@ from ...const import (
     ROLE_DEFAULT,
     ROLE_HOMEASSISTANT,
     ROLE_MANAGER,
-    CoreState,
+    VALID_API_STATES,
 )
 from ...coresys import CoreSys, CoreSysAttributes
 from ...utils import version_is_new_enough
@@ -200,11 +200,7 @@ class SecurityMiddleware(CoreSysAttributes):
    @middleware
    async def system_validation(self, request: Request, handler: Callable) -> Response:
        """Check if core is ready to respond."""
-        if self.sys_core.state not in (
-            CoreState.STARTUP,
-            CoreState.RUNNING,
-            CoreState.FREEZE,
-        ):
+        if self.sys_core.state not in VALID_API_STATES:
            return api_return_error(
                message=f"System is not ready with state: {self.sys_core.state}"
            )

@@ -10,6 +10,7 @@ import voluptuous as vol

 from ..const import (
     ATTR_ACCESSPOINTS,
+    ATTR_ADDR_GEN_MODE,
     ATTR_ADDRESS,
     ATTR_AUTH,
     ATTR_CONNECTED,
@@ -22,6 +23,7 @@ from ..const import (
     ATTR_ID,
     ATTR_INTERFACE,
     ATTR_INTERFACES,
+    ATTR_IP6_PRIVACY,
     ATTR_IPV4,
     ATTR_IPV6,
     ATTR_MAC,
@@ -38,15 +40,18 @@ from ..const import (
     ATTR_TYPE,
     ATTR_VLAN,
     ATTR_WIFI,
+    DOCKER_IPV4_NETWORK_MASK,
     DOCKER_NETWORK,
-    DOCKER_NETWORK_MASK,
 )
 from ..coresys import CoreSysAttributes
 from ..exceptions import APIError, APINotFound, HostNetworkNotFound
 from ..host.configuration import (
     AccessPoint,
     Interface,
+    InterfaceAddrGenMode,
+    InterfaceIp6Privacy,
     InterfaceMethod,
+    Ip6Setting,
     IpConfig,
     IpSetting,
     VlanConfig,
@@ -68,6 +73,8 @@ _SCHEMA_IPV6_CONFIG = vol.Schema(
     {
         vol.Optional(ATTR_ADDRESS): [vol.Coerce(IPv6Interface)],
         vol.Optional(ATTR_METHOD): vol.Coerce(InterfaceMethod),
+        vol.Optional(ATTR_ADDR_GEN_MODE): vol.Coerce(InterfaceAddrGenMode),
+        vol.Optional(ATTR_IP6_PRIVACY): vol.Coerce(InterfaceIp6Privacy),
         vol.Optional(ATTR_GATEWAY): vol.Coerce(IPv6Address),
         vol.Optional(ATTR_NAMESERVERS): [vol.Coerce(IPv6Address)],
     }
@@ -94,8 +101,8 @@ SCHEMA_UPDATE = vol.Schema(
 )


-def ipconfig_struct(config: IpConfig, setting: IpSetting) -> dict[str, Any]:
-    """Return a dict with information about ip configuration."""
+def ip4config_struct(config: IpConfig, setting: IpSetting) -> dict[str, Any]:
+    """Return a dict with information about IPv4 configuration."""
     return {
         ATTR_METHOD: setting.method,
         ATTR_ADDRESS: [address.with_prefixlen for address in config.address],
@@ -105,6 +112,19 @@ def ip4config_struct(config: IpConfig, setting: IpSetting) -> dict[str, Any]:
     }


+def ip6config_struct(config: IpConfig, setting: Ip6Setting) -> dict[str, Any]:
+    """Return a dict with information about IPv6 configuration."""
+    return {
+        ATTR_METHOD: setting.method,
+        ATTR_ADDR_GEN_MODE: setting.addr_gen_mode,
+        ATTR_IP6_PRIVACY: setting.ip6_privacy,
+        ATTR_ADDRESS: [address.with_prefixlen for address in config.address],
+        ATTR_NAMESERVERS: [str(address) for address in config.nameservers],
+        ATTR_GATEWAY: str(config.gateway) if config.gateway else None,
+        ATTR_READY: config.ready,
+    }
+
+
 def wifi_struct(config: WifiConfig) -> dict[str, Any]:
     """Return a dict with information about wifi configuration."""
     return {
@@ -132,10 +152,10 @@ def interface_struct(interface: Interface) -> dict[str, Any]:
         ATTR_CONNECTED: interface.connected,
         ATTR_PRIMARY: interface.primary,
         ATTR_MAC: interface.mac,
-        ATTR_IPV4: ipconfig_struct(interface.ipv4, interface.ipv4setting)
+        ATTR_IPV4: ip4config_struct(interface.ipv4, interface.ipv4setting)
         if interface.ipv4 and interface.ipv4setting
         else None,
-        ATTR_IPV6: ipconfig_struct(interface.ipv6, interface.ipv6setting)
+        ATTR_IPV6: ip6config_struct(interface.ipv6, interface.ipv6setting)
         if interface.ipv6 and interface.ipv6setting
         else None,
         ATTR_WIFI: wifi_struct(interface.wifi) if interface.wifi else None,
@@ -183,7 +203,7 @@ class APINetwork(CoreSysAttributes):
             ],
             ATTR_DOCKER: {
                 ATTR_INTERFACE: DOCKER_NETWORK,
-                ATTR_ADDRESS: str(DOCKER_NETWORK_MASK),
+                ATTR_ADDRESS: str(DOCKER_IPV4_NETWORK_MASK),
                 ATTR_GATEWAY: str(self.sys_docker.network.gateway),
                 ATTR_DNS: str(self.sys_docker.network.dns),
             },
@@ -212,25 +232,31 @@ class APINetwork(CoreSysAttributes):
        for key, config in body.items():
            if key == ATTR_IPV4:
                interface.ipv4setting = IpSetting(
-                    config.get(ATTR_METHOD, InterfaceMethod.STATIC),
-                    config.get(ATTR_ADDRESS, []),
-                    config.get(ATTR_GATEWAY),
-                    config.get(ATTR_NAMESERVERS, []),
+                    method=config.get(ATTR_METHOD, InterfaceMethod.STATIC),
+                    address=config.get(ATTR_ADDRESS, []),
+                    gateway=config.get(ATTR_GATEWAY),
+                    nameservers=config.get(ATTR_NAMESERVERS, []),
                )
            elif key == ATTR_IPV6:
-                interface.ipv6setting = IpSetting(
-                    config.get(ATTR_METHOD, InterfaceMethod.STATIC),
-                    config.get(ATTR_ADDRESS, []),
-                    config.get(ATTR_GATEWAY),
-                    config.get(ATTR_NAMESERVERS, []),
+                interface.ipv6setting = Ip6Setting(
+                    method=config.get(ATTR_METHOD, InterfaceMethod.STATIC),
+                    addr_gen_mode=config.get(
+                        ATTR_ADDR_GEN_MODE, InterfaceAddrGenMode.DEFAULT
+                    ),
+                    ip6_privacy=config.get(
+                        ATTR_IP6_PRIVACY, InterfaceIp6Privacy.DEFAULT
+                    ),
+                    address=config.get(ATTR_ADDRESS, []),
+                    gateway=config.get(ATTR_GATEWAY),
+                    nameservers=config.get(ATTR_NAMESERVERS, []),
                )
            elif key == ATTR_WIFI:
                interface.wifi = WifiConfig(
-                    config.get(ATTR_MODE, WifiMode.INFRASTRUCTURE),
-                    config.get(ATTR_SSID, ""),
-                    config.get(ATTR_AUTH, AuthMethod.OPEN),
-                    config.get(ATTR_PSK, None),
-                    None,
+                    mode=config.get(ATTR_MODE, WifiMode.INFRASTRUCTURE),
+                    ssid=config.get(ATTR_SSID, ""),
+                    auth=config.get(ATTR_AUTH, AuthMethod.OPEN),
+                    psk=config.get(ATTR_PSK, None),
+                    signal=None,
                )
            elif key == ATTR_ENABLED:
                interface.enabled = config
@@ -277,19 +303,25 @@ class APINetwork(CoreSysAttributes):
        ipv4_setting = None
        if ATTR_IPV4 in body:
            ipv4_setting = IpSetting(
-                body[ATTR_IPV4].get(ATTR_METHOD, InterfaceMethod.AUTO),
-                body[ATTR_IPV4].get(ATTR_ADDRESS, []),
-                body[ATTR_IPV4].get(ATTR_GATEWAY, None),
-                body[ATTR_IPV4].get(ATTR_NAMESERVERS, []),
+                method=body[ATTR_IPV4].get(ATTR_METHOD, InterfaceMethod.AUTO),
+                address=body[ATTR_IPV4].get(ATTR_ADDRESS, []),
+                gateway=body[ATTR_IPV4].get(ATTR_GATEWAY, None),
+                nameservers=body[ATTR_IPV4].get(ATTR_NAMESERVERS, []),
            )

        ipv6_setting = None
        if ATTR_IPV6 in body:
-            ipv6_setting = IpSetting(
-                body[ATTR_IPV6].get(ATTR_METHOD, InterfaceMethod.AUTO),
-                body[ATTR_IPV6].get(ATTR_ADDRESS, []),
-                body[ATTR_IPV6].get(ATTR_GATEWAY, None),
-                body[ATTR_IPV6].get(ATTR_NAMESERVERS, []),
+            ipv6_setting = Ip6Setting(
+                method=body[ATTR_IPV6].get(ATTR_METHOD, InterfaceMethod.AUTO),
+                addr_gen_mode=body[ATTR_IPV6].get(
+                    ATTR_ADDR_GEN_MODE, InterfaceAddrGenMode.DEFAULT
+                ),
+                ip6_privacy=body[ATTR_IPV6].get(
+                    ATTR_IP6_PRIVACY, InterfaceIp6Privacy.DEFAULT
+                ),
+                address=body[ATTR_IPV6].get(ATTR_ADDRESS, []),
+                gateway=body[ATTR_IPV6].get(ATTR_GATEWAY, None),
+                nameservers=body[ATTR_IPV6].get(ATTR_NAMESERVERS, []),
            )

        vlan_interface = Interface(

@@ -17,6 +17,7 @@ from ..const import (
     ATTR_ICON,
     ATTR_LOGGING,
     ATTR_MACHINE,
+    ATTR_MACHINE_ID,
     ATTR_NAME,
     ATTR_OPERATING_SYSTEM,
     ATTR_STATE,
@@ -48,6 +49,7 @@ class APIRoot(CoreSysAttributes):
             ATTR_OPERATING_SYSTEM: self.sys_host.info.operating_system,
             ATTR_FEATURES: self.sys_host.features,
             ATTR_MACHINE: self.sys_machine,
+            ATTR_MACHINE_ID: self.sys_machine_id,
             ATTR_ARCH: self.sys_arch.default,
             ATTR_STATE: self.sys_core.state,
             ATTR_SUPPORTED_ARCH: self.sys_arch.supported,
@@ -113,3 +115,8 @@ class APIRoot(CoreSysAttributes):
         await asyncio.shield(
             asyncio.gather(self.sys_updater.reload(), self.sys_store.reload())
         )
+
+    @api_process
+    async def reload_updates(self, request: web.Request) -> None:
+        """Refresh updater update information."""
+        await self.sys_updater.reload()

@@ -126,9 +126,7 @@ class APIStore(CoreSysAttributes):
         """Generate addon information."""

         installed = (
-            cast(Addon, self.sys_addons.get(addon.slug, local_only=True))
-            if addon.is_installed
-            else None
+            self.sys_addons.get_local_only(addon.slug) if addon.is_installed else None
         )

         data = {

@@ -17,6 +17,7 @@ from ..const import (
     ATTR_BLK_WRITE,
     ATTR_CHANNEL,
     ATTR_CONTENT_TRUST,
+    ATTR_COUNTRY,
     ATTR_CPU_PERCENT,
     ATTR_DEBUG,
     ATTR_DEBUG_BLOCK,
@@ -48,11 +49,7 @@ from ..const import (
 from ..coresys import CoreSysAttributes
 from ..exceptions import APIError
 from ..store.validate import repositories
-from ..utils.blockbuster import (
-    activate_blockbuster,
-    blockbuster_enabled,
-    deactivate_blockbuster,
-)
+from ..utils.blockbuster import BlockBusterManager
 from ..utils.sentry import close_sentry, init_sentry
 from ..utils.validate import validate_timezone
 from ..validate import version_tag, wait_boot
@@ -76,6 +73,7 @@ SCHEMA_OPTIONS = vol.Schema(
         vol.Optional(ATTR_FORCE_SECURITY): vol.Boolean(),
         vol.Optional(ATTR_AUTO_UPDATE): vol.Boolean(),
         vol.Optional(ATTR_DETECT_BLOCKING_IO): vol.Coerce(DetectBlockingIO),
+        vol.Optional(ATTR_COUNTRY): str,
     }
 )

@@ -108,7 +106,8 @@ class APISupervisor(CoreSysAttributes):
             ATTR_DEBUG_BLOCK: self.sys_config.debug_block,
             ATTR_DIAGNOSTICS: self.sys_config.diagnostics,
             ATTR_AUTO_UPDATE: self.sys_updater.auto_update,
-            ATTR_DETECT_BLOCKING_IO: blockbuster_enabled(),
+            ATTR_DETECT_BLOCKING_IO: BlockBusterManager.is_enabled(),
+            ATTR_COUNTRY: self.sys_config.country,
             # Deprecated
             ATTR_WAIT_BOOT: self.sys_config.wait_boot,
             ATTR_ADDONS: [
@@ -147,6 +146,9 @@ class APISupervisor(CoreSysAttributes):
         if ATTR_CHANNEL in body:
             self.sys_updater.channel = body[ATTR_CHANNEL]

+        if ATTR_COUNTRY in body:
+            self.sys_config.country = body[ATTR_COUNTRY]
+
         if ATTR_DEBUG in body:
             self.sys_config.debug = body[ATTR_DEBUG]

@@ -174,10 +176,10 @@ class APISupervisor(CoreSysAttributes):
                 detect_blocking_io = DetectBlockingIO.ON

             if detect_blocking_io == DetectBlockingIO.ON:
-                activate_blockbuster()
+                BlockBusterManager.activate()
             elif detect_blocking_io == DetectBlockingIO.OFF:
                 self.sys_config.detect_blocking_io = False
-                deactivate_blockbuster()
+                BlockBusterManager.deactivate()

         # Deprecated
         if ATTR_WAIT_BOOT in body:

@@ -40,7 +40,7 @@ class CpuArch(CoreSysAttributes):
     @property
     def supervisor(self) -> str:
         """Return supervisor arch."""
-        return self.sys_supervisor.arch
+        return self.sys_supervisor.arch or self._default_arch

     @property
     def supported(self) -> list[str]:
@@ -91,4 +91,14 @@ class CpuArch(CoreSysAttributes):
         for check, value in MAP_CPU.items():
             if cpu.startswith(check):
                 return value
-        return self.sys_supervisor.arch
+        if self.sys_supervisor.arch:
+            _LOGGER.warning(
+                "Unknown CPU architecture %s, falling back to Supervisor architecture.",
+                cpu,
+            )
+            return self.sys_supervisor.arch
+        _LOGGER.warning(
+            "Unknown CPU architecture %s, assuming CPU architecture equals Supervisor architecture.",
+            cpu,
+        )
+        return cpu

@@ -3,10 +3,10 @@
 import asyncio
 import hashlib
 import logging
-from typing import Any
+from typing import Any, TypedDict, cast

 from .addons.addon import Addon
-from .const import ATTR_ADDON, ATTR_PASSWORD, ATTR_TYPE, ATTR_USERNAME, FILE_HASSIO_AUTH
+from .const import ATTR_PASSWORD, ATTR_TYPE, ATTR_USERNAME, FILE_HASSIO_AUTH
 from .coresys import CoreSys, CoreSysAttributes
 from .exceptions import (
     AuthError,
@@ -21,6 +21,17 @@ from .validate import SCHEMA_AUTH_CONFIG
 _LOGGER: logging.Logger = logging.getLogger(__name__)


+class BackendAuthRequest(TypedDict):
+    """Model for a backend auth request.
+
+    https://github.com/home-assistant/core/blob/ed9503324d9d255e6fb077f1614fb6d55800f389/homeassistant/components/hassio/auth.py#L66-L73
+    """
+
+    username: str
+    password: str
+    addon: str
+
+
 class Auth(FileConfiguration, CoreSysAttributes):
     """Manage SSO for Add-ons with Home Assistant user."""

@@ -74,6 +85,9 @@ class Auth(FileConfiguration, CoreSysAttributes):
         """Check username login."""
         if password is None:
             raise AuthError("None as password is not supported!", _LOGGER.error)
+        if username is None:
+            raise AuthError("None as username is not supported!", _LOGGER.error)

         _LOGGER.info("Auth request from '%s' for '%s'", addon.slug, username)

         # Get from cache
@@ -103,11 +117,12 @@ class Auth(FileConfiguration, CoreSysAttributes):
             async with self.sys_homeassistant.api.make_request(
                 "post",
                 "api/hassio_auth",
-                json={
-                    ATTR_USERNAME: username,
-                    ATTR_PASSWORD: password,
-                    ATTR_ADDON: addon.slug,
-                },
+                json=cast(
+                    dict[str, Any],
+                    BackendAuthRequest(
+                        username=username, password=password, addon=addon.slug
+                    ),
+                ),
             ) as req:
                 if req.status == 200:
                     _LOGGER.info("Successful login for '%s'", username)
@@ -145,13 +160,21 @@ class Auth(FileConfiguration, CoreSysAttributes):
     async def list_users(self) -> list[dict[str, Any]]:
         """List users on the Home Assistant instance."""
         try:
-            return await self.sys_homeassistant.websocket.async_send_command(
+            users: (
+                list[dict[str, Any]] | None
+            ) = await self.sys_homeassistant.websocket.async_send_command(
                 {ATTR_TYPE: "config/auth/list"}
             )
-        except HomeAssistantWSError:
-            _LOGGER.error("Can't request listing users on Home Assistant!")
+        except HomeAssistantWSError as err:
+            raise AuthListUsersError(
+                f"Can't request listing users on Home Assistant: {err}", _LOGGER.error
+            ) from err

-        raise AuthListUsersError()
+        if users is not None:
+            return users
+        raise AuthListUsersError(
+            "Can't request listing users on Home Assistant!", _LOGGER.error
+        )

     @staticmethod
     def _rehash(value: str, salt2: str = "") -> str:

@@ -18,8 +18,6 @@ import time
 from typing import Any, Self, cast

 from awesomeversion import AwesomeVersion, AwesomeVersionCompareException
-from cryptography.hazmat.backends import default_backend
-from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes
 from securetar import AddFileError, SecureTarFile, atomic_contents_add, secure_path
 import voluptuous as vol
 from voluptuous.humanize import humanize_error
@@ -62,9 +60,11 @@ from ..utils.dt import parse_datetime, utcnow
 from ..utils.json import json_bytes
 from ..utils.sentinel import DEFAULT
 from .const import BUF_SIZE, LOCATION_CLOUD_BACKUP, BackupType
-from .utils import key_to_iv, password_to_key
+from .utils import password_to_key
 from .validate import SCHEMA_BACKUP

+IGNORED_COMPARISON_FIELDS = {ATTR_PROTECTED, ATTR_CRYPTO, ATTR_DOCKER}
+
 _LOGGER: logging.Logger = logging.getLogger(__name__)

@@ -102,7 +102,6 @@ class Backup(JobGroup):
         self._tmp: TemporaryDirectory | None = None
         self._outer_secure_tarfile: SecureTarFile | None = None
         self._key: bytes | None = None
-        self._aes: Cipher | None = None
         self._locations: dict[str | None, BackupLocation] = {
             location: BackupLocation(
                 path=tar_file,
@@ -244,11 +243,6 @@ class Backup(JobGroup):
         """Return backup size in bytes."""
         return self._locations[self.location].size_bytes

-    @property
-    def is_new(self) -> bool:
-        """Return True if there is new."""
-        return not self.tarfile.exists()
-
     @property
     def tarfile(self) -> Path:
         """Return path to backup tarfile."""
@@ -273,7 +267,7 @@ class Backup(JobGroup):

         # Compare all fields except ones about protection. Current encryption status does not affect equality
         keys = self._data.keys() | other._data.keys()
-        for k in keys - {ATTR_PROTECTED, ATTR_CRYPTO, ATTR_DOCKER}:
+        for k in keys - IGNORED_COMPARISON_FIELDS:
             if (
                 k not in self._data
                 or k not in other._data
@@ -353,16 +347,10 @@ class Backup(JobGroup):
             self._init_password(password)
         else:
             self._key = None
-            self._aes = None

     def _init_password(self, password: str) -> None:
-        """Set password + init aes cipher."""
+        """Create key from password."""
         self._key = password_to_key(password)
-        self._aes = Cipher(
-            algorithms.AES(self._key),
-            modes.CBC(key_to_iv(self._key)),
-            backend=default_backend(),
-        )

     async def validate_backup(self, location: str | None) -> None:
         """Validate backup.
@@ -591,13 +579,21 @@ class Backup(JobGroup):
     @Job(name="backup_addon_save", cleanup=False)
     async def _addon_save(self, addon: Addon) -> asyncio.Task | None:
         """Store an add-on into backup."""
-        self.sys_jobs.current.reference = addon.slug
+        self.sys_jobs.current.reference = slug = addon.slug
         if not self._outer_secure_tarfile:
             raise RuntimeError(
                 "Cannot backup components without initializing backup tar"
             )

-        tar_name = f"{addon.slug}.tar{'.gz' if self.compressed else ''}"
+        # Ensure it is still installed and get current data before proceeding
+        if not (curr_addon := self.sys_addons.get_local_only(slug)):
+            _LOGGER.warning(
+                "Skipping backup of add-on %s because it has been uninstalled",
+                slug,
+            )
+            return None
+
+        tar_name = f"{slug}.tar{'.gz' if self.compressed else ''}"

         addon_file = self._outer_secure_tarfile.create_inner_tar(
             f"./{tar_name}",
@@ -606,18 +602,16 @@ class Backup(JobGroup):
         )
         # Take backup
         try:
-            start_task = await addon.backup(addon_file)
+            start_task = await curr_addon.backup(addon_file)
         except AddonsError as err:
-            raise BackupError(
-                f"Can't create backup for {addon.slug}", _LOGGER.error
-            ) from err
+            raise BackupError(str(err)) from err

         # Store to config
         self._data[ATTR_ADDONS].append(
             {
-                ATTR_SLUG: addon.slug,
-                ATTR_NAME: addon.name,
-                ATTR_VERSION: addon.version,
+                ATTR_SLUG: slug,
+                ATTR_NAME: curr_addon.name,
+                ATTR_VERSION: curr_addon.version,
                 # Bug - addon_file.size used to give us this information
                 # It always returns 0 in current securetar. Skipping until fixed
                 ATTR_SIZE: 0,
@@ -639,8 +633,11 @@ class Backup(JobGroup):
             try:
                 if start_task := await self._addon_save(addon):
                     start_tasks.append(start_task)
-            except Exception as err:  # pylint: disable=broad-except
-                _LOGGER.warning("Can't save Add-on %s: %s", addon.slug, err)
+            except BackupError as err:
+                err = BackupError(
+                    f"Can't backup add-on {addon.slug}: {str(err)}", _LOGGER.error
+                )
+                self.sys_jobs.current.capture_error(err)

         return start_tasks

@@ -769,16 +766,20 @@ class Backup(JobGroup):
             if await self.sys_run_in_executor(_save):
                 self._data[ATTR_FOLDERS].append(name)
         except (tarfile.TarError, OSError, AddFileError) as err:
-            raise BackupError(
-                f"Can't backup folder {name}: {str(err)}", _LOGGER.error
-            ) from err
+            raise BackupError(f"Can't write tarfile: {str(err)}") from err

     @Job(name="backup_store_folders", cleanup=False)
     async def store_folders(self, folder_list: list[str]):
         """Backup Supervisor data into backup."""
         # Save folders sequentially to avoid issues on slow IO
         for folder in folder_list:
-            await self._folder_save(folder)
+            try:
+                await self._folder_save(folder)
+            except BackupError as err:
+                err = BackupError(
+                    f"Can't backup folder {folder}: {str(err)}", _LOGGER.error
+                )
+                self.sys_jobs.current.capture_error(err)

     @Job(name="backup_folder_restore", cleanup=False)
     async def _folder_restore(self, name: str) -> None:
@@ -930,5 +931,5 @@ class Backup(JobGroup):
         Return a coroutine.
         """
         return self.sys_store.update_repositories(
-            self.repositories, add_with_errors=True, replace=replace
+            set(self.repositories), issue_on_error=True, replace=replace
         )

@@ -122,6 +122,25 @@ class BackupManager(FileConfiguration, JobGroup):

         return self.sys_config.path_backup

+    async def get_upload_path_for_location(self, location: LOCATION_TYPE) -> Path:
+        """Get a (temporary) upload path for a backup location."""
+        target_path = self._get_base_path(location)
+
+        # Return target path for mounts since tmp will always be local, mounts
+        # will never be the same device.
+        if location is not None and location != LOCATION_CLOUD_BACKUP:
+            return target_path
+
+        tmp_path = self.sys_config.path_tmp
+
+        def check_same_mount() -> bool:
+            """Check if the target path is on the same mount as the backup location."""
+            return target_path.stat().st_dev == tmp_path.stat().st_dev
+
+        if await self.sys_run_in_executor(check_same_mount):
+            return tmp_path
+        return target_path
+
     async def _check_location(self, location: LOCATION_TYPE | type[DEFAULT] = DEFAULT):
         """Check if backup location is accessible."""
         if location == DEFAULT and self.sys_mounts.default_backup_mount:
@@ -359,66 +378,69 @@ class BackupManager(FileConfiguration, JobGroup):
         if not backup.all_locations:
             del self._backups[backup.slug]

+    @Job(name="backup_copy_to_location", cleanup=False)
+    async def _copy_to_location(
+        self, backup: Backup, location: LOCATION_TYPE
+    ) -> tuple[str | None, Path]:
+        """Copy a backup file to the default location."""
+        location_name = location.name if isinstance(location, Mount) else location
+        self.sys_jobs.current.reference = location_name
+        try:
+            if location == LOCATION_CLOUD_BACKUP:
+                destination = self.sys_config.path_core_backup
+            elif location:
+                location_mount = cast(Mount, location)
+                if not location_mount.local_where.is_mount():
+                    raise BackupMountDownError(
+                        f"{location_mount.name} is down, cannot copy to it",
+                        _LOGGER.error,
+                    )
+                destination = location_mount.local_where
+            else:
+                destination = self.sys_config.path_backup
+
+            path = await self.sys_run_in_executor(copy, backup.tarfile, destination)
+            return (location_name, Path(path))
+        except OSError as err:
+            msg = f"Could not copy backup to {location_name} due to: {err!s}"
+
+            if err.errno == errno.EBADMSG and location in {
+                LOCATION_CLOUD_BACKUP,
+                None,
+            }:
+                raise BackupDataDiskBadMessageError(msg, _LOGGER.error) from err
+            raise BackupError(msg, _LOGGER.error) from err
+
     @Job(name="backup_copy_to_additional_locations", cleanup=False)
     async def _copy_to_additional_locations(
         self,
         backup: Backup,
         locations: list[LOCATION_TYPE],
     ):
         """Copy a backup file to additional locations."""

         all_new_locations: dict[str | None, Path] = {}
+        for location in locations:
+            try:
+                location_name, path = await self._copy_to_location(backup, location)
+                all_new_locations[location_name] = path
+            except BackupDataDiskBadMessageError as err:
+                self.sys_resolution.add_unhealthy_reason(
+                    UnhealthyReason.OSERROR_BAD_MESSAGE
+                )
+                self.sys_jobs.current.capture_error(err)
+            except BackupError as err:
+                self.sys_jobs.current.capture_error(err)

-        def copy_to_additional_locations() -> None:
-            """Copy backup file to additional locations."""
-            nonlocal all_new_locations
-            for location in locations:
-                try:
-                    if location == LOCATION_CLOUD_BACKUP:
-                        all_new_locations[LOCATION_CLOUD_BACKUP] = Path(
-                            copy(backup.tarfile, self.sys_config.path_core_backup)
-                        )
-                    elif location:
-                        location_mount = cast(Mount, location)
-                        if not location_mount.local_where.is_mount():
-                            raise BackupMountDownError(
-                                f"{location_mount.name} is down, cannot copy to it",
-                                _LOGGER.error,
-                            )
-                        all_new_locations[location_mount.name] = Path(
-                            copy(backup.tarfile, location_mount.local_where)
-                        )
-                    else:
-                        all_new_locations[None] = Path(
-                            copy(backup.tarfile, self.sys_config.path_backup)
-                        )
-                except OSError as err:
-                    msg = f"Could not copy backup to {location.name if isinstance(location, Mount) else location} due to: {err!s}"
-
-                    if err.errno == errno.EBADMSG and location in {
-                        LOCATION_CLOUD_BACKUP,
-                        None,
-                    }:
-                        raise BackupDataDiskBadMessageError(msg, _LOGGER.error) from err
-                    raise BackupError(msg, _LOGGER.error) from err
-
-        try:
-            await self.sys_run_in_executor(copy_to_additional_locations)
-        except BackupDataDiskBadMessageError:
-            self.sys_resolution.add_unhealthy_reason(
-                UnhealthyReason.OSERROR_BAD_MESSAGE
-            )
-            raise
-        finally:
-            backup.all_locations.update(
-                {
-                    loc: BackupLocation(
-                        path=path,
-                        protected=backup.protected,
-                        size_bytes=backup.size_bytes,
-                    )
-                    for loc, path in all_new_locations.items()
-                }
-            )
+        backup.all_locations.update(
+            {
+                loc: BackupLocation(
+                    path=path,
+                    protected=backup.protected,
+                    size_bytes=backup.size_bytes,
+                )
+                for loc, path in all_new_locations.items()
+            }
+        )

     @Job(name="backup_manager_import_backup")
     async def import_backup(
@@ -499,7 +521,8 @@ class BackupManager(FileConfiguration, JobGroup):
     ) -> Backup | None:
         """Create a backup.

-        Must be called from an existing backup job.
+        Must be called from an existing backup job. If the backup failed, the
+        backup file is deleted and None is returned.
         """
         addon_start_tasks: list[Awaitable[None]] | None = None

@@ -529,9 +552,12 @@ class BackupManager(FileConfiguration, JobGroup):
                 self._change_stage(BackupJobStage.FINISHING_FILE, backup)

         except BackupError as err:
+            await self.sys_run_in_executor(backup.tarfile.unlink, missing_ok=True)
+            _LOGGER.error("Backup %s error: %s", backup.slug, err)
             self.sys_jobs.current.capture_error(err)
             return None
         except Exception as err:  # pylint: disable=broad-except
+            await self.sys_run_in_executor(backup.tarfile.unlink, missing_ok=True)
             _LOGGER.exception("Backup %s error", backup.slug)
             await async_capture_exception(err)
             self.sys_jobs.current.capture_error(
@@ -543,12 +569,7 @@ class BackupManager(FileConfiguration, JobGroup):

         if additional_locations:
             self._change_stage(BackupJobStage.COPY_ADDITONAL_LOCATIONS, backup)
-            try:
-                await self._copy_to_additional_locations(
-                    backup, additional_locations
-                )
-            except BackupError as err:
-                self.sys_jobs.current.capture_error(err)
+            await self._copy_to_additional_locations(backup, additional_locations)

         if addon_start_tasks:
             self._change_stage(BackupJobStage.AWAIT_ADDON_RESTARTS, backup)

@@ -1,6 +1,7 @@
 """Bootstrap Supervisor."""

+# ruff: noqa: T100
 import asyncio
 from importlib import import_module
 import logging
 import os
@@ -54,6 +55,14 @@ async def initialize_coresys() -> CoreSys:
     """Initialize supervisor coresys/objects."""
     coresys = await CoreSys().load_config()

+    # Check if ENV is in development mode
+    if coresys.dev:
+        _LOGGER.warning("Environment variable 'SUPERVISOR_DEV' is set")
+        coresys.config.logging = LogLevel.DEBUG
+        coresys.config.debug = True
+    else:
+        coresys.config.modify_log_level()
+
     # Initialize core objects
     coresys.docker = await DockerAPI(coresys).post_init()
     coresys.resolution = await ResolutionManager(coresys).load_config()
@@ -70,7 +79,7 @@ async def initialize_coresys() -> CoreSys:
     coresys.addons = await AddonManager(coresys).load_config()
     coresys.backups = await BackupManager(coresys).load_config()
     coresys.host = await HostManager(coresys).post_init()
-    coresys.hardware = await HardwareManager(coresys).post_init()
+    coresys.hardware = await HardwareManager.create(coresys)
     coresys.ingress = await Ingress(coresys).load_config()
     coresys.tasks = Tasks(coresys)
     coresys.services = await ServiceManager(coresys).load_config()
@@ -93,15 +102,9 @@ async def initialize_coresys() -> CoreSys:
     # bootstrap config
     initialize_system(coresys)

-    # Check if ENV is in development mode
     if coresys.dev:
-        _LOGGER.warning("Environment variable 'SUPERVISOR_DEV' is set")
-        coresys.config.logging = LogLevel.DEBUG
-        coresys.config.debug = True
         coresys.updater.channel = UpdateChannel.DEV
         coresys.security.content_trust = False
-    else:
-        coresys.config.modify_log_level()

     # Convert datetime
     logging.Formatter.converter = lambda *args: coresys.now().timetuple()
@@ -282,8 +285,8 @@ def check_environment() -> None:
         _LOGGER.critical("Can't find Docker socket!")


-def reg_signal(loop, coresys: CoreSys) -> None:
-    """Register SIGTERM and SIGKILL to stop system."""
+def register_signal_handlers(loop: asyncio.AbstractEventLoop, coresys: CoreSys) -> None:
+    """Register SIGTERM, SIGHUP and SIGKILL to stop the Supervisor."""
     try:
         loop.add_signal_handler(
             signal.SIGTERM, lambda: loop.create_task(coresys.core.stop())

@@ -2,7 +2,7 @@

 from __future__ import annotations

-from collections.abc import Awaitable, Callable
+from collections.abc import Callable, Coroutine
 import logging
 from typing import Any

@@ -19,7 +19,7 @@ class EventListener:
     """Event listener."""

     event_type: BusEvent = attr.ib()
-    callback: Callable[[Any], Awaitable[None]] = attr.ib()
+    callback: Callable[[Any], Coroutine[Any, Any, None]] = attr.ib()


 class Bus(CoreSysAttributes):
@@ -31,7 +31,7 @@ class Bus(CoreSysAttributes):
         self._listeners: dict[BusEvent, list[EventListener]] = {}

     def register_event(
-        self, event: BusEvent, callback: Callable[[Any], Awaitable[None]]
+        self, event: BusEvent, callback: Callable[[Any], Coroutine[Any, Any, None]]
     ) -> EventListener:
         """Register callback for an event."""
         listener = EventListener(event, callback)

@@ -10,6 +10,7 @@ from awesomeversion import AwesomeVersion

 from .const import (
     ATTR_ADDONS_CUSTOM_LIST,
+    ATTR_COUNTRY,
     ATTR_DEBUG,
     ATTR_DEBUG_BLOCK,
     ATTR_DETECT_BLOCKING_IO,
@@ -65,7 +66,7 @@ _UTC = "UTC"
 class CoreConfig(FileConfiguration):
     """Hold all core config data."""

-    def __init__(self):
+    def __init__(self) -> None:
         """Initialize config object."""
         super().__init__(FILE_HASSIO_CONFIG, SCHEMA_SUPERVISOR_CONFIG)
         self._timezone_tzinfo: tzinfo | None = None
@@ -93,6 +94,20 @@ class CoreConfig(FileConfiguration):
             None, get_time_zone, value
         )

+    @property
+    def country(self) -> str | None:
+        """Return supervisor country.
+
+        The format follows what Home Assistant Core provides, which today is
+        ISO 3166-1 alpha-2.
+        """
+        return self._data.get(ATTR_COUNTRY)
+
+    @country.setter
+    def country(self, value: str | None) -> None:
+        """Set supervisor country."""
+        self._data[ATTR_COUNTRY] = value
+
     @property
     def version(self) -> AwesomeVersion:
         """Return supervisor version."""

@@ -2,16 +2,20 @@

 from dataclasses import dataclass
 from enum import StrEnum
-from ipaddress import ip_network
+from ipaddress import IPv4Network, IPv6Network
 from pathlib import Path
 from sys import version_info as systemversion
-from typing import Self
+from typing import NotRequired, Self, TypedDict

 from aiohttp import __version__ as aiohttpversion

 SUPERVISOR_VERSION = "9999.09.9.dev9999"
 SERVER_SOFTWARE = f"HomeAssistantSupervisor/{SUPERVISOR_VERSION} aiohttp/{aiohttpversion} Python/{systemversion[0]}.{systemversion[1]}"

+DOCKER_PREFIX: str = "hassio"
+OBSERVER_DOCKER_NAME: str = f"{DOCKER_PREFIX}_observer"
+SUPERVISOR_DOCKER_NAME: str = f"{DOCKER_PREFIX}_supervisor"
+
 URL_HASSIO_ADDONS = "https://github.com/home-assistant/addons"
 URL_HASSIO_APPARMOR = "https://version.home-assistant.io/apparmor_{channel}.txt"
 URL_HASSIO_VERSION = "https://version.home-assistant.io/{channel}.json"
@@ -41,8 +45,10 @@ SYSTEMD_JOURNAL_PERSISTENT = Path("/var/log/journal")
 SYSTEMD_JOURNAL_VOLATILE = Path("/run/log/journal")

 DOCKER_NETWORK = "hassio"
-DOCKER_NETWORK_MASK = ip_network("172.30.32.0/23")
-DOCKER_NETWORK_RANGE = ip_network("172.30.33.0/24")
+DOCKER_NETWORK_DRIVER = "bridge"
+DOCKER_IPV6_NETWORK_MASK = IPv6Network("fd0c:ac1e:2100::/48")
+DOCKER_IPV4_NETWORK_MASK = IPv4Network("172.30.32.0/23")
+DOCKER_IPV4_NETWORK_RANGE = IPv4Network("172.30.33.0/24")

 # This needs to match the dockerd --cpu-rt-runtime= argument.
 DOCKER_CPU_RUNTIME_TOTAL = 950_000
@@ -97,6 +103,7 @@ ATTR_ADDON = "addon"
 ATTR_ADDONS = "addons"
 ATTR_ADDONS_CUSTOM_LIST = "addons_custom_list"
 ATTR_ADDONS_REPOSITORIES = "addons_repositories"
+ATTR_ADDR_GEN_MODE = "addr_gen_mode"
 ATTR_ADDRESS = "address"
 ATTR_ADDRESS_DATA = "address-data"
 ATTR_ADMIN = "admin"
@@ -140,6 +147,7 @@ ATTR_CONNECTIONS = "connections"
 ATTR_CONTAINERS = "containers"
 ATTR_CONTENT = "content"
 ATTR_CONTENT_TRUST = "content_trust"
+ATTR_COUNTRY = "country"
 ATTR_CPE = "cpe"
 ATTR_CPU_PERCENT = "cpu_percent"
 ATTR_CRYPTO = "crypto"
@@ -170,6 +178,7 @@ ATTR_DOCKER_API = "docker_api"
 ATTR_DOCUMENTATION = "documentation"
 ATTR_DOMAINS = "domains"
 ATTR_ENABLE = "enable"
+ATTR_ENABLE_IPV6 = "enable_ipv6"
 ATTR_ENABLED = "enabled"
 ATTR_ENVIRONMENT = "environment"
 ATTR_EVENT = "event"
@@ -179,6 +188,7 @@ ATTR_FEATURES = "features"
 ATTR_FILENAME = "filename"
 ATTR_FLAGS = "flags"
 ATTR_FOLDERS = "folders"
+ATTR_FORCE = "force"
 ATTR_FORCE_SECURITY = "force_security"
 ATTR_FREQUENCY = "frequency"
 ATTR_FULL_ACCESS = "full_access"
@@ -219,6 +229,7 @@ ATTR_INSTALLED = "installed"
 ATTR_INTERFACE = "interface"
 ATTR_INTERFACES = "interfaces"
 ATTR_IP_ADDRESS = "ip_address"
+ATTR_IP6_PRIVACY = "ip6_privacy"
 ATTR_IPV4 = "ipv4"
 ATTR_IPV6 = "ipv6"
 ATTR_ISSUES = "issues"
@@ -236,6 +247,7 @@ ATTR_LOGO = "logo"
 ATTR_LONG_DESCRIPTION = "long_description"
 ATTR_MAC = "mac"
 ATTR_MACHINE = "machine"
+ATTR_MACHINE_ID = "machine_id"
 ATTR_MAINTAINER = "maintainer"
 ATTR_MAP = "map"
 ATTR_MEMORY_LIMIT = "memory_limit"
@@ -404,10 +416,12 @@ class AddonBoot(StrEnum):
     MANUAL = "manual"

     @classmethod
-    def _missing_(cls, value: str) -> Self | None:
+    def _missing_(cls, value: object) -> Self | None:
         """Convert 'forced' config values to their counterpart."""
         if value == AddonBootConfig.MANUAL_ONLY:
-            return AddonBoot.MANUAL
+            for member in cls:
+                if member == AddonBoot.MANUAL:
+                    return member
         return None


@@ -504,6 +518,16 @@ class CpuArch(StrEnum):
     AMD64 = "amd64"


+class IngressSessionDataUserDict(TypedDict):
+    """Response object for ingress session user."""
+
+    id: str
+    username: NotRequired[str | None]
+    # Name is an alias for displayname, only one should be used
+    displayname: NotRequired[str | None]
+    name: NotRequired[str | None]
+
+
 @dataclass
 class IngressSessionDataUser:
     """Format of an IngressSessionDataUser object."""
@@ -512,38 +536,42 @@ class IngressSessionDataUser:
     display_name: str | None = None
     username: str | None = None

-    def to_dict(self) -> dict[str, str | None]:
+    def to_dict(self) -> IngressSessionDataUserDict:
         """Get dictionary representation."""
-        return {
-            ATTR_ID: self.id,
-            ATTR_DISPLAYNAME: self.display_name,
-            ATTR_USERNAME: self.username,
-        }
+        return IngressSessionDataUserDict(
+            id=self.id, displayname=self.display_name, username=self.username
+        )

     @classmethod
-    def from_dict(cls, data: dict[str, str | None]) -> Self:
+    def from_dict(cls, data: IngressSessionDataUserDict) -> Self:
         """Return object from dictionary representation."""
         return cls(
-            id=data[ATTR_ID],
-            display_name=data.get(ATTR_DISPLAYNAME),
-            username=data.get(ATTR_USERNAME),
+            id=data["id"],
+            display_name=data.get("displayname") or data.get("name"),
+            username=data.get("username"),
         )


+class IngressSessionDataDict(TypedDict):
+    """Response object for ingress session data."""
+
+    user: IngressSessionDataUserDict
+
+
 @dataclass
 class IngressSessionData:
     """Format of an IngressSessionData object."""

     user: IngressSessionDataUser

-    def to_dict(self) -> dict[str, dict[str, str | None]]:
+    def to_dict(self) -> IngressSessionDataDict:
         """Get dictionary representation."""
-        return {ATTR_USER: self.user.to_dict()}
+        return IngressSessionDataDict(user=self.user.to_dict())

     @classmethod
-    def from_dict(cls, data: dict[str, dict[str, str | None]]) -> Self:
+    def from_dict(cls, data: IngressSessionDataDict) -> Self:
         """Return object from dictionary representation."""
-        return cls(user=IngressSessionDataUser.from_dict(data[ATTR_USER]))
+        return cls(user=IngressSessionDataUser.from_dict(data["user"]))


 STARTING_STATES = [
@@ -551,3 +579,12 @@
     CoreState.STARTUP,
     CoreState.SETUP,
 ]
+
+# States in which the API can be used (enforced by system_validation())
+VALID_API_STATES = frozenset(
+    {
+        CoreState.STARTUP,
+        CoreState.RUNNING,
+        CoreState.FREEZE,
+    }
+)

@@ -28,7 +28,7 @@ from .homeassistant.core import LANDINGPAGE
 from .resolution.const import ContextType, IssueType, SuggestionType, UnhealthyReason
 from .utils.dt import utcnow
 from .utils.sentry import async_capture_exception
-from .utils.whoami import WhoamiData, retrieve_whoami
+from .utils.whoami import retrieve_whoami

 _LOGGER: logging.Logger = logging.getLogger(__name__)

@@ -36,7 +36,7 @@ _LOGGER: logging.Logger = logging.getLogger(__name__)
 class Core(CoreSysAttributes):
     """Main object of Supervisor."""

-    def __init__(self, coresys: CoreSys):
+    def __init__(self, coresys: CoreSys) -> None:
         """Initialize Supervisor object."""
         self.coresys: CoreSys = coresys
         self._state: CoreState = CoreState.INITIALIZE
@@ -91,7 +91,7 @@ class Core(CoreSysAttributes):
             "info", {"state": self._state}
         )

-    async def connect(self):
+    async def connect(self) -> None:
         """Connect Supervisor container."""
         # Load information from container
         await self.sys_supervisor.load()
@@ -120,10 +120,23 @@ class Core(CoreSysAttributes):
         self.sys_config.version = self.sys_supervisor.version
         await self.sys_config.save_data()

-    async def setup(self):
+    async def setup(self) -> None:
         """Start setting up supervisor orchestration."""
         await self.set_state(CoreState.SETUP)

+        # Initialize websession early. At this point we'll use the Docker DNS proxy
+        # at 127.0.0.11, which does not have the fallback feature and hence might
+        # fail in certain environments. But a websession is required to get the
+        # initial version information after a device wipe or otherwise empty state
+        # (e.g. CI environment, Supervised).
+        #
+        # An OS installation has the plug-in container images pre-installed, so
+        # setup can continue even if this early websession fails to connect to the
+        # internet. We'll reinitialize the websession when the DNS plug-in is up to
+        # make sure the DNS plug-in along with its fallback capabilities is used
+        # (see #5857).
+        await self.coresys.init_websession()
+
         # Check internet on startup
         await self.sys_supervisor.check_connectivity()

@@ -175,7 +188,10 @@ class Core(CoreSysAttributes):
                 await setup_task
             except Exception as err:  # pylint: disable=broad-except
                 _LOGGER.critical(
-                    "Fatal error happening on load Task %s: %s", setup_task, err
+                    "Fatal error happening on load Task %s: %s",
+                    setup_task,
+                    err,
+                    exc_info=True,
                 )
                 self.sys_resolution.add_unhealthy_reason(UnhealthyReason.SETUP)
                 await async_capture_exception(err)
@@ -200,7 +216,7 @@ class Core(CoreSysAttributes):
         # Evaluate the system
         await self.sys_resolution.evaluate.evaluate_system()

-    async def start(self):
+    async def start(self) -> None:
         """Start Supervisor orchestration."""
         await self.set_state(CoreState.STARTUP)

@@ -224,10 +240,10 @@ class Core(CoreSysAttributes):
                 await self.sys_supervisor.update()
                 return

-        # Start addon mark as initialize
-        await self.sys_addons.boot(AddonStartup.INITIALIZE)
-
         try:
+            # Start addon mark as initialize
+            await self.sys_addons.boot(AddonStartup.INITIALIZE)
+
             # HomeAssistant is already running, only Supervisor restarted
             if await self.sys_hardware.helper.last_boot() == self.sys_config.last_boot:
                 _LOGGER.info("Detected Supervisor restart")
@@ -294,7 +310,7 @@ class Core(CoreSysAttributes):
         )
         _LOGGER.info("Supervisor is up and running")

-    async def stop(self):
+    async def stop(self) -> None:
         """Stop a running orchestration."""
         # store new last boot / prevent time adjustments
         if self.state in (CoreState.RUNNING, CoreState.SHUTDOWN):
@@ -342,7 +358,7 @@ class Core(CoreSysAttributes):
         _LOGGER.info("Supervisor is down - %d", self.exit_code)
         self.sys_loop.stop()

-    async def shutdown(self, *, remove_homeassistant_container: bool = False):
+    async def shutdown(self, *, remove_homeassistant_container: bool = False) -> None:
         """Shutdown all running containers in correct order."""
         # don't process scheduler anymore
         if self.state == CoreState.RUNNING:
@@ -366,19 +382,15 @@ class Core(CoreSysAttributes):
         if self.state in (CoreState.STOPPING, CoreState.SHUTDOWN):
             await self.sys_plugins.shutdown()

-    async def _update_last_boot(self):
+    async def _update_last_boot(self) -> None:
         """Update last boot time."""
-        self.sys_config.last_boot = await self.sys_hardware.helper.last_boot()
+        if not (last_boot := await self.sys_hardware.helper.last_boot()):
+            _LOGGER.error("Could not update last boot information!")
+            return
+        self.sys_config.last_boot = last_boot
         await self.sys_config.save_data()

-    async def _retrieve_whoami(self, with_ssl: bool) -> WhoamiData | None:
-        try:
-            return await retrieve_whoami(self.sys_websession, with_ssl)
-        except WhoamiSSLError:
-            _LOGGER.info("Whoami service SSL error")
-            return None
-
-    async def _adjust_system_datetime(self):
+    async def _adjust_system_datetime(self) -> None:
         """Adjust system time/date on startup."""
         # If no timezone is detected or set
         # If we are not connected or time sync
@@ -390,11 +402,13 @@ class Core(CoreSysAttributes):

         # Get Timezone data
         try:
-            data = await self._retrieve_whoami(True)
+            try:
+                data = await retrieve_whoami(self.sys_websession, True)
+            except WhoamiSSLError:
+                # SSL Date Issue & possible time drift
+                _LOGGER.info("Whoami service SSL error")
+                data = await retrieve_whoami(self.sys_websession, False)

-            # SSL Date Issue & possible time drift
-            if not data:
-                data = await self._retrieve_whoami(False)
         except WhoamiError as err:
             _LOGGER.warning("Can't adjust Time/Date settings: %s", err)
             return
@@ -410,7 +424,7 @@ class Core(CoreSysAttributes):
             await self.sys_host.control.set_datetime(data.dt_utc)
             await self.sys_supervisor.check_connectivity()

-    async def repair(self):
+    async def repair(self) -> None:
         """Repair system integrity."""
         _LOGGER.info("Starting repair of Supervisor Environment")
         await self.sys_run_in_executor(self.sys_docker.repair)

@ -13,6 +13,7 @@ from types import MappingProxyType
|
||||
from typing import TYPE_CHECKING, Any, Self, TypeVar
|
||||
|
||||
import aiohttp
|
||||
from pycares import AresError
|
||||
|
||||
from .config import CoreConfig
|
||||
from .const import (
|
||||
@ -21,6 +22,7 @@ from .const import (
|
||||
ENV_SUPERVISOR_MACHINE,
|
||||
MACHINE_ID,
|
||||
SERVER_SOFTWARE,
|
||||
VALID_API_STATES,
|
||||
)
|
||||
|
||||
if TYPE_CHECKING:
|
||||
@ -60,18 +62,17 @@ _LOGGER: logging.Logger = logging.getLogger(__name__)
|
||||
class CoreSys:
|
||||
"""Class that handle all shared data."""
|
||||
|
||||
def __init__(self):
|
||||
def __init__(self) -> None:
|
||||
"""Initialize coresys."""
|
||||
# Static attributes protected
|
||||
self._machine_id: str | None = None
|
||||
self._machine: str | None = None
|
||||
|
||||
# External objects
|
||||
self._loop: asyncio.BaseEventLoop = asyncio.get_running_loop()
|
||||
self._websession = None
|
||||
self._loop = asyncio.get_running_loop()
|
||||
|
||||
# Global objects
|
||||
self._config: CoreConfig = CoreConfig()
|
||||
self._config = CoreConfig()
|
||||
|
||||
# Internal objects pointers
|
||||
self._docker: DockerAPI | None = None
|
||||
@ -100,9 +101,7 @@ class CoreSys:
|
||||
self._security: Security | None = None
|
||||
self._bus: Bus | None = None
|
||||
self._mounts: MountManager | None = None
|
||||
|
||||
# Setup aiohttp session
|
||||
self.create_websession()
|
||||
self._websession: aiohttp.ClientSession | None = None
|
||||
|
||||
# Task factory attributes
|
||||
self._set_task_context: list[Callable[[Context], Context]] = []
|
||||
@ -112,7 +111,44 @@ class CoreSys:
|
||||
await self.config.read_data()
|
||||
return self
|
||||
|
||||
async def init_machine(self):
|
||||
async def init_websession(self) -> None:
|
||||
"""Initialize global aiohttp ClientSession."""
|
||||
if self.core.state in VALID_API_STATES:
|
||||
# Make sure we don't reinitialize the session if the API is running (see #5851)
|
||||
raise RuntimeError(
|
||||
"Initializing ClientSession is not safe when API is running"
|
||||
)
|
||||
|
||||
if self._websession:
|
||||
await self._websession.close()
|
||||
|
||||
resolver: aiohttp.abc.AbstractResolver
|
||||
try:
|
||||
# Use "unused" kwargs to force dedicated resolver instance. Otherwise
|
||||
# aiodns won't reload /etc/resolv.conf which we need to make our connection
|
||||
# check work in all cases.
|
||||
resolver = aiohttp.AsyncResolver(loop=self.loop, timeout=None)
|
||||
# pylint: disable=protected-access
|
||||
_LOGGER.debug(
|
||||
"Initializing ClientSession with AsyncResolver. Using nameservers %s",
|
||||
resolver._resolver.nameservers,
|
||||
)
|
||||
except AresError as err:
|
||||
_LOGGER.critical(
|
||||
"Unable to initialize async DNS resolver: %s", err, exc_info=True
|
||||
)
|
||||
resolver = aiohttp.ThreadedResolver(loop=self.loop)
|
||||
|
||||
connector = aiohttp.TCPConnector(loop=self.loop, resolver=resolver)
|
||||
|
||||
session = aiohttp.ClientSession(
|
||||
headers=MappingProxyType({aiohttp.hdrs.USER_AGENT: SERVER_SOFTWARE}),
|
||||
connector=connector,
|
||||
)
|
||||
|
||||
self._websession = session
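A minimal sketch of the resolver trick used above, assuming aiohttp with aiodns installed: passing any explicit keyword argument makes `AsyncResolver` build a dedicated aiodns resolver, which re-reads `/etc/resolv.conf` at construction instead of reusing a cached default.

```python
import aiohttp

async def make_session() -> aiohttp.ClientSession:
    # timeout=None is effectively a no-op, but it forces a fresh aiodns
    # resolver that picks up the current /etc/resolv.conf contents.
    resolver = aiohttp.AsyncResolver(timeout=None)
    connector = aiohttp.TCPConnector(resolver=resolver)
    return aiohttp.ClientSession(connector=connector)
```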

async def init_machine(self) -> None:
"""Initialize machine information."""

def _load_machine_id() -> str | None:
@@ -135,7 +171,7 @@ class CoreSys:
@property
def dev(self) -> bool:
"""Return True if we run dev mode."""
return bool(os.environ.get(ENV_SUPERVISOR_DEV, 0))
return bool(os.environ.get(ENV_SUPERVISOR_DEV) == "1")
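The old check is a classic truthiness bug: `os.environ.get()` returns a string, and any non-empty string, including `"0"`, is truthy. A small illustration (environment value hypothetical):

```python
import os

os.environ["SUPERVISOR_DEV"] = "0"
old_style = bool(os.environ.get("SUPERVISOR_DEV", 0))  # True - "0" is a non-empty string
new_style = os.environ.get("SUPERVISOR_DEV") == "1"    # False - explicit comparison
print(old_style, new_style)
```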

@property
def timezone(self) -> str:
@@ -156,13 +192,15 @@ class CoreSys:
return UTC

@property
def loop(self) -> asyncio.BaseEventLoop:
def loop(self) -> asyncio.AbstractEventLoop:
"""Return loop object."""
return self._loop

@property
def websession(self) -> aiohttp.ClientSession:
"""Return websession object."""
if self._websession is None:
raise RuntimeError("WebSession not setup yet")
return self._websession

@property
@@ -552,7 +590,7 @@ class CoreSys:
return self._machine_id

@machine_id.setter
def machine_id(self, value: str) -> None:
def machine_id(self, value: str | None) -> None:
"""Set a machine-id type string."""
if self._machine_id:
raise RuntimeError("Machine-ID type already set!")
@@ -574,24 +612,14 @@ class CoreSys:
self._set_task_context.append(callback)

def run_in_executor(
self, funct: Callable[..., T], *args: tuple[Any], **kwargs: dict[str, Any]
) -> Coroutine[Any, Any, T]:
self, funct: Callable[..., T], *args, **kwargs
) -> asyncio.Future[T]:
"""Add a job to the executor pool."""
if kwargs:
funct = partial(funct, **kwargs)

return self.loop.run_in_executor(None, funct, *args)
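`loop.run_in_executor()` does not accept keyword arguments and returns a Future rather than a coroutine, which is what the signature change above reflects. A self-contained sketch of the same pattern (function and argument names hypothetical):

```python
import asyncio
from functools import partial

def blocking_work(path: str, *, overwrite: bool = False) -> str:
    return f"{path} (overwrite={overwrite})"  # stand-in for blocking I/O

async def main() -> None:
    loop = asyncio.get_running_loop()
    # kwargs must be bound with functools.partial first, as in the code above.
    future = loop.run_in_executor(None, partial(blocking_work, overwrite=True), "/data")
    print(await future)  # a Future is awaitable, so callers can still await it

asyncio.run(main())
```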

def create_websession(self) -> None:
"""Create a new aiohttp session."""
if self._websession:
self.create_task(self._websession.close())

# Create session and set default header for aiohttp
self._websession: aiohttp.ClientSession = aiohttp.ClientSession(
headers=MappingProxyType({aiohttp.hdrs.USER_AGENT: SERVER_SOFTWARE})
)

def _create_context(self) -> Context:
"""Create a new context for a task."""
context = copy_context()
@@ -606,9 +634,9 @@ class CoreSys:
def call_later(
self,
delay: float,
funct: Callable[..., Coroutine[Any, Any, T]],
*args: tuple[Any],
**kwargs: dict[str, Any],
funct: Callable[..., Any],
*args,
**kwargs,
) -> asyncio.TimerHandle:
"""Start a task after a delay."""
if kwargs:
@@ -619,9 +647,9 @@ class CoreSys:
def call_at(
self,
when: datetime,
funct: Callable[..., Coroutine[Any, Any, T]],
*args: tuple[Any],
**kwargs: dict[str, Any],
funct: Callable[..., Any],
*args,
**kwargs,
) -> asyncio.TimerHandle:
"""Start a task at the specified datetime."""
if kwargs:
@@ -649,7 +677,7 @@ class CoreSysAttributes:

@property
def sys_machine_id(self) -> str | None:
"""Return machine id."""
"""Return machine ID."""
return self.coresys.machine_id

@property
@@ -658,7 +686,7 @@ class CoreSysAttributes:
return self.coresys.dev

@property
def sys_loop(self) -> asyncio.BaseEventLoop:
def sys_loop(self) -> asyncio.AbstractEventLoop:
"""Return loop object."""
return self.coresys.loop

@@ -808,7 +836,7 @@ class CoreSysAttributes:

def sys_run_in_executor(
self, funct: Callable[..., T], *args, **kwargs
) -> Coroutine[Any, Any, T]:
) -> asyncio.Future[T]:
"""Add a job to the executor pool."""
return self.coresys.run_in_executor(funct, *args, **kwargs)

@@ -819,7 +847,7 @@ class CoreSysAttributes:
def sys_call_later(
self,
delay: float,
funct: Callable[..., Coroutine[Any, Any, T]],
funct: Callable[..., Any],
*args,
**kwargs,
) -> asyncio.TimerHandle:
@@ -829,7 +857,7 @@ class CoreSysAttributes:
def sys_call_at(
self,
when: datetime,
funct: Callable[..., Coroutine[Any, Any, T]],
funct: Callable[..., Any],
*args,
**kwargs,
) -> asyncio.TimerHandle:

@@ -135,6 +135,7 @@ DBUS_ATTR_LAST_ERROR = "LastError"
DBUS_ATTR_LLMNR = "LLMNR"
DBUS_ATTR_LLMNR_HOSTNAME = "LLMNRHostname"
DBUS_ATTR_LOADER_TIMESTAMP_MONOTONIC = "LoaderTimestampMonotonic"
DBUS_ATTR_LOCAL_RTC = "LocalRTC"
DBUS_ATTR_MANAGED = "Managed"
DBUS_ATTR_MODE = "Mode"
DBUS_ATTR_MODEL = "Model"
@@ -210,6 +211,24 @@ class InterfaceMethod(StrEnum):
LINK_LOCAL = "link-local"


class InterfaceAddrGenMode(IntEnum):
"""Interface addr_gen_mode."""

EUI64 = 0
STABLE_PRIVACY = 1
DEFAULT_OR_EUI64 = 2
DEFAULT = 3


class InterfaceIp6Privacy(IntEnum):
"""Interface ip6_privacy."""

DEFAULT = -1
DISABLED = 0
ENABLED_PREFER_PUBLIC = 1
ENABLED = 2
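For orientation, these integer values appear to line up with NetworkManager's `ipv6.addr-gen-mode` and `ipv6.ip6-privacy` connection properties; a hedged mapping, based on the nm-settings documentation:

```python
# Assumed correspondence to NetworkManager connection properties.
ADDR_GEN_MODE = {0: "eui64", 1: "stable-privacy", 2: "default-or-eui64", 3: "default"}
IP6_PRIVACY = {-1: "unknown (use default)", 0: "disabled", 1: "prefer public", 2: "prefer temporary"}
```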


class ConnectionType(StrEnum):
"""Connection type."""

@@ -117,7 +117,7 @@ class DBusInterfaceProxy(DBusInterface, ABC):
"""Initialize object with already connected dbus object."""
await super().initialize(connected_dbus)

if not self.connected_dbus.properties:
if not self.connected_dbus.supports_properties:
self.disconnect()
raise DBusInterfaceError(
f"D-Bus object {self.object_path} is not usable, introspection is missing required properties interface"

@@ -8,7 +8,7 @@ from dbus_fast.aio.message_bus import MessageBus

from ..const import SOCKET_DBUS
from ..coresys import CoreSys, CoreSysAttributes
from ..exceptions import DBusFatalError
from ..exceptions import DBusFatalError, DBusNotConnectedError
from .agent import OSAgent
from .hostname import Hostname
from .interface import DBusInterface
@@ -91,6 +91,13 @@ class DBusManager(CoreSysAttributes):
"""Return the message bus."""
return self._bus

@property
def connected_bus(self) -> MessageBus:
"""Return the message bus. Raise if not connected."""
if not self._bus:
raise DBusNotConnectedError()
return self._bus

@property
def all(self) -> list[DBusInterface]:
"""Return all managed dbus interfaces."""

@@ -185,10 +185,14 @@ class NetworkManager(DBusInterfaceProxy):
if not changed and self.dns.is_connected:
await self.dns.update()

if changed and (
DBUS_ATTR_DEVICES not in changed
or {intr.object_path for intr in self.interfaces if intr.managed}.issubset(
set(changed[DBUS_ATTR_DEVICES])
if (
changed
and DBUS_ATTR_PRIMARY_CONNECTION not in changed
and (
DBUS_ATTR_DEVICES not in changed
or {
intr.object_path for intr in self.interfaces if intr.managed
}.issubset(set(changed[DBUS_ATTR_DEVICES]))
)
):
# If none of our managed devices were removed then most likely this is just veths changing.
@@ -255,7 +259,7 @@ class NetworkManager(DBusInterfaceProxy):
else:
interface.primary = False

interfaces[interface.name] = interface
interfaces[interface.interface_name] = interface
interfaces[interface.hw_address] = interface

# Disconnect removed devices

@@ -1,5 +1,6 @@
"""NetworkConnection objects for Network Manager."""

from abc import ABC
from dataclasses import dataclass
from ipaddress import IPv4Address, IPv6Address

@@ -29,7 +30,7 @@ class ConnectionProperties:
class WirelessProperties:
"""Wireless Properties object for Network Manager."""

ssid: str | None
ssid: str
assigned_mac: str | None
mode: str | None
powersave: int | None
@@ -55,7 +56,7 @@ class EthernetProperties:
class VlanProperties:
"""Vlan properties object for Network Manager."""

id: int | None
id: int
parent: str | None


@@ -67,14 +68,29 @@ class IpAddress:
prefix: int


@dataclass(slots=True)
class IpProperties:
@dataclass
class IpProperties(ABC):
"""IP properties object for Network Manager."""

method: str | None
address_data: list[IpAddress] | None
gateway: str | None
dns: list[bytes | int] | None


@dataclass(slots=True)
class Ip4Properties(IpProperties):
"""IPv4 properties object."""

dns: list[int] | None


@dataclass(slots=True)
class Ip6Properties(IpProperties):
"""IPv6 properties object for Network Manager."""

addr_gen_mode: int
ip6_privacy: int
dns: list[bytes] | None


@dataclass(slots=True)

@@ -96,7 +96,7 @@ class NetworkConnection(DBusInterfaceProxy):

@ipv4.setter
def ipv4(self, ipv4: IpConfiguration | None) -> None:
"""Set ipv4 configuration."""
"""Set IPv4 configuration."""
if self._ipv4 and self._ipv4 is not ipv4:
self._ipv4.shutdown()

@@ -109,7 +109,7 @@ class NetworkConnection(DBusInterfaceProxy):

@ipv6.setter
def ipv6(self, ipv6: IpConfiguration | None) -> None:
"""Set ipv6 configuration."""
"""Set IPv6 configuration."""
if self._ipv6 and self._ipv6 is not ipv6:
self._ipv6.shutdown()

@@ -49,7 +49,7 @@ class NetworkInterface(DBusInterfaceProxy):

@property
@dbus_property
def name(self) -> str:
def interface_name(self) -> str:
"""Return interface name."""
return self.properties[DBUS_ATTR_DEVICE_INTERFACE]

@@ -12,8 +12,9 @@ from ...utils import dbus_connected
from ..configuration import (
ConnectionProperties,
EthernetProperties,
Ip4Properties,
Ip6Properties,
IpAddress,
IpProperties,
MatchProperties,
VlanProperties,
WirelessProperties,
@@ -58,6 +59,8 @@ CONF_ATTR_IPV4_GATEWAY = "gateway"
CONF_ATTR_IPV4_DNS = "dns"

CONF_ATTR_IPV6_METHOD = "method"
CONF_ATTR_IPV6_ADDR_GEN_MODE = "addr-gen-mode"
CONF_ATTR_IPV6_PRIVACY = "ip6-privacy"
CONF_ATTR_IPV6_ADDRESS_DATA = "address-data"
CONF_ATTR_IPV6_GATEWAY = "gateway"
CONF_ATTR_IPV6_DNS = "dns"
@@ -69,6 +72,8 @@ IPV4_6_IGNORE_FIELDS = [
"dns-data",
"gateway",
"method",
"addr-gen-mode",
"ip6-privacy",
]

_LOGGER: logging.Logger = logging.getLogger(__name__)
@@ -110,8 +115,8 @@ class NetworkSetting(DBusInterface):
self._wireless_security: WirelessSecurityProperties | None = None
self._ethernet: EthernetProperties | None = None
self._vlan: VlanProperties | None = None
self._ipv4: IpProperties | None = None
self._ipv6: IpProperties | None = None
self._ipv4: Ip4Properties | None = None
self._ipv6: Ip6Properties | None = None
self._match: MatchProperties | None = None
super().__init__()

@@ -146,13 +151,13 @@ class NetworkSetting(DBusInterface):
return self._vlan

@property
def ipv4(self) -> IpProperties | None:
"""Return ipv4 properties if any."""
def ipv4(self) -> Ip4Properties | None:
"""Return IPv4 properties if any."""
return self._ipv4

@property
def ipv6(self) -> IpProperties | None:
"""Return ipv6 properties if any."""
def ipv6(self) -> Ip6Properties | None:
"""Return IPv6 properties if any."""
return self._ipv6

@property
@@ -223,66 +228,83 @@ class NetworkSetting(DBusInterface):
# See: https://developer-old.gnome.org/NetworkManager/stable/ch01.html
if CONF_ATTR_CONNECTION in data:
self._connection = ConnectionProperties(
data[CONF_ATTR_CONNECTION].get(CONF_ATTR_CONNECTION_ID),
data[CONF_ATTR_CONNECTION].get(CONF_ATTR_CONNECTION_UUID),
data[CONF_ATTR_CONNECTION].get(CONF_ATTR_CONNECTION_TYPE),
data[CONF_ATTR_CONNECTION].get(CONF_ATTR_CONNECTION_INTERFACE_NAME),
id=data[CONF_ATTR_CONNECTION].get(CONF_ATTR_CONNECTION_ID),
uuid=data[CONF_ATTR_CONNECTION].get(CONF_ATTR_CONNECTION_UUID),
type=data[CONF_ATTR_CONNECTION].get(CONF_ATTR_CONNECTION_TYPE),
interface_name=data[CONF_ATTR_CONNECTION].get(
CONF_ATTR_CONNECTION_INTERFACE_NAME
),
)

if CONF_ATTR_802_ETHERNET in data:
self._ethernet = EthernetProperties(
data[CONF_ATTR_802_ETHERNET].get(CONF_ATTR_802_ETHERNET_ASSIGNED_MAC),
assigned_mac=data[CONF_ATTR_802_ETHERNET].get(
CONF_ATTR_802_ETHERNET_ASSIGNED_MAC
),
)

if CONF_ATTR_802_WIRELESS in data:
self._wireless = WirelessProperties(
bytes(
ssid=bytes(
data[CONF_ATTR_802_WIRELESS].get(CONF_ATTR_802_WIRELESS_SSID, [])
).decode(),
data[CONF_ATTR_802_WIRELESS].get(CONF_ATTR_802_WIRELESS_ASSIGNED_MAC),
data[CONF_ATTR_802_WIRELESS].get(CONF_ATTR_802_WIRELESS_MODE),
data[CONF_ATTR_802_WIRELESS].get(CONF_ATTR_802_WIRELESS_POWERSAVE),
assigned_mac=data[CONF_ATTR_802_WIRELESS].get(
CONF_ATTR_802_WIRELESS_ASSIGNED_MAC
),
mode=data[CONF_ATTR_802_WIRELESS].get(CONF_ATTR_802_WIRELESS_MODE),
powersave=data[CONF_ATTR_802_WIRELESS].get(
CONF_ATTR_802_WIRELESS_POWERSAVE
),
)

if CONF_ATTR_802_WIRELESS_SECURITY in data:
self._wireless_security = WirelessSecurityProperties(
data[CONF_ATTR_802_WIRELESS_SECURITY].get(
auth_alg=data[CONF_ATTR_802_WIRELESS_SECURITY].get(
CONF_ATTR_802_WIRELESS_SECURITY_AUTH_ALG
),
data[CONF_ATTR_802_WIRELESS_SECURITY].get(
key_mgmt=data[CONF_ATTR_802_WIRELESS_SECURITY].get(
CONF_ATTR_802_WIRELESS_SECURITY_KEY_MGMT
),
data[CONF_ATTR_802_WIRELESS_SECURITY].get(
psk=data[CONF_ATTR_802_WIRELESS_SECURITY].get(
CONF_ATTR_802_WIRELESS_SECURITY_PSK
),
)

if CONF_ATTR_VLAN in data:
self._vlan = VlanProperties(
data[CONF_ATTR_VLAN].get(CONF_ATTR_VLAN_ID),
data[CONF_ATTR_VLAN].get(CONF_ATTR_VLAN_PARENT),
)
if CONF_ATTR_VLAN_ID in data[CONF_ATTR_VLAN]:
self._vlan = VlanProperties(
data[CONF_ATTR_VLAN][CONF_ATTR_VLAN_ID],
data[CONF_ATTR_VLAN].get(CONF_ATTR_VLAN_PARENT),
)
else:
self._vlan = None
_LOGGER.warning(
"Network settings for vlan connection %s missing required vlan id, cannot process it",
self.connection.interface_name,
)

if CONF_ATTR_IPV4 in data:
address_data = None
if ips := data[CONF_ATTR_IPV4].get(CONF_ATTR_IPV4_ADDRESS_DATA):
address_data = [IpAddress(ip["address"], ip["prefix"]) for ip in ips]
self._ipv4 = IpProperties(
data[CONF_ATTR_IPV4].get(CONF_ATTR_IPV4_METHOD),
address_data,
data[CONF_ATTR_IPV4].get(CONF_ATTR_IPV4_GATEWAY),
data[CONF_ATTR_IPV4].get(CONF_ATTR_IPV4_DNS),
self._ipv4 = Ip4Properties(
method=data[CONF_ATTR_IPV4].get(CONF_ATTR_IPV4_METHOD),
address_data=address_data,
gateway=data[CONF_ATTR_IPV4].get(CONF_ATTR_IPV4_GATEWAY),
dns=data[CONF_ATTR_IPV4].get(CONF_ATTR_IPV4_DNS),
)

if CONF_ATTR_IPV6 in data:
address_data = None
if ips := data[CONF_ATTR_IPV6].get(CONF_ATTR_IPV6_ADDRESS_DATA):
address_data = [IpAddress(ip["address"], ip["prefix"]) for ip in ips]
self._ipv6 = IpProperties(
data[CONF_ATTR_IPV6].get(CONF_ATTR_IPV6_METHOD),
address_data,
data[CONF_ATTR_IPV6].get(CONF_ATTR_IPV6_GATEWAY),
data[CONF_ATTR_IPV6].get(CONF_ATTR_IPV6_DNS),
self._ipv6 = Ip6Properties(
method=data[CONF_ATTR_IPV6].get(CONF_ATTR_IPV6_METHOD),
addr_gen_mode=data[CONF_ATTR_IPV6].get(CONF_ATTR_IPV6_ADDR_GEN_MODE),
ip6_privacy=data[CONF_ATTR_IPV6].get(CONF_ATTR_IPV6_PRIVACY),
address_data=address_data,
gateway=data[CONF_ATTR_IPV6].get(CONF_ATTR_IPV6_GATEWAY),
dns=data[CONF_ATTR_IPV6].get(CONF_ATTR_IPV6_DNS),
)

if CONF_ATTR_MATCH in data:

@@ -8,8 +8,13 @@ from uuid import uuid4

from dbus_fast import Variant

from ....host.configuration import VlanConfig
from ....host.const import InterfaceMethod, InterfaceType
from ....host.configuration import Ip6Setting, IpSetting, VlanConfig
from ....host.const import (
InterfaceAddrGenMode,
InterfaceIp6Privacy,
InterfaceMethod,
InterfaceType,
)
from .. import NetworkManager
from . import (
CONF_ATTR_802_ETHERNET,
@@ -36,10 +41,12 @@ from . import (
CONF_ATTR_IPV4_GATEWAY,
CONF_ATTR_IPV4_METHOD,
CONF_ATTR_IPV6,
CONF_ATTR_IPV6_ADDR_GEN_MODE,
CONF_ATTR_IPV6_ADDRESS_DATA,
CONF_ATTR_IPV6_DNS,
CONF_ATTR_IPV6_GATEWAY,
CONF_ATTR_IPV6_METHOD,
CONF_ATTR_IPV6_PRIVACY,
CONF_ATTR_MATCH,
CONF_ATTR_MATCH_PATH,
CONF_ATTR_VLAN,
@@ -51,7 +58,7 @@ if TYPE_CHECKING:
from ....host.configuration import Interface


def _get_ipv4_connection_settings(ipv4setting) -> dict:
def _get_ipv4_connection_settings(ipv4setting: IpSetting | None) -> dict:
ipv4 = {}
if not ipv4setting or ipv4setting.method == InterfaceMethod.AUTO:
ipv4[CONF_ATTR_IPV4_METHOD] = Variant("s", "auto")
@@ -93,10 +100,32 @@ def _get_ipv4_connection_settings(ipv4setting) -> dict:
return ipv4


def _get_ipv6_connection_settings(ipv6setting) -> dict:
def _get_ipv6_connection_settings(
ipv6setting: Ip6Setting | None, support_addr_gen_mode_defaults: bool = False
) -> dict:
ipv6 = {}
if not ipv6setting or ipv6setting.method == InterfaceMethod.AUTO:
ipv6[CONF_ATTR_IPV6_METHOD] = Variant("s", "auto")
if ipv6setting:
if ipv6setting.addr_gen_mode == InterfaceAddrGenMode.EUI64:
ipv6[CONF_ATTR_IPV6_ADDR_GEN_MODE] = Variant("i", 0)
elif (
not support_addr_gen_mode_defaults
or ipv6setting.addr_gen_mode == InterfaceAddrGenMode.STABLE_PRIVACY
):
ipv6[CONF_ATTR_IPV6_ADDR_GEN_MODE] = Variant("i", 1)
elif ipv6setting.addr_gen_mode == InterfaceAddrGenMode.DEFAULT_OR_EUI64:
ipv6[CONF_ATTR_IPV6_ADDR_GEN_MODE] = Variant("i", 2)
else:
ipv6[CONF_ATTR_IPV6_ADDR_GEN_MODE] = Variant("i", 3)
if ipv6setting.ip6_privacy == InterfaceIp6Privacy.DISABLED:
ipv6[CONF_ATTR_IPV6_PRIVACY] = Variant("i", 0)
elif ipv6setting.ip6_privacy == InterfaceIp6Privacy.ENABLED_PREFER_PUBLIC:
ipv6[CONF_ATTR_IPV6_PRIVACY] = Variant("i", 1)
elif ipv6setting.ip6_privacy == InterfaceIp6Privacy.ENABLED:
ipv6[CONF_ATTR_IPV6_PRIVACY] = Variant("i", 2)
else:
ipv6[CONF_ATTR_IPV6_PRIVACY] = Variant("i", -1)
elif ipv6setting.method == InterfaceMethod.DISABLED:
ipv6[CONF_ATTR_IPV6_METHOD] = Variant("s", "link-local")
elif ipv6setting.method == InterfaceMethod.STATIC:
@@ -183,7 +212,9 @@ def get_connection_from_interface(

conn[CONF_ATTR_IPV4] = _get_ipv4_connection_settings(interface.ipv4setting)

conn[CONF_ATTR_IPV6] = _get_ipv6_connection_settings(interface.ipv6setting)
conn[CONF_ATTR_IPV6] = _get_ipv6_connection_settings(
interface.ipv6setting, network_manager.version >= "1.40.0"
)
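A hedged sketch of what `_get_ipv6_connection_settings` produces for an auto-configured interface on NetworkManager >= 1.40, given the mapping above (values illustrative):

```python
from dbus_fast import Variant

example_ipv6 = {
    "method": Variant("s", "auto"),
    "addr-gen-mode": Variant("i", 1),  # InterfaceAddrGenMode.STABLE_PRIVACY
    "ip6-privacy": Variant("i", -1),   # InterfaceIp6Privacy.DEFAULT
}
```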

if interface.type == InterfaceType.ETHERNET:
conn[CONF_ATTR_802_ETHERNET] = {
@@ -191,8 +222,10 @@ def get_connection_from_interface(
}
elif interface.type == "vlan":
parent = cast(VlanConfig, interface.vlan).interface
if parent in network_manager and (
parent_connection := network_manager.get(parent).connection
if (
parent
and parent in network_manager
and (parent_connection := network_manager.get(parent).connection)
):
parent = parent_connection.uuid

@@ -10,6 +10,7 @@ from dbus_fast.aio.message_bus import MessageBus
from ..exceptions import DBusError, DBusInterfaceError, DBusServiceUnkownError
from ..utils.dt import get_time_zone, utc_from_timestamp
from .const import (
DBUS_ATTR_LOCAL_RTC,
DBUS_ATTR_NTP,
DBUS_ATTR_NTPSYNCHRONIZED,
DBUS_ATTR_TIMEUSEC,
@@ -46,6 +47,12 @@ class TimeDate(DBusInterfaceProxy):
"""Return host timezone."""
return self.properties[DBUS_ATTR_TIMEZONE]

@property
@dbus_property
def local_rtc(self) -> bool:
"""Return whether rtc is local time or utc."""
return self.properties[DBUS_ATTR_LOCAL_RTC]

@property
@dbus_property
def ntp(self) -> bool:

@@ -28,6 +28,8 @@ class DeviceSpecificationDataType(TypedDict, total=False):
path: str
label: str
uuid: str
partuuid: str
partlabel: str


@dataclass(slots=True)
@@ -40,6 +42,8 @@ class DeviceSpecification:
path: Path | None = None
label: str | None = None
uuid: str | None = None
partuuid: str | None = None
partlabel: str | None = None

@staticmethod
def from_dict(data: DeviceSpecificationDataType) -> "DeviceSpecification":
@@ -48,6 +52,8 @@ class DeviceSpecification:
path=Path(data["path"]) if "path" in data else None,
label=data.get("label"),
uuid=data.get("uuid"),
partuuid=data.get("partuuid"),
partlabel=data.get("partlabel"),
)

def to_dict(self) -> dict[str, Variant]:
@@ -56,6 +62,8 @@ class DeviceSpecification:
"path": Variant("s", self.path.as_posix()) if self.path else None,
"label": _optional_variant("s", self.label),
"uuid": _optional_variant("s", self.uuid),
"partuuid": _optional_variant("s", self.partuuid),
"partlabel": _optional_variant("s", self.partlabel),
}
return {k: v for k, v in data.items() if v}
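A usage sketch, assuming `DeviceSpecification` is imported from this module: because falsy entries are filtered out, a partlabel-only specification serializes to a single Variant.

```python
from dbus_fast import Variant

spec = DeviceSpecification(partlabel="hassos-data")  # hypothetical partition label
print(spec.to_dict())  # -> {"partlabel": Variant("s", "hassos-data")}
```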

@@ -2,17 +2,17 @@

from __future__ import annotations

from collections.abc import Awaitable
from contextlib import suppress
from ipaddress import IPv4Address, ip_address
from ipaddress import IPv4Address
import logging
import os
from pathlib import Path
from typing import TYPE_CHECKING
from typing import TYPE_CHECKING, cast

from attr import evolve
from awesomeversion import AwesomeVersion
import docker
import docker.errors
from docker.types import Mount
import requests

@@ -44,6 +44,7 @@ from ..jobs.decorator import Job
from ..resolution.const import CGROUP_V2_VERSION, ContextType, IssueType, SuggestionType
from ..utils.sentry import async_capture_exception
from .const import (
ADDON_BUILDER_IMAGE,
ENV_TIME,
ENV_TOKEN,
ENV_TOKEN_OLD,
@@ -73,7 +74,7 @@ if TYPE_CHECKING:

_LOGGER: logging.Logger = logging.getLogger(__name__)

NO_ADDDRESS = ip_address("0.0.0.0")
NO_ADDDRESS = IPv4Address("0.0.0.0")


class DockerAddon(DockerInterface):
@@ -101,10 +102,12 @@ class DockerAddon(DockerInterface):
"""Return IP address of this container."""
if self.addon.host_network:
return self.sys_docker.network.gateway
if not self._meta:
return NO_ADDDRESS

# Extract IP-Address
try:
return ip_address(
return IPv4Address(
self._meta["NetworkSettings"]["Networks"]["hassio"]["IPAddress"]
)
except (KeyError, TypeError, ValueError):
@@ -121,7 +124,7 @@ class DockerAddon(DockerInterface):
return self.addon.version

@property
def arch(self) -> str:
def arch(self) -> str | None:
"""Return arch of Docker image."""
if self.addon.legacy:
return self.sys_arch.default
@@ -133,9 +136,9 @@ class DockerAddon(DockerInterface):
return DockerAddon.slug_to_name(self.addon.slug)

@property
def environment(self) -> dict[str, str | None]:
def environment(self) -> dict[str, str | int | None]:
"""Return environment for Docker add-on."""
addon_env = self.addon.environment or {}
addon_env = cast(dict[str, str | int | None], self.addon.environment or {})

# Provide options for legacy add-ons
if self.addon.legacy:
@@ -336,14 +339,14 @@ class DockerAddon(DockerInterface):
"""Return mounts for container."""
addon_mapping = self.addon.map_volumes

target_data_path = ""
target_data_path: str | None = None
if MappingType.DATA in addon_mapping:
target_data_path = addon_mapping[MappingType.DATA].path

mounts = [
MOUNT_DEV,
Mount(
type=MountType.BIND,
type=MountType.BIND.value,
source=self.addon.path_extern_data.as_posix(),
target=target_data_path or PATH_PRIVATE_DATA.as_posix(),
read_only=False,
@@ -354,7 +357,7 @@ class DockerAddon(DockerInterface):
if MappingType.CONFIG in addon_mapping:
mounts.append(
Mount(
type=MountType.BIND,
type=MountType.BIND.value,
source=self.sys_config.path_extern_homeassistant.as_posix(),
target=addon_mapping[MappingType.CONFIG].path
or PATH_HOMEASSISTANT_CONFIG_LEGACY.as_posix(),
@@ -367,7 +370,7 @@ class DockerAddon(DockerInterface):
if self.addon.addon_config_used:
mounts.append(
Mount(
type=MountType.BIND,
type=MountType.BIND.value,
source=self.addon.path_extern_config.as_posix(),
target=addon_mapping[MappingType.ADDON_CONFIG].path
or PATH_PUBLIC_CONFIG.as_posix(),
@@ -379,7 +382,7 @@ class DockerAddon(DockerInterface):
if MappingType.HOMEASSISTANT_CONFIG in addon_mapping:
mounts.append(
Mount(
type=MountType.BIND,
type=MountType.BIND.value,
source=self.sys_config.path_extern_homeassistant.as_posix(),
target=addon_mapping[MappingType.HOMEASSISTANT_CONFIG].path
or PATH_HOMEASSISTANT_CONFIG.as_posix(),
@@ -392,7 +395,7 @@ class DockerAddon(DockerInterface):
if MappingType.ALL_ADDON_CONFIGS in addon_mapping:
mounts.append(
Mount(
type=MountType.BIND,
type=MountType.BIND.value,
source=self.sys_config.path_extern_addon_configs.as_posix(),
target=addon_mapping[MappingType.ALL_ADDON_CONFIGS].path
or PATH_ALL_ADDON_CONFIGS.as_posix(),
@@ -403,7 +406,7 @@ class DockerAddon(DockerInterface):
if MappingType.SSL in addon_mapping:
mounts.append(
Mount(
type=MountType.BIND,
type=MountType.BIND.value,
source=self.sys_config.path_extern_ssl.as_posix(),
target=addon_mapping[MappingType.SSL].path or PATH_SSL.as_posix(),
read_only=addon_mapping[MappingType.SSL].read_only,
@@ -413,7 +416,7 @@ class DockerAddon(DockerInterface):
if MappingType.ADDONS in addon_mapping:
mounts.append(
Mount(
type=MountType.BIND,
type=MountType.BIND.value,
source=self.sys_config.path_extern_addons_local.as_posix(),
target=addon_mapping[MappingType.ADDONS].path
or PATH_LOCAL_ADDONS.as_posix(),
@@ -424,7 +427,7 @@ class DockerAddon(DockerInterface):
if MappingType.BACKUP in addon_mapping:
mounts.append(
Mount(
type=MountType.BIND,
type=MountType.BIND.value,
source=self.sys_config.path_extern_backup.as_posix(),
target=addon_mapping[MappingType.BACKUP].path
or PATH_BACKUP.as_posix(),
@@ -435,7 +438,7 @@ class DockerAddon(DockerInterface):
if MappingType.SHARE in addon_mapping:
mounts.append(
Mount(
type=MountType.BIND,
type=MountType.BIND.value,
source=self.sys_config.path_extern_share.as_posix(),
target=addon_mapping[MappingType.SHARE].path
or PATH_SHARE.as_posix(),
@@ -447,7 +450,7 @@ class DockerAddon(DockerInterface):
if MappingType.MEDIA in addon_mapping:
mounts.append(
Mount(
type=MountType.BIND,
type=MountType.BIND.value,
source=self.sys_config.path_extern_media.as_posix(),
target=addon_mapping[MappingType.MEDIA].path
or PATH_MEDIA.as_posix(),
@@ -465,7 +468,7 @@ class DockerAddon(DockerInterface):
continue
mounts.append(
Mount(
type=MountType.BIND,
type=MountType.BIND.value,
source=gpio_path,
target=gpio_path,
read_only=False,
@@ -476,7 +479,7 @@ class DockerAddon(DockerInterface):
if self.addon.with_devicetree:
mounts.append(
Mount(
type=MountType.BIND,
type=MountType.BIND.value,
source="/sys/firmware/devicetree/base",
target="/device-tree",
read_only=True,
@@ -491,7 +494,7 @@ class DockerAddon(DockerInterface):
if self.addon.with_kernel_modules:
mounts.append(
Mount(
type=MountType.BIND,
type=MountType.BIND.value,
source="/lib/modules",
target="/lib/modules",
read_only=True,
@@ -510,19 +513,19 @@ class DockerAddon(DockerInterface):
if self.addon.with_audio:
mounts += [
Mount(
type=MountType.BIND,
type=MountType.BIND.value,
source=self.addon.path_extern_pulse.as_posix(),
target="/etc/pulse/client.conf",
read_only=True,
),
Mount(
type=MountType.BIND,
type=MountType.BIND.value,
source=self.sys_plugins.audio.path_extern_pulse.as_posix(),
target="/run/audio",
read_only=True,
),
Mount(
type=MountType.BIND,
type=MountType.BIND.value,
source=self.sys_plugins.audio.path_extern_asound.as_posix(),
target="/etc/asound.conf",
read_only=True,
@@ -533,13 +536,13 @@ class DockerAddon(DockerInterface):
if self.addon.with_journald:
mounts += [
Mount(
type=MountType.BIND,
type=MountType.BIND.value,
source=SYSTEMD_JOURNAL_PERSISTENT.as_posix(),
target=SYSTEMD_JOURNAL_PERSISTENT.as_posix(),
read_only=True,
),
Mount(
type=MountType.BIND,
type=MountType.BIND.value,
source=SYSTEMD_JOURNAL_VOLATILE.as_posix(),
target=SYSTEMD_JOURNAL_VOLATILE.as_posix(),
read_only=True,
@@ -672,36 +675,63 @@ class DockerAddon(DockerInterface):
_LOGGER.info("Starting build for %s:%s", self.image, version)

def build_image():
return self.sys_docker.images.build(
use_config_proxy=False, **build_env.get_docker_args(version, image)
if build_env.squash:
_LOGGER.warning(
"Ignoring squash build option for %s as Docker BuildKit does not support it.",
self.addon.slug,
)

addon_image_tag = f"{image or self.addon.image}:{version!s}"

docker_version = self.sys_docker.info.version
builder_version_tag = f"{docker_version.major}.{docker_version.minor}.{docker_version.micro}-cli"

builder_name = f"addon_builder_{self.addon.slug}"

# Remove dangling builder container if it exists by any chance
# E.g. because of an abrupt host shutdown/reboot during a build
with suppress(docker.errors.NotFound):
self.sys_docker.containers.get(builder_name).remove(force=True, v=True)

result = self.sys_docker.run_command(
ADDON_BUILDER_IMAGE,
version=builder_version_tag,
name=builder_name,
**build_env.get_docker_args(version, addon_image_tag),
)

logs = result.output.decode("utf-8")

if result.exit_code != 0:
error_message = f"Docker build failed for {addon_image_tag} (exit code {result.exit_code}). Build output:\n{logs}"
raise docker.errors.DockerException(error_message)

addon_image = self.sys_docker.images.get(addon_image_tag)

return addon_image, logs

try:
image, log = await self.sys_run_in_executor(build_image)
docker_image, log = await self.sys_run_in_executor(build_image)

_LOGGER.debug("Build %s:%s done: %s", self.image, version, log)

# Update meta data
self._meta = image.attrs
self._meta = docker_image.attrs

except (docker.errors.DockerException, requests.RequestException) as err:
_LOGGER.error("Can't build %s:%s: %s", self.image, version, err)
if hasattr(err, "build_log"):
log = "\n".join(
[
x["stream"]
for x in err.build_log  # pylint: disable=no-member
if isinstance(x, dict) and "stream" in x
]
)
_LOGGER.error("Build log: \n%s", log)
raise DockerError() from err

_LOGGER.info("Build %s:%s done", self.image, version)

def export_image(self, tar_file: Path) -> Awaitable[None]:
"""Export current images into a tar file."""
return self.sys_docker.export_image(self.image, self.version, tar_file)
def export_image(self, tar_file: Path) -> None:
"""Export current images into a tar file.

Must be run in executor.
"""
if not self.image:
raise RuntimeError("Cannot export without image!")
self.sys_docker.export_image(self.image, self.version, tar_file)
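A hedged usage sketch of the new contract: `export_image` is synchronous now ("must be run in executor"), so a caller dispatches it to the thread pool instead of awaiting it directly (object names hypothetical).

```python
from pathlib import Path

async def backup_addon_image(coresys, addon_docker) -> None:
    # The blocking export is moved off the event loop by the caller.
    await coresys.run_in_executor(addon_docker.export_image, Path("/backup/addon.tar"))
```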

@Job(
name="docker_addon_import_image",
@@ -805,15 +835,15 @@ class DockerAddon(DockerInterface):
):
self.sys_resolution.dismiss_issue(self.addon.device_access_missing_issue)

async def _validate_trust(
self, image_id: str, image: str, version: AwesomeVersion
) -> None:
async def _validate_trust(self, image_id: str) -> None:
"""Validate trust of content."""
if not self.addon.signed:
return

checksum = image_id.partition(":")[2]
return await self.sys_security.verify_content(self.addon.codenotary, checksum)
return await self.sys_security.verify_content(
cast(str, self.addon.codenotary), checksum
)

@Job(
name="docker_addon_hardware_events",
@@ -834,7 +864,8 @@ class DockerAddon(DockerInterface):
self.sys_docker.containers.get, self.name
)
except docker.errors.NotFound:
self.sys_bus.remove_listener(self._hw_listener)
if self._hw_listener:
self.sys_bus.remove_listener(self._hw_listener)
self._hw_listener = None
return
except (docker.errors.DockerException, requests.RequestException) as err:

@@ -47,7 +47,7 @@ class DockerAudio(DockerInterface, CoreSysAttributes):
mounts = [
MOUNT_DEV,
Mount(
type=MountType.BIND,
type=MountType.BIND.value,
source=self.sys_config.path_extern_audio.as_posix(),
target=PATH_PRIVATE_DATA.as_posix(),
read_only=False,

@@ -74,24 +74,26 @@ ENV_TOKEN_OLD = "HASSIO_TOKEN"
LABEL_MANAGED = "supervisor_managed"

MOUNT_DBUS = Mount(
type=MountType.BIND, source="/run/dbus", target="/run/dbus", read_only=True
type=MountType.BIND.value, source="/run/dbus", target="/run/dbus", read_only=True
)
MOUNT_DEV = Mount(
type=MountType.BIND.value, source="/dev", target="/dev", read_only=True
)
MOUNT_DEV = Mount(type=MountType.BIND, source="/dev", target="/dev", read_only=True)
MOUNT_DEV.setdefault("BindOptions", {})["ReadOnlyNonRecursive"] = True
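The `setdefault` call works because docker-py's `Mount` is a `dict` subclass, so API fields the constructor does not expose can still be injected before the payload is sent to the Docker daemon. A minimal sketch of the same pattern:

```python
from docker.types import Mount

# Mount is a dict subclass; BindOptions.ReadOnlyNonRecursive is not a
# constructor parameter, so it is set directly on the mapping instead.
dev_mount = Mount(type="bind", source="/dev", target="/dev", read_only=True)
dev_mount.setdefault("BindOptions", {})["ReadOnlyNonRecursive"] = True
```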
MOUNT_DOCKER = Mount(
type=MountType.BIND,
type=MountType.BIND.value,
source="/run/docker.sock",
target="/run/docker.sock",
read_only=True,
)
MOUNT_MACHINE_ID = Mount(
type=MountType.BIND,
type=MountType.BIND.value,
source=MACHINE_ID.as_posix(),
target=MACHINE_ID.as_posix(),
read_only=True,
)
MOUNT_UDEV = Mount(
type=MountType.BIND, source="/run/udev", target="/run/udev", read_only=True
type=MountType.BIND.value, source="/run/udev", target="/run/udev", read_only=True
)

PATH_PRIVATE_DATA = PurePath("/data")
@@ -105,3 +107,6 @@ PATH_BACKUP = PurePath("/backup")
PATH_SHARE = PurePath("/share")
PATH_MEDIA = PurePath("/media")
PATH_CLOUD_BACKUP = PurePath("/cloud_backup")

# https://hub.docker.com/_/docker
ADDON_BUILDER_IMAGE = "docker.io/library/docker"

@@ -48,7 +48,7 @@ class DockerDNS(DockerInterface, CoreSysAttributes):
environment={ENV_TIME: self.sys_timezone},
mounts=[
Mount(
type=MountType.BIND,
type=MountType.BIND.value,
source=self.sys_config.path_extern_dns.as_posix(),
target="/config",
read_only=False,

@@ -99,7 +99,7 @@ class DockerHomeAssistant(DockerInterface):
MOUNT_UDEV,
# HA config folder
Mount(
type=MountType.BIND,
type=MountType.BIND.value,
source=self.sys_config.path_extern_homeassistant.as_posix(),
target=PATH_PUBLIC_CONFIG.as_posix(),
read_only=False,
@@ -112,20 +112,20 @@ class DockerHomeAssistant(DockerInterface):
[
# All other folders
Mount(
type=MountType.BIND,
type=MountType.BIND.value,
source=self.sys_config.path_extern_ssl.as_posix(),
target=PATH_SSL.as_posix(),
read_only=True,
),
Mount(
type=MountType.BIND,
type=MountType.BIND.value,
source=self.sys_config.path_extern_share.as_posix(),
target=PATH_SHARE.as_posix(),
read_only=False,
propagation=PropagationMode.RSLAVE.value,
),
Mount(
type=MountType.BIND,
type=MountType.BIND.value,
source=self.sys_config.path_extern_media.as_posix(),
target=PATH_MEDIA.as_posix(),
read_only=False,
@@ -133,19 +133,19 @@ class DockerHomeAssistant(DockerInterface):
),
# Configuration audio
Mount(
type=MountType.BIND,
type=MountType.BIND.value,
source=self.sys_homeassistant.path_extern_pulse.as_posix(),
target="/etc/pulse/client.conf",
read_only=True,
),
Mount(
type=MountType.BIND,
type=MountType.BIND.value,
source=self.sys_plugins.audio.path_extern_pulse.as_posix(),
target="/run/audio",
read_only=True,
),
Mount(
type=MountType.BIND,
type=MountType.BIND.value,
source=self.sys_plugins.audio.path_extern_asound.as_posix(),
target="/etc/asound.conf",
read_only=True,
@@ -213,24 +213,21 @@ class DockerHomeAssistant(DockerInterface):
privileged=True,
init=True,
entrypoint=[],
detach=True,
stdout=True,
stderr=True,
mounts=[
Mount(
type=MountType.BIND,
type=MountType.BIND.value,
source=self.sys_config.path_extern_homeassistant.as_posix(),
target="/config",
read_only=False,
),
Mount(
type=MountType.BIND,
type=MountType.BIND.value,
source=self.sys_config.path_extern_ssl.as_posix(),
target="/ssl",
read_only=True,
),
Mount(
type=MountType.BIND,
type=MountType.BIND.value,
source=self.sys_config.path_extern_share.as_posix(),
target="/share",
read_only=False,
@@ -248,14 +245,12 @@ class DockerHomeAssistant(DockerInterface):
self.sys_homeassistant.version,
)

async def _validate_trust(
self, image_id: str, image: str, version: AwesomeVersion
) -> None:
async def _validate_trust(self, image_id: str) -> None:
"""Validate trust of content."""
try:
if version != LANDINGPAGE and version < _VERIFY_TRUST:
if self.version in {None, LANDINGPAGE} or self.version < _VERIFY_TRUST:
return
except AwesomeVersionCompareException:
return

await super()._validate_trust(image_id, image, version)
await super()._validate_trust(image_id)

@@ -2,13 +2,14 @@

from __future__ import annotations

from abc import ABC, abstractmethod
from collections import defaultdict
from collections.abc import Awaitable
from contextlib import suppress
import logging
import re
from time import time
from typing import Any
from typing import Any, cast
from uuid import uuid4

from awesomeversion import AwesomeVersion
@@ -79,7 +80,7 @@ def _container_state_from_model(docker_container: Container) -> ContainerState:
return ContainerState.STOPPED


class DockerInterface(JobGroup):
class DockerInterface(JobGroup, ABC):
"""Docker Supervisor interface."""

def __init__(self, coresys: CoreSys):
@@ -100,9 +101,9 @@ class DockerInterface(JobGroup):
return 10

@property
def name(self) -> str | None:
@abstractmethod
def name(self) -> str:
"""Return name of Docker container."""
return None

@property
def meta_config(self) -> dict[str, Any]:
@@ -153,7 +154,7 @@ class DockerInterface(JobGroup):
@property
def in_progress(self) -> bool:
"""Return True if a task is in progress."""
return self.active_job
return self.active_job is not None

@property
def restart_policy(self) -> RestartPolicy | None:
@@ -230,7 +231,10 @@ class DockerInterface(JobGroup):
) -> None:
"""Pull docker image."""
image = image or self.image
arch = arch or self.sys_arch.supervisor
if not image:
raise ValueError("Cannot pull without an image!")

image_arch = str(arch) if arch else self.sys_arch.supervisor

_LOGGER.info("Downloading docker image %s with tag %s.", image, version)
try:
@@ -242,12 +246,12 @@ class DockerInterface(JobGroup):
docker_image = await self.sys_run_in_executor(
self.sys_docker.images.pull,
f"{image}:{version!s}",
platform=MAP_ARCH[arch],
platform=MAP_ARCH[image_arch],
)

# Validate content
try:
await self._validate_trust(docker_image.id, image, version)
await self._validate_trust(cast(str, docker_image.id))
except CodeNotaryError:
with suppress(docker.errors.DockerException):
await self.sys_run_in_executor(
@@ -355,7 +359,7 @@ class DockerInterface(JobGroup):
self.sys_bus.fire_event(
BusEvent.DOCKER_CONTAINER_STATE_CHANGE,
DockerContainerStateEvent(
self.name, state, docker_container.id, int(time())
self.name, state, cast(str, docker_container.id), int(time())
),
)

@@ -451,10 +455,12 @@ class DockerInterface(JobGroup):
self,
version: AwesomeVersion,
expected_image: str,
expected_arch: CpuArch | None = None,
expected_cpu_arch: CpuArch | None = None,
) -> None:
"""Check we have expected image with correct arch."""
expected_arch = expected_arch or self.sys_arch.supervisor
expected_image_cpu_arch = (
str(expected_cpu_arch) if expected_cpu_arch else self.sys_arch.supervisor
)
image_name = f"{expected_image}:{version!s}"
if self.image == expected_image:
try:
@@ -472,13 +478,22 @@ class DockerInterface(JobGroup):
image_arch = f"{image_arch}/{image.attrs['Variant']}"

# If we have an image and it's the right arch, all set
if MAP_ARCH[expected_arch] == image_arch:
# It seems that newer Docker versions return a variant for arm64 images.
# Make sure we match linux/arm64 and linux/arm64/v8.
expected_image_arch = MAP_ARCH[expected_image_cpu_arch]
if image_arch.startswith(expected_image_arch):
return
_LOGGER.info(
"Image %s has arch %s, expected %s. Reinstalling.",
image_name,
image_arch,
expected_image_arch,
)

# We're missing the image we need. Stop and clean up what we have then pull the right one
with suppress(DockerError):
await self.remove()
await self.install(version, expected_image, arch=expected_arch)
await self.install(version, expected_image, arch=expected_image_cpu_arch)

@Job(
name="docker_interface_update",
@@ -613,9 +628,7 @@ class DockerInterface(JobGroup):
self.sys_docker.container_run_inside, self.name, command
)

async def _validate_trust(
self, image_id: str, image: str, version: AwesomeVersion
) -> None:
async def _validate_trust(self, image_id: str) -> None:
"""Validate trust of content."""
checksum = image_id.partition(":")[2]
return await self.sys_security.verify_own_content(checksum)
@@ -634,4 +647,4 @@ class DockerInterface(JobGroup):
except (docker.errors.DockerException, requests.RequestException):
return

await self._validate_trust(image.id, self.image, self.version)
await self._validate_trust(cast(str, image.id))

@@ -7,7 +7,7 @@ from ipaddress import IPv4Address
import logging
import os
from pathlib import Path
from typing import Any, Final, Self
from typing import Any, Final, Self, cast

import attr
from awesomeversion import AwesomeVersion, AwesomeVersionCompareException
@@ -22,6 +22,7 @@ from docker.types.daemon import CancellableStream
import requests

from ..const import (
ATTR_ENABLE_IPV6,
ATTR_REGISTRIES,
DNS_SUFFIX,
DOCKER_NETWORK,
@@ -83,7 +84,7 @@ class DockerInfo:
"""Return true, if CONFIG_RT_GROUP_SCHED is loaded."""
if not Path("/sys/fs/cgroup/cpu/cpu.rt_runtime_us").exists():
return False
return bool(os.environ.get(ENV_SUPERVISOR_CPU_RT, 0))
return bool(os.environ.get(ENV_SUPERVISOR_CPU_RT) == "1")


class DockerConfig(FileConfiguration):
@@ -93,6 +94,16 @@ class DockerConfig(FileConfiguration):
"""Initialize the JSON configuration."""
super().__init__(FILE_HASSIO_DOCKER, SCHEMA_DOCKER_CONFIG)

@property
def enable_ipv6(self) -> bool:
"""Return IPv6 configuration for docker network."""
return self._data.get(ATTR_ENABLE_IPV6, False)

@enable_ipv6.setter
def enable_ipv6(self, value: bool) -> None:
"""Set IPv6 configuration for docker network."""
self._data[ATTR_ENABLE_IPV6] = value

@property
def registries(self) -> dict[str, Any]:
"""Return credentials for docker registries."""
@@ -124,9 +135,11 @@ class DockerAPI:
timeout=900,
),
)
self._network = DockerNetwork(self._docker)
self._info = DockerInfo.new(self.docker.info())
await self.config.read_data()
self._network = await DockerNetwork(self.docker).post_init(
self.config.enable_ipv6
)
return self

@property
@@ -202,7 +215,7 @@ class DockerAPI:
if "labels" not in kwargs:
kwargs["labels"] = {}
elif isinstance(kwargs["labels"], list):
kwargs["labels"] = {label: "" for label in kwargs["labels"]}
kwargs["labels"] = dict.fromkeys(kwargs["labels"], "")

kwargs["labels"][LABEL_MANAGED] = ""

@@ -255,7 +268,7 @@ class DockerAPI:

# Check if container is registered on host
# https://github.com/moby/moby/issues/23302
if name in (
if name and name in (
val.get("Name")
for val in host_network.attrs.get("Containers", {}).values()
):
@@ -281,8 +294,8 @@ class DockerAPI:
def run_command(
self,
image: str,
tag: str = "latest",
command: str | None = None,
version: str = "latest",
command: str | list[str] | None = None,
**kwargs: Any,
) -> CommandReturn:
"""Create a temporary container and run command.
@@ -292,12 +305,15 @@ class DockerAPI:
stdout = kwargs.get("stdout", True)
stderr = kwargs.get("stderr", True)

_LOGGER.info("Running command '%s' on %s", command, image)
image_with_tag = f"{image}:{version}"

_LOGGER.info("Running command '%s' on %s", command, image_with_tag)
container = None
try:
container = self.docker.containers.run(
f"{image}:{tag}",
image_with_tag,
command=command,
detach=True,
network=self.network.name,
use_config_proxy=False,
**kwargs,
@@ -314,9 +330,9 @@ class DockerAPI:
# cleanup container
if container:
with suppress(docker_errors.DockerException, requests.RequestException):
container.remove(force=True)
container.remove(force=True, v=True)

return CommandReturn(result.get("StatusCode"), output)
return CommandReturn(result["StatusCode"], output)
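A hedged sketch of calling the reworked helper, against a `DockerAPI` instance assumed to be named `docker_api`: tag selection moved from `tag=` to `version=`, and `command` may now be a list.

```python
result = docker_api.run_command(
    "docker.io/library/docker",
    version="27.0.3-cli",                      # hypothetical builder tag
    command=["docker", "buildx", "version"],   # list commands now accepted
    name="addon_builder_demo",
)
if result.exit_code != 0:
    raise RuntimeError(result.output.decode("utf-8"))
```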
|
||||
|
||||
def repair(self) -> None:
|
||||
"""Repair local docker overlayfs2 issues."""
|
||||
@ -405,7 +421,8 @@ class DockerAPI:
|
||||
|
||||
# Check the image is correct and state is good
|
||||
return (
|
||||
docker_container.image.id == docker_image.id
|
||||
docker_container.image is not None
|
||||
and docker_container.image.id == docker_image.id
|
||||
and docker_container.status in ("exited", "running", "created")
|
||||
)
|
||||
|
||||
@ -428,7 +445,7 @@ class DockerAPI:
|
||||
if remove_container:
|
||||
with suppress(DockerException, requests.RequestException):
|
||||
_LOGGER.info("Cleaning %s application", name)
|
||||
docker_container.remove(force=True)
|
||||
docker_container.remove(force=True, v=True)
|
||||
|
||||
def start_container(self, name: str) -> None:
|
||||
"""Start Docker container."""
|
||||
@ -540,7 +557,7 @@ class DockerAPI:
|
||||
"""Import a tar file as image."""
|
||||
try:
|
||||
with tar_file.open("rb") as read_tar:
|
||||
docker_image_list: list[Image] = self.images.load(read_tar)
|
||||
docker_image_list: list[Image] = self.images.load(read_tar) # type: ignore
|
||||
|
||||
if len(docker_image_list) != 1:
|
||||
_LOGGER.warning(
|
||||
@ -557,7 +574,7 @@ class DockerAPI:
|
||||
def export_image(self, image: str, version: AwesomeVersion, tar_file: Path) -> None:
|
||||
"""Export current images into a tar file."""
|
||||
try:
|
||||
image = self.api.get_image(f"{image}:{version}")
|
||||
docker_image = self.api.get_image(f"{image}:{version}")
|
||||
except (DockerException, requests.RequestException) as err:
|
||||
raise DockerError(
|
||||
f"Can't fetch image {image}: {err}", _LOGGER.error
|
||||
@ -566,7 +583,7 @@ class DockerAPI:
|
||||
_LOGGER.info("Export image %s to %s", image, tar_file)
|
||||
try:
|
||||
with tar_file.open("wb") as write_tar:
|
||||
for chunk in image:
|
||||
for chunk in docker_image:
|
||||
write_tar.write(chunk)
|
||||
except (OSError, requests.RequestException) as err:
|
||||
raise DockerError(
|
||||
@ -586,7 +603,7 @@ class DockerAPI:
|
||||
"""Clean up old versions of an image."""
|
||||
image = f"{current_image}:{current_version!s}"
|
||||
try:
|
||||
keep: set[str] = {self.images.get(image).id}
|
||||
keep = {cast(str, self.images.get(image).id)}
|
||||
except ImageNotFound:
|
||||
raise DockerNotFound(
|
||||
f"{current_image} not found for cleanup", _LOGGER.warning
|
||||
@ -602,7 +619,7 @@ class DockerAPI:
|
||||
for image in keep_images:
|
||||
# If its not found, no need to preserve it from getting removed
|
||||
with suppress(ImageNotFound):
|
||||
keep.add(self.images.get(image).id)
|
||||
keep.add(cast(str, self.images.get(image).id))
|
||||
except (DockerException, requests.RequestException) as err:
|
||||
raise DockerError(
|
||||
f"Failed to get one or more images from {keep} during cleanup",
|
||||
@ -614,16 +631,18 @@ class DockerAPI:
|
||||
old_images | {current_image} if old_images else {current_image}
|
||||
)
|
||||
try:
|
||||
images_list = self.images.list(name=image_names)
|
||||
# This API accepts a list of image names. Tested and confirmed working on docker==7.1.0
|
||||
# Its typing does say only `str` though. Bit concerning, could an update break this?
|
||||
images_list = self.images.list(name=image_names) # type: ignore
|
||||
except (DockerException, requests.RequestException) as err:
|
||||
raise DockerError(
|
||||
f"Corrupt docker overlayfs found: {err}", _LOGGER.warning
|
||||
) from err
|
||||
|
||||
for image in images_list:
|
||||
if image.id in keep:
|
||||
for docker_image in images_list:
|
||||
if docker_image.id in keep:
|
||||
continue
|
||||
|
||||
with suppress(DockerException, requests.RequestException):
|
||||
_LOGGER.info("Cleanup images: %s", image.tags)
|
||||
self.images.remove(image.id, force=True)
|
||||
_LOGGER.info("Cleanup images: %s", docker_image.tags)
|
||||
self.images.remove(docker_image.id, force=True)
|
||||
|
@ -37,7 +37,7 @@ class DockerMonitor(CoreSysAttributes, Thread):
|
||||
|
||||
def watch_container(self, container: Container):
|
||||
"""If container is missing the managed label, add name to list."""
|
||||
if LABEL_MANAGED not in container.labels:
|
||||
if LABEL_MANAGED not in container.labels and container.name:
|
||||
self._unlabeled_managed_containers += [container.name]
|
||||
|
||||
async def load(self):
|
||||
@ -54,8 +54,11 @@ class DockerMonitor(CoreSysAttributes, Thread):
|
||||
|
||||
_LOGGER.info("Stopped docker events monitor")
|
||||
|
||||
def run(self):
|
||||
def run(self) -> None:
|
||||
"""Monitor and process docker events."""
|
||||
if not self._events:
|
||||
raise RuntimeError("Monitor has not been loaded!")
|
||||
|
||||
for event in self._events:
|
||||
attributes: dict[str, str] = event.get("Actor", {}).get("Attributes", {})
|
||||
|
||||
|
@@ -1,17 +1,52 @@
"""Internal network manager for Supervisor."""

+import asyncio
from contextlib import suppress
from ipaddress import IPv4Address
import logging
+from typing import Self

import docker
import requests

-from ..const import DOCKER_NETWORK, DOCKER_NETWORK_MASK, DOCKER_NETWORK_RANGE
+from ..const import (
+    ATTR_AUDIO,
+    ATTR_CLI,
+    ATTR_DNS,
+    ATTR_ENABLE_IPV6,
+    ATTR_OBSERVER,
+    ATTR_SUPERVISOR,
+    DOCKER_IPV4_NETWORK_MASK,
+    DOCKER_IPV4_NETWORK_RANGE,
+    DOCKER_IPV6_NETWORK_MASK,
+    DOCKER_NETWORK,
+    DOCKER_NETWORK_DRIVER,
+    DOCKER_PREFIX,
+    OBSERVER_DOCKER_NAME,
+    SUPERVISOR_DOCKER_NAME,
+)
from ..exceptions import DockerError

_LOGGER: logging.Logger = logging.getLogger(__name__)

+DOCKER_ENABLEIPV6 = "EnableIPv6"
+DOCKER_NETWORK_PARAMS = {
+    "name": DOCKER_NETWORK,
+    "driver": DOCKER_NETWORK_DRIVER,
+    "ipam": docker.types.IPAMConfig(
+        pool_configs=[
+            docker.types.IPAMPool(subnet=str(DOCKER_IPV6_NETWORK_MASK)),
+            docker.types.IPAMPool(
+                subnet=str(DOCKER_IPV4_NETWORK_MASK),
+                gateway=str(DOCKER_IPV4_NETWORK_MASK[1]),
+                iprange=str(DOCKER_IPV4_NETWORK_RANGE),
+            ),
+        ]
+    ),
+    ATTR_ENABLE_IPV6: True,
+    "options": {"com.docker.network.bridge.name": DOCKER_NETWORK},
+}
+

class DockerNetwork:
    """Internal Supervisor Network.

@@ -22,7 +57,14 @@ class DockerNetwork:
    def __init__(self, docker_client: docker.DockerClient):
        """Initialize internal Supervisor network."""
        self.docker: docker.DockerClient = docker_client
-        self._network: docker.models.networks.Network = self._get_network()
+        self._network: docker.models.networks.Network
+
+    async def post_init(self, enable_ipv6: bool = False) -> Self:
+        """Post init actions that must be done in event loop."""
+        self._network = await asyncio.get_running_loop().run_in_executor(
+            None, self._get_network, enable_ipv6
+        )
+        return self

    @property
    def name(self) -> str:

@@ -42,55 +84,101 @@ class DockerNetwork:
    @property
    def gateway(self) -> IPv4Address:
        """Return gateway of the network."""
-        return DOCKER_NETWORK_MASK[1]
+        return DOCKER_IPV4_NETWORK_MASK[1]

    @property
    def supervisor(self) -> IPv4Address:
        """Return supervisor of the network."""
-        return DOCKER_NETWORK_MASK[2]
+        return DOCKER_IPV4_NETWORK_MASK[2]

    @property
    def dns(self) -> IPv4Address:
        """Return dns of the network."""
-        return DOCKER_NETWORK_MASK[3]
+        return DOCKER_IPV4_NETWORK_MASK[3]

    @property
    def audio(self) -> IPv4Address:
        """Return audio of the network."""
-        return DOCKER_NETWORK_MASK[4]
+        return DOCKER_IPV4_NETWORK_MASK[4]

    @property
    def cli(self) -> IPv4Address:
        """Return cli of the network."""
-        return DOCKER_NETWORK_MASK[5]
+        return DOCKER_IPV4_NETWORK_MASK[5]

    @property
    def observer(self) -> IPv4Address:
        """Return observer of the network."""
-        return DOCKER_NETWORK_MASK[6]
+        return DOCKER_IPV4_NETWORK_MASK[6]

-    def _get_network(self) -> docker.models.networks.Network:
+    def _get_network(self, enable_ipv6: bool = False) -> docker.models.networks.Network:
        """Get supervisor network."""
        try:
-            return self.docker.networks.get(DOCKER_NETWORK)
+            if network := self.docker.networks.get(DOCKER_NETWORK):
+                if network.attrs.get(DOCKER_ENABLEIPV6) == enable_ipv6:
+                    return network
+                _LOGGER.info(
+                    "Migrating Supervisor network to %s",
+                    "IPv4/IPv6 Dual-Stack" if enable_ipv6 else "IPv4-Only",
+                )
+                if (containers := network.containers) and (
+                    containers_all := all(
+                        container.name in (OBSERVER_DOCKER_NAME, SUPERVISOR_DOCKER_NAME)
+                        for container in containers
+                    )
+                ):
+                    for container in containers:
+                        with suppress(
+                            docker.errors.APIError,
+                            docker.errors.DockerException,
+                            requests.RequestException,
+                        ):
+                            network.disconnect(container, force=True)
+                if not containers or containers_all:
+                    try:
+                        network.remove()
+                    except docker.errors.APIError:
+                        _LOGGER.warning("Failed to remove existing Supervisor network")
+                        return network
+                else:
+                    _LOGGER.warning(
+                        "System appears to be running, "
+                        "not applying Supervisor network change. "
+                        "Reboot your system to apply the change."
+                    )
+                    return network
        except docker.errors.NotFound:
            _LOGGER.info("Can't find Supervisor network, creating a new network")

-        ipam_pool = docker.types.IPAMPool(
-            subnet=str(DOCKER_NETWORK_MASK),
-            gateway=str(self.gateway),
-            iprange=str(DOCKER_NETWORK_RANGE),
-        )
+        network_params = DOCKER_NETWORK_PARAMS.copy()
+        network_params[ATTR_ENABLE_IPV6] = enable_ipv6

-        ipam_config = docker.types.IPAMConfig(pool_configs=[ipam_pool])
+        try:
+            self._network = self.docker.networks.create(**network_params)  # type: ignore
+        except docker.errors.APIError as err:
+            raise DockerError(
+                f"Can't create Supervisor network: {err}", _LOGGER.error
+            ) from err

-        return self.docker.networks.create(
-            DOCKER_NETWORK,
-            driver="bridge",
-            ipam=ipam_config,
-            enable_ipv6=False,
-            options={"com.docker.network.bridge.name": DOCKER_NETWORK},
-        )
+        with suppress(DockerError):
+            self.attach_container_by_name(
+                SUPERVISOR_DOCKER_NAME, [ATTR_SUPERVISOR], self.supervisor
+            )
+
+        with suppress(DockerError):
+            self.attach_container_by_name(
+                OBSERVER_DOCKER_NAME, [ATTR_OBSERVER], self.observer
+            )
+
+        for name, ip in (
+            (ATTR_CLI, self.cli),
+            (ATTR_DNS, self.dns),
+            (ATTR_AUDIO, self.audio),
+        ):
+            with suppress(DockerError):
+                self.attach_container_by_name(f"{DOCKER_PREFIX}_{name}", [name], ip)
+
+        return self._network

    def attach_container(
        self,

@@ -102,26 +190,55 @@ class DockerNetwork:

        Need run inside executor.
        """
-        ipv4_address = str(ipv4) if ipv4 else None

        # Reload Network information
        with suppress(docker.errors.DockerException, requests.RequestException):
            self.network.reload()

        # Check stale Network
-        if container.name in (
+        if container.name and container.name in (
            val.get("Name") for val in self.network.attrs.get("Containers", {}).values()
        ):
            self.stale_cleanup(container.name)

        # Attach Network
        try:
-            self.network.connect(container, aliases=alias, ipv4_address=ipv4_address)
-        except docker.errors.APIError as err:
+            self.network.connect(
+                container, aliases=alias, ipv4_address=str(ipv4) if ipv4 else None
+            )
+        except (
+            docker.errors.NotFound,
+            docker.errors.APIError,
+            docker.errors.DockerException,
+            requests.RequestException,
+        ) as err:
            raise DockerError(
-                f"Can't link container to hassio-net: {err}", _LOGGER.error
+                f"Can't connect {container.name} to Supervisor network: {err}",
+                _LOGGER.error,
            ) from err

+    def attach_container_by_name(
+        self,
+        name: str,
+        alias: list[str] | None = None,
+        ipv4: IPv4Address | None = None,
+    ) -> None:
+        """Attach container to Supervisor network.
+
+        Need run inside executor.
+        """
+        try:
+            container = self.docker.containers.get(name)
+        except (
+            docker.errors.NotFound,
+            docker.errors.APIError,
+            docker.errors.DockerException,
+            requests.RequestException,
+        ) as err:
+            raise DockerError(f"Can't find {name}: {err}", _LOGGER.error) from err
+
+        if container.id not in self.containers:
+            self.attach_container(container, alias, ipv4)
+
    def detach_default_bridge(
        self, container: docker.models.containers.Container
    ) -> None:

@@ -130,25 +247,33 @@ class DockerNetwork:
        Need run inside executor.
        """
        try:
-            default_network = self.docker.networks.get("bridge")
+            default_network = self.docker.networks.get(DOCKER_NETWORK_DRIVER)
            default_network.disconnect(container)

        except docker.errors.NotFound:
-            return
-
-        except docker.errors.APIError as err:
+            pass
+        except (
+            docker.errors.APIError,
+            docker.errors.DockerException,
+            requests.RequestException,
+        ) as err:
            raise DockerError(
-                f"Can't disconnect container from default: {err}", _LOGGER.warning
+                f"Can't disconnect {container.name} from default network: {err}",
+                _LOGGER.warning,
            ) from err

-    def stale_cleanup(self, container_name: str):
-        """Remove force a container from Network.
+    def stale_cleanup(self, name: str) -> None:
+        """Force remove a container from Network.

        Fix: https://github.com/moby/moby/issues/23302
        """
        try:
-            self.network.disconnect(container_name, force=True)
-        except docker.errors.NotFound:
-            pass
-        except (docker.errors.DockerException, requests.RequestException) as err:
-            raise DockerError() from err
+            self.network.disconnect(name, force=True)
+        except (
+            docker.errors.APIError,
+            docker.errors.DockerException,
+            requests.RequestException,
+        ) as err:
+            raise DockerError(
+                f"Can't disconnect {name} from Supervisor network: {err}",
+                _LOGGER.warning,
+            ) from err

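[Note] The migration hunk above recreates the Supervisor network when its EnableIPv6 attribute disagrees with the desired setting. A minimal sketch of creating such a dual-stack bridge with the docker SDK, under assumed subnets and network name (the real values live in Supervisor's constants):

    import docker

    client = docker.from_env()
    ipam = docker.types.IPAMConfig(
        pool_configs=[
            docker.types.IPAMPool(subnet="fd00:dead:beef::/48"),  # assumed IPv6 pool
            docker.types.IPAMPool(
                subnet="172.30.32.0/23",  # assumed IPv4 pool
                gateway="172.30.32.1",
                iprange="172.30.33.0/24",
            ),
        ]
    )
    # Both IPAM pools go on one bridge; the daemon assigns v4 and v6 addresses.
    network = client.networks.create(
        "hassio",
        driver="bridge",
        ipam=ipam,
        enable_ipv6=True,
        options={"com.docker.network.bridge.name": "hassio"},
    )
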
@@ -2,7 +2,7 @@

import logging

-from ..const import DOCKER_NETWORK_MASK
+from ..const import DOCKER_IPV4_NETWORK_MASK, OBSERVER_DOCKER_NAME
from ..coresys import CoreSysAttributes
from ..exceptions import DockerJobError
from ..jobs.const import JobExecutionLimit
@@ -12,7 +12,6 @@ from .interface import DockerInterface

_LOGGER: logging.Logger = logging.getLogger(__name__)

-OBSERVER_DOCKER_NAME: str = "hassio_observer"
ENV_NETWORK_MASK: str = "NETWORK_MASK"


@@ -49,7 +48,7 @@ class DockerObserver(DockerInterface, CoreSysAttributes):
            environment={
                ENV_TIME: self.sys_timezone,
                ENV_TOKEN: self.sys_plugins.observer.supervisor_token,
-                ENV_NETWORK_MASK: DOCKER_NETWORK_MASK,
+                ENV_NETWORK_MASK: DOCKER_IPV4_NETWORK_MASK,
            },
            mounts=[MOUNT_DOCKER],
            ports={"80/tcp": 4357},

@@ -39,7 +39,7 @@ class DockerSupervisor(DockerInterface):
    @property
    def host_mounts_available(self) -> bool:
        """Return True if container can see mounts on host within its data directory."""
-        return self._meta and any(
+        return self._meta is not None and any(
            mount.get("Propagation") == PropagationMode.SLAVE
            for mount in self.meta_mounts
            if mount.get("Destination") == "/data"
@@ -89,7 +89,18 @@ class DockerSupervisor(DockerInterface):
        """
        try:
            docker_container = self.sys_docker.containers.get(self.name)
        except (docker.errors.DockerException, requests.RequestException) as err:
            raise DockerError(
                f"Could not get Supervisor container for retag: {err}", _LOGGER.error
            ) from err

+        if not self.image or not docker_container.image:
+            raise DockerError(
+                "Could not locate image from container metadata for retag",
+                _LOGGER.error,
+            )
+
        try:
            docker_container.image.tag(self.image, tag=str(self.version))
            docker_container.image.tag(self.image, tag="latest")
        except (docker.errors.DockerException, requests.RequestException) as err:
@@ -110,7 +121,18 @@ class DockerSupervisor(DockerInterface):
        try:
            docker_container = self.sys_docker.containers.get(self.name)
            docker_image = self.sys_docker.images.get(f"{image}:{version!s}")
        except (docker.errors.DockerException, requests.RequestException) as err:
            raise DockerError(
                f"Can't get image or container to fix start tag: {err}", _LOGGER.error
            ) from err

+        if not docker_container.image:
+            raise DockerError(
+                "Cannot locate image from container metadata to fix start tag",
+                _LOGGER.error,
+            )
+
        try:
            # Find start tag
            for tag in docker_container.image.tags:
                start_image = tag.partition(":")[0]

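[Note] The retag hunks above guard against a missing image reference before tagging. A small sketch of the same docker SDK tagging calls, with a hypothetical container name, repository, and version:

    import docker

    client = docker.from_env()
    container = client.containers.get("hassio_supervisor")  # hypothetical name
    if container.image is None:
        raise RuntimeError("Container has no image metadata")
    # Tag the running image under both the pinned version and latest.
    container.image.tag("ghcr.io/example/supervisor", tag="2025.1.0")
    container.image.tag("ghcr.io/example/supervisor", tag="latest")
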
@@ -84,10 +84,6 @@ class HomeAssistantWSError(HomeAssistantAPIError):
    """Home Assistant websocket error."""


-class HomeAssistantWSNotSupported(HomeAssistantWSError):
-    """Raise when WebSockets are not supported."""
-
-
class HomeAssistantWSConnectionError(HomeAssistantWSError):
    """Raise when the WebSocket connection has an error."""

@@ -72,7 +72,7 @@ class HwDisk(CoreSysAttributes):
        _, _, free = shutil.disk_usage(path)
        return round(free / (1024.0**3), 1)

-    def _get_mountinfo(self, path: str) -> str:
+    def _get_mountinfo(self, path: str) -> list[str] | None:
        mountinfo = _MOUNTINFO.read_text(encoding="utf-8")
        for line in mountinfo.splitlines():
            mountinfoarr = line.split()
@@ -80,7 +80,7 @@ class HwDisk(CoreSysAttributes):
                return mountinfoarr
+        return None

-    def _get_mount_source(self, path: str) -> str:
+    def _get_mount_source(self, path: str) -> str | None:
        mountinfoarr = self._get_mountinfo(path)

        if mountinfoarr is None:
@@ -92,7 +92,7 @@ class HwDisk(CoreSysAttributes):
            optionsep += 1
        return mountinfoarr[optionsep + 2]

-    def _try_get_emmc_life_time(self, device_name: str) -> float:
+    def _try_get_emmc_life_time(self, device_name: str) -> float | None:
        # Get eMMC life_time
        life_time_path = Path(_BLOCK_DEVICE_EMMC_LIFE_TIME.format(device_name))

@@ -121,13 +121,13 @@ class HwDisk(CoreSysAttributes):
        # Return the pessimistic estimate (0x02 -> 10%-20%, return 20%)
        return life_time_value * 10.0

-    def get_disk_life_time(self, path: str | Path) -> float:
+    def get_disk_life_time(self, path: str | Path) -> float | None:
        """Return life time estimate of the underlying SSD drive.

        Must be run in executor.
        """
        mount_source = self._get_mount_source(str(path))
-        if mount_source == "overlay":
+        if not mount_source or mount_source == "overlay":
            return None

        mount_source_path = Path(mount_source)

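[Note] The HwDisk hunks above make the helpers return None when a path cannot be resolved. A rough standalone sketch of the mountinfo lookup they wrap, assuming the standard /proc/self/mountinfo layout (the mount point is field 5, and the fields after the optional-fields separator "-" hold the filesystem type and mount source):

    from pathlib import Path

    def get_mount_source(path: str) -> str | None:
        # Find the mountinfo row for the mount point and return its source device.
        for line in Path("/proc/self/mountinfo").read_text(encoding="utf-8").splitlines():
            fields = line.split()
            if fields[4] == path:
                sep = fields.index("-")
                return fields[sep + 2]
        return None

    print(get_mount_source("/"))
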
@@ -1,8 +1,9 @@
"""Hardware Manager of Supervisor."""

+from __future__ import annotations
+
import logging
from pathlib import Path
-from typing import Self

import pyudev

@@ -48,28 +49,30 @@ _STATIC_NODES: list[Device] = [
class HardwareManager(CoreSysAttributes):
    """Hardware manager for supervisor."""

-    def __init__(self, coresys: CoreSys):
+    def __init__(self, coresys: CoreSys, udev: pyudev.Context) -> None:
        """Initialize Hardware Monitor object."""
        self.coresys: CoreSys = coresys
        self._devices: dict[str, Device] = {}
-        self._udev: pyudev.Context | None = None
+        self._udev: pyudev.Context = udev

-        self._monitor: HwMonitor | None = None
+        self._monitor: HwMonitor = HwMonitor(coresys, udev)
        self._helper: HwHelper = HwHelper(coresys)
        self._policy: HwPolicy = HwPolicy(coresys)
        self._disk: HwDisk = HwDisk(coresys)

-    async def post_init(self) -> Self:
-        """Complete initialization of obect within event loop."""
-        self._udev = await self.sys_run_in_executor(pyudev.Context)
-        self._monitor: HwMonitor = HwMonitor(self.coresys, self._udev)
-        return self
+    @classmethod
+    async def create(cls: type[HardwareManager], coresys: CoreSys) -> HardwareManager:
+        """Complete initialization of a HardwareManager object within event loop."""
+        return cls(coresys, await coresys.run_in_executor(pyudev.Context))

    @property
    def udev(self) -> pyudev.Context:
        """Return Udev context instance."""
        return self._udev

    @property
    def monitor(self) -> HwMonitor:
        """Return Hardware Monitor instance."""
-        if not self._monitor:
-            raise RuntimeError("Hardware monitor not initialized!")
        return self._monitor

    @property
@@ -129,7 +132,7 @@ class HardwareManager(CoreSysAttributes):
    def check_subsystem_parents(self, device: Device, subsystem: UdevSubsystem) -> bool:
        """Return True if the device is part of the given subsystem parent."""
        udev_device: pyudev.Device = pyudev.Devices.from_sys_path(
-            self._udev, str(device.sysfs)
+            self.udev, str(device.sysfs)
        )
        return udev_device.find_parent(subsystem) is not None

@@ -138,7 +141,7 @@ class HardwareManager(CoreSysAttributes):
        self._devices.clear()

        # Exctract all devices
-        for device in self._udev.list_devices():
+        for device in self.udev.list_devices():
            # Skip devices without mapping
            try:
                if not device.device_node or self.helper.hide_virtual_device(device):

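[Note] The HardwareManager hunk above replaces a mutable post_init step with an async factory, so the blocking pyudev setup runs in an executor before __init__ ever sees it and no field has to be Optional. A generic sketch of that pattern, using a stand-in blocking constructor:

    import asyncio

    class Manager:
        def __init__(self, ctx: object) -> None:
            self.ctx = ctx  # fully initialized, no Optional fields

        @classmethod
        async def create(cls) -> "Manager":
            # Run the blocking constructor off the event loop thread.
            ctx = await asyncio.get_running_loop().run_in_executor(None, dict)
            return cls(ctx)

    async def main() -> None:
        manager = await Manager.create()
        print(type(manager.ctx))

    asyncio.run(main())
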
@@ -51,15 +51,17 @@ class HomeAssistantAPI(CoreSysAttributes):
    )
    async def ensure_access_token(self) -> None:
        """Ensure there is an access token."""
-        if self.access_token is not None and self._access_token_expires > datetime.now(
-            tz=UTC
+        if (
+            self.access_token
+            and self._access_token_expires
+            and self._access_token_expires > datetime.now(tz=UTC)
        ):
            return

        with suppress(asyncio.TimeoutError, aiohttp.ClientError):
            async with self.sys_websession.post(
                f"{self.sys_homeassistant.api_url}/auth/token",
-                timeout=30,
+                timeout=aiohttp.ClientTimeout(total=30),
                data={
                    "grant_type": "refresh_token",
                    "refresh_token": self.sys_homeassistant.refresh_token,

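[Note] The hunk above replaces a bare `timeout=30` with an explicit aiohttp.ClientTimeout, which is what newer aiohttp releases expect. A minimal sketch of the refresh-token POST, with a placeholder URL and token:

    import asyncio

    import aiohttp

    async def refresh(url: str, refresh_token: str) -> dict:
        async with aiohttp.ClientSession() as session:
            async with session.post(
                f"{url}/auth/token",
                timeout=aiohttp.ClientTimeout(total=30),
                data={"grant_type": "refresh_token", "refresh_token": refresh_token},
            ) as resp:
                return await resp.json()

    # asyncio.run(refresh("http://homeassistant.local:8123", "<token>"))
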
@@ -32,6 +32,7 @@ class WSType(StrEnum):
    SUPERVISOR_EVENT = "supervisor/event"
    BACKUP_START = "backup/start"
    BACKUP_END = "backup/end"
+    HASSIO_UPDATE_ADDON = "hassio/update/addon"


class WSEvent(StrEnum):

@@ -87,29 +87,29 @@ class HomeAssistantCore(JobGroup):

        try:
            # Evaluate Version if we lost this information
-            if not self.sys_homeassistant.version:
+            if self.sys_homeassistant.version:
+                version = self.sys_homeassistant.version
+            else:
                self.sys_homeassistant.version = (
-                    await self.instance.get_latest_version()
-                )
+                    version
+                ) = await self.instance.get_latest_version()

-            await self.instance.attach(
-                version=self.sys_homeassistant.version, skip_state_event_if_down=True
-            )
+            await self.instance.attach(version=version, skip_state_event_if_down=True)

            # Ensure we are using correct image for this system (unless user has overridden it)
            if not self.sys_homeassistant.override_image:
                await self.instance.check_image(
-                    self.sys_homeassistant.version, self.sys_homeassistant.default_image
+                    version, self.sys_homeassistant.default_image
                )
-                self.sys_homeassistant.image = self.sys_homeassistant.default_image
+                self.sys_homeassistant.set_image(self.sys_homeassistant.default_image)
        except DockerError:
            _LOGGER.info(
                "No Home Assistant Docker image %s found.", self.sys_homeassistant.image
            )
            await self.install_landingpage()
        else:
-            self.sys_homeassistant.version = self.instance.version
-            self.sys_homeassistant.image = self.instance.image
+            self.sys_homeassistant.version = self.instance.version or version
+            self.sys_homeassistant.set_image(self.instance.image)
            await self.sys_homeassistant.save_data()

        # Start landingpage
@@ -138,7 +138,7 @@ class HomeAssistantCore(JobGroup):
        else:
            _LOGGER.info("Using preinstalled landingpage")
            self.sys_homeassistant.version = LANDINGPAGE
-            self.sys_homeassistant.image = self.instance.image
+            self.sys_homeassistant.set_image(self.instance.image)
            await self.sys_homeassistant.save_data()
            return

@@ -166,7 +166,7 @@ class HomeAssistantCore(JobGroup):
            await asyncio.sleep(30)

        self.sys_homeassistant.version = LANDINGPAGE
-        self.sys_homeassistant.image = self.sys_updater.image_homeassistant
+        self.sys_homeassistant.set_image(self.sys_updater.image_homeassistant)
        await self.sys_homeassistant.save_data()

    @Job(
@@ -182,12 +182,13 @@ class HomeAssistantCore(JobGroup):
            if not self.sys_homeassistant.latest_version:
                await self.sys_updater.reload()

-            if self.sys_homeassistant.latest_version:
+            if to_version := self.sys_homeassistant.latest_version:
                try:
                    await self.instance.update(
-                        self.sys_homeassistant.latest_version,
+                        to_version,
                        image=self.sys_updater.image_homeassistant,
                    )
+                    self.sys_homeassistant.version = self.instance.version or to_version
                    break
                except (DockerError, JobException):
                    pass
@@ -198,8 +199,7 @@ class HomeAssistantCore(JobGroup):
            await asyncio.sleep(30)

        _LOGGER.info("Home Assistant docker now installed")
-        self.sys_homeassistant.version = self.instance.version
-        self.sys_homeassistant.image = self.sys_updater.image_homeassistant
+        self.sys_homeassistant.set_image(self.sys_updater.image_homeassistant)
        await self.sys_homeassistant.save_data()

        # finishing
@@ -231,15 +231,21 @@ class HomeAssistantCore(JobGroup):
        backup: bool | None = False,
    ) -> None:
        """Update HomeAssistant version."""
-        version = version or self.sys_homeassistant.latest_version
+        to_version = version or self.sys_homeassistant.latest_version
+        if not to_version:
+            raise HomeAssistantUpdateError(
+                "Cannot determine latest version of Home Assistant for update",
+                _LOGGER.error,
+            )
+
        old_image = self.sys_homeassistant.image
        rollback = self.sys_homeassistant.version if not self.error_state else None
        running = await self.instance.is_running()
        exists = await self.instance.exists()

-        if exists and version == self.instance.version:
+        if exists and to_version == self.instance.version:
            raise HomeAssistantUpdateError(
-                f"Version {version!s} is already installed", _LOGGER.warning
+                f"Version {to_version!s} is already installed", _LOGGER.warning
            )

        if backup:
@@ -262,8 +268,8 @@ class HomeAssistantCore(JobGroup):
                    "Updating Home Assistant image failed", _LOGGER.warning
                ) from err

-            self.sys_homeassistant.version = self.instance.version
-            self.sys_homeassistant.image = self.sys_updater.image_homeassistant
+            self.sys_homeassistant.version = self.instance.version or to_version
+            self.sys_homeassistant.set_image(self.sys_updater.image_homeassistant)

            if running:
                await self.start()
@@ -276,7 +282,7 @@ class HomeAssistantCore(JobGroup):

        # Update Home Assistant
        with suppress(HomeAssistantError):
-            await _update(version)
+            await _update(to_version)

        if not self.error_state and rollback:
            try:
@@ -303,11 +309,11 @@ class HomeAssistantCore(JobGroup):
            # Make a copy of the current log file if it exists
            logfile = self.sys_config.path_homeassistant / "home-assistant.log"
            if logfile.exists():
-                backup = (
+                rollback_log = (
                    self.sys_config.path_homeassistant / "home-assistant-rollback.log"
                )

-                shutil.copy(logfile, backup)
+                shutil.copy(logfile, rollback_log)
                _LOGGER.info(
                    "A backup of the logfile is stored in /config/home-assistant-rollback.log"
                )
@@ -334,7 +340,7 @@ class HomeAssistantCore(JobGroup):
            except DockerError as err:
                raise HomeAssistantError() from err

-            await self._block_till_run(self.sys_homeassistant.version)
+            await self._block_till_run()
        # No Instance/Container found, extended start
        else:
            # Create new API token
@@ -349,7 +355,7 @@ class HomeAssistantCore(JobGroup):
            except DockerError as err:
                raise HomeAssistantError() from err

-            await self._block_till_run(self.sys_homeassistant.version)
+            await self._block_till_run()

    @Job(
        name="home_assistant_core_stop",
@@ -382,7 +388,7 @@ class HomeAssistantCore(JobGroup):
        except DockerError as err:
            raise HomeAssistantError() from err

-        await self._block_till_run(self.sys_homeassistant.version)
+        await self._block_till_run()

    @Job(
        name="home_assistant_core_rebuild",
@@ -440,7 +446,7 @@ class HomeAssistantCore(JobGroup):
    @property
    def in_progress(self) -> bool:
        """Return True if a task is in progress."""
-        return self.instance.in_progress or self.active_job
+        return self.instance.in_progress or self.active_job is not None

    async def check_config(self) -> ConfigResult:
        """Run Home Assistant config check."""
@@ -467,10 +473,10 @@ class HomeAssistantCore(JobGroup):
        _LOGGER.info("Home Assistant config is valid")
        return ConfigResult(True, log)

-    async def _block_till_run(self, version: AwesomeVersion) -> None:
+    async def _block_till_run(self) -> None:
        """Block until Home-Assistant is booting up or startup timeout."""
        # Skip landingpage
-        if version == LANDINGPAGE:
+        if self.sys_homeassistant.version == LANDINGPAGE:
            return
        _LOGGER.info("Wait until Home Assistant is ready")

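[Note] In the update hunks above, the target version is resolved once into to_version and validated up front, instead of trusting an Optional all the way down the call chain. A small sketch of that guard, with hypothetical names:

    class UpdateError(Exception):
        pass

    def resolve_target(requested: str | None, latest: str | None) -> str:
        # Prefer the explicit request, fall back to the latest known version.
        to_version = requested or latest
        if not to_version:
            raise UpdateError("Cannot determine version for update")
        return to_version

    assert resolve_target(None, "2025.1.0") == "2025.1.0"
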
@@ -35,6 +35,7 @@ from ..const import (
    FILE_HASSIO_HOMEASSISTANT,
    BusEvent,
    IngressSessionDataUser,
+    IngressSessionDataUserDict,
)
from ..coresys import CoreSys, CoreSysAttributes
from ..exceptions import (
@@ -112,12 +113,12 @@ class HomeAssistant(FileConfiguration, CoreSysAttributes):
        return self._secrets

    @property
-    def machine(self) -> str:
+    def machine(self) -> str | None:
        """Return the system machines."""
        return self.core.instance.machine

    @property
-    def arch(self) -> str:
+    def arch(self) -> str | None:
        """Return arch of running Home Assistant."""
        return self.core.instance.arch

@@ -190,8 +191,7 @@ class HomeAssistant(FileConfiguration, CoreSysAttributes):
            return self._data[ATTR_IMAGE]
        return self.default_image

-    @image.setter
-    def image(self, value: str | None) -> None:
+    def set_image(self, value: str | None) -> None:
        """Set image name of Home Assistant container."""
        self._data[ATTR_IMAGE] = value

@@ -284,7 +284,7 @@ class HomeAssistant(FileConfiguration, CoreSysAttributes):
    def need_update(self) -> bool:
        """Return true if a Home Assistant update is available."""
        try:
-            return self.version < self.latest_version
+            return self.version is not None and self.version < self.latest_version
        except (AwesomeVersionException, TypeError):
            return False

@@ -347,7 +347,9 @@ class HomeAssistant(FileConfiguration, CoreSysAttributes):
        ):
            return

-        configuration = await self.sys_homeassistant.websocket.async_send_command(
+        configuration: (
+            dict[str, Any] | None
+        ) = await self.sys_homeassistant.websocket.async_send_command(
            {ATTR_TYPE: "get_config"}
        )
        if not configuration or "usb" not in configuration.get("components", []):
@@ -359,7 +361,7 @@ class HomeAssistant(FileConfiguration, CoreSysAttributes):
    async def begin_backup(self) -> None:
        """Inform Home Assistant a backup is beginning."""
        try:
-            resp = await self.websocket.async_send_command(
+            resp: dict[str, Any] | None = await self.websocket.async_send_command(
                {ATTR_TYPE: WSType.BACKUP_START}
            )
        except HomeAssistantWSError as err:
@@ -378,7 +380,7 @@ class HomeAssistant(FileConfiguration, CoreSysAttributes):
    async def end_backup(self) -> None:
        """Inform Home Assistant the backup is ending."""
        try:
-            resp = await self.websocket.async_send_command(
+            resp: dict[str, Any] | None = await self.websocket.async_send_command(
                {ATTR_TYPE: WSType.BACKUP_END}
            )
        except HomeAssistantWSError as err:
@@ -555,17 +557,12 @@ class HomeAssistant(FileConfiguration, CoreSysAttributes):
    )
    async def get_users(self) -> list[IngressSessionDataUser]:
        """Get list of all configured users."""
-        list_of_users = await self.sys_homeassistant.websocket.async_send_command(
+        list_of_users: (
+            list[IngressSessionDataUserDict] | None
+        ) = await self.sys_homeassistant.websocket.async_send_command(
            {ATTR_TYPE: "config/auth/list"}
        )

        if list_of_users:
-            return [
-                IngressSessionDataUser(
-                    id=data["id"],
-                    username=data.get("username"),
-                    display_name=data.get("name"),
-                )
-                for data in list_of_users
-            ]
+            return [IngressSessionDataUser.from_dict(data) for data in list_of_users]
        return []

@@ -4,7 +4,7 @@ from __future__ import annotations

import asyncio
import logging
-from typing import Any
+from typing import Any, TypeVar, cast

import aiohttp
from aiohttp.http_websocket import WSMsgType
@@ -25,7 +25,6 @@ from ..exceptions import (
    HomeAssistantAPIError,
    HomeAssistantWSConnectionError,
    HomeAssistantWSError,
-    HomeAssistantWSNotSupported,
)
from ..utils.json import json_dumps
from .const import CLOSING_STATES, WSEvent, WSType
@@ -38,6 +37,8 @@ MIN_VERSION = {

_LOGGER: logging.Logger = logging.getLogger(__name__)

+T = TypeVar("T")
+

class WSClient:
    """Home Assistant Websocket client."""
@@ -53,7 +54,7 @@ class WSClient:
        self._client = client
        self._message_id: int = 0
        self._loop = loop
-        self._futures: dict[int, asyncio.Future[dict]] = {}
+        self._futures: dict[int, asyncio.Future[T]] = {}  # type: ignore

    @property
    def connected(self) -> bool:
@@ -78,9 +79,9 @@ class WSClient:
        try:
            await self._client.send_json(message, dumps=json_dumps)
        except ConnectionError as err:
-            raise HomeAssistantWSConnectionError(err) from err
+            raise HomeAssistantWSConnectionError(str(err)) from err

-    async def async_send_command(self, message: dict[str, Any]) -> dict | None:
+    async def async_send_command(self, message: dict[str, Any]) -> T | None:
        """Send a websocket message, and return the response."""
        self._message_id += 1
        message["id"] = self._message_id
@@ -89,7 +90,7 @@ class WSClient:
        try:
            await self._client.send_json(message, dumps=json_dumps)
        except ConnectionError as err:
-            raise HomeAssistantWSConnectionError(err) from err
+            raise HomeAssistantWSConnectionError(str(err)) from err

        try:
            return await self._futures[message["id"]]
@@ -206,7 +207,7 @@ class HomeAssistantWebSocket(CoreSysAttributes):
            self.sys_websession,
            self.sys_loop,
            self.sys_homeassistant.ws_url,
-            self.sys_homeassistant.api.access_token,
+            cast(str, self.sys_homeassistant.api.access_token),
        )

        self.sys_create_task(client.start_listener())
@@ -252,7 +253,7 @@ class HomeAssistantWebSocket(CoreSysAttributes):
        )

    async def async_send_message(self, message: dict[str, Any]) -> None:
-        """Send a command with the WS client."""
+        """Send a message with the WS client."""
        # Only commands allowed during startup as those tell Home Assistant to do something.
        # Messages may cause clients to make follow-up API calls so those wait.
        if self.sys_core.state in STARTING_STATES:
@@ -264,84 +265,89 @@ class HomeAssistantWebSocket(CoreSysAttributes):
            return

        try:
-            await self._client.async_send_command(message)
+            if self._client:
+                await self._client.async_send_command(message)
        except HomeAssistantWSConnectionError:
            if self._client:
                await self._client.close()
            self._client = None

-    async def async_send_command(self, message: dict[str, Any]) -> dict[str, Any]:
+    async def async_send_command(self, message: dict[str, Any]) -> T | None:
        """Send a command with the WS client and wait for the response."""
        if not await self._can_send(message):
-            return
+            return None

        try:
-            return await self._client.async_send_command(message)
+            if self._client:
+                return await self._client.async_send_command(message)
        except HomeAssistantWSConnectionError:
            if self._client:
                await self._client.close()
            self._client = None
            raise

-    async def async_supervisor_update_event(
-        self,
-        key: str,
-        data: dict[str, Any] | None = None,
-    ) -> None:
-        """Send a supervisor/event command."""
-        try:
-            await self.async_send_message(
-                {
-                    ATTR_TYPE: WSType.SUPERVISOR_EVENT,
-                    ATTR_DATA: {
-                        ATTR_EVENT: WSEvent.SUPERVISOR_UPDATE,
-                        ATTR_UPDATE_KEY: key,
-                        ATTR_DATA: data or {},
-                    },
-                }
-            )
-        except HomeAssistantWSNotSupported:
-            pass
-        except HomeAssistantWSError as err:
-            _LOGGER.error("Could not send message to Home Assistant due to %s", err)
-
-    def supervisor_update_event(
-        self,
-        key: str,
-        data: dict[str, Any] | None = None,
-    ) -> None:
-        """Send a supervisor/event command."""
-        if self.sys_core.state in CLOSING_STATES:
-            return
-        self.sys_create_task(self.async_supervisor_update_event(key, data))
+        return None

    def send_message(self, message: dict[str, Any]) -> None:
-        """Send a supervisor/event command."""
+        """Send a supervisor/event message."""
        if self.sys_core.state in CLOSING_STATES:
            return
        self.sys_create_task(self.async_send_message(message))

-    async def async_supervisor_event(
-        self, event: WSEvent, data: dict[str, Any] | None = None
-    ):
-        """Send a supervisor/event command to Home Assistant."""
+    async def async_supervisor_event_custom(
+        self, event: WSEvent, extra_data: dict[str, Any] | None = None
+    ) -> None:
+        """Send a supervisor/event message to Home Assistant with custom data."""
        try:
            await self.async_send_message(
                {
                    ATTR_TYPE: WSType.SUPERVISOR_EVENT,
                    ATTR_DATA: {
                        ATTR_EVENT: event,
-                        ATTR_DATA: data or {},
+                        **(extra_data or {}),
                    },
                }
            )
-        except HomeAssistantWSNotSupported:
-            pass
        except HomeAssistantWSError as err:
            _LOGGER.error("Could not send message to Home Assistant due to %s", err)

-    def supervisor_event(self, event: WSEvent, data: dict[str, Any] | None = None):
-        """Send a supervisor/event command to Home Assistant."""
+    def supervisor_event_custom(
+        self, event: WSEvent, extra_data: dict[str, Any] | None = None
+    ) -> None:
+        """Send a supervisor/event message to Home Assistant with custom data."""
        if self.sys_core.state in CLOSING_STATES:
            return
-        self.sys_create_task(self.async_supervisor_event(event, data))
+        self.sys_create_task(self.async_supervisor_event_custom(event, extra_data))
+
+    def supervisor_event(
+        self, event: WSEvent, data: dict[str, Any] | None = None
+    ) -> None:
+        """Send a supervisor/event message to Home Assistant."""
+        if self.sys_core.state in CLOSING_STATES:
+            return
+        self.sys_create_task(
+            self.async_supervisor_event_custom(event, {ATTR_DATA: data or {}})
+        )
+
+    async def async_supervisor_update_event(
+        self,
+        key: str,
+        data: dict[str, Any] | None = None,
+    ) -> None:
+        """Send an update supervisor/event message."""
+        await self.async_supervisor_event_custom(
+            WSEvent.SUPERVISOR_UPDATE,
+            {
+                ATTR_UPDATE_KEY: key,
+                ATTR_DATA: data or {},
+            },
+        )
+
+    def supervisor_update_event(
+        self,
+        key: str,
+        data: dict[str, Any] | None = None,
+    ) -> None:
+        """Send an update supervisor/event message."""
+        if self.sys_core.state in CLOSING_STATES:
+            return
+        self.sys_create_task(self.async_supervisor_update_event(key, data))

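[Note] The websocket hunks above thread a TypeVar through async_send_command so callers can annotate the expected response shape; pending responses are parked in a futures map keyed by message id. A stripped-down sketch of that request/response bookkeeping, not the real client:

    import asyncio
    from typing import Any

    class MiniWS:
        def __init__(self) -> None:
            self._message_id = 0
            self._futures: dict[int, asyncio.Future[Any]] = {}

        async def send_command(self, message: dict[str, Any]) -> Any:
            self._message_id += 1
            message["id"] = self._message_id
            fut: asyncio.Future[Any] = asyncio.get_running_loop().create_future()
            self._futures[message["id"]] = fut
            try:
                return await fut  # resolved by the listener side
            finally:
                del self._futures[message["id"]]

        def on_message(self, msg: dict[str, Any]) -> None:
            # Listener side: route a response to its waiting caller.
            if (fut := self._futures.get(msg["id"])) and not fut.done():
                fut.set_result(msg.get("result"))

    async def main() -> None:
        ws = MiniWS()
        task = asyncio.create_task(ws.send_command({"type": "ping"}))
        await asyncio.sleep(0)  # let send_command register its future
        ws.on_message({"id": 1, "result": "pong"})
        print(await task)

    asyncio.run(main())
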
@@ -2,17 +2,29 @@

from dataclasses import dataclass
from ipaddress import IPv4Address, IPv4Interface, IPv6Address, IPv6Interface
+import logging
import socket

from ..dbus.const import (
    ConnectionStateFlags,
    ConnectionStateType,
    DeviceType,
+    InterfaceAddrGenMode as NMInterfaceAddrGenMode,
+    InterfaceIp6Privacy as NMInterfaceIp6Privacy,
    InterfaceMethod as NMInterfaceMethod,
)
from ..dbus.network.connection import NetworkConnection
from ..dbus.network.interface import NetworkInterface
-from .const import AuthMethod, InterfaceMethod, InterfaceType, WifiMode
+from .const import (
+    AuthMethod,
+    InterfaceAddrGenMode,
+    InterfaceIp6Privacy,
+    InterfaceMethod,
+    InterfaceType,
+    WifiMode,
+)
+
+_LOGGER: logging.Logger = logging.getLogger(__name__)


@dataclass(slots=True)
@@ -46,6 +58,14 @@ class IpSetting:
    nameservers: list[IPv4Address | IPv6Address]


+@dataclass(slots=True)
+class Ip6Setting(IpSetting):
+    """Represent a user IPv6 setting."""
+
+    addr_gen_mode: InterfaceAddrGenMode = InterfaceAddrGenMode.DEFAULT
+    ip6_privacy: InterfaceIp6Privacy = InterfaceIp6Privacy.DEFAULT
+
+
@dataclass(slots=True)
class WifiConfig:
    """Represent a wifi configuration."""
@@ -62,7 +82,7 @@ class VlanConfig:
    """Represent a vlan configuration."""

    id: int
-    interface: str
+    interface: str | None


@dataclass(slots=True)
@@ -79,7 +99,7 @@ class Interface:
    ipv4: IpConfig | None
    ipv4setting: IpSetting | None
    ipv6: IpConfig | None
-    ipv6setting: IpSetting | None
+    ipv6setting: Ip6Setting | None
    wifi: WifiConfig | None
    vlan: VlanConfig | None

@@ -91,7 +111,10 @@ class Interface:
        if inet.settings.match and inet.settings.match.path:
            return inet.settings.match.path == [self.path]

-        return inet.settings.connection.interface_name == self.name
+        return (
+            inet.settings.connection is not None
+            and inet.settings.connection.interface_name == self.name
+        )

    @staticmethod
    def from_dbus_interface(inet: NetworkInterface) -> "Interface":
@@ -118,8 +141,14 @@ class Interface:
            ipv4_setting = IpSetting(InterfaceMethod.DISABLED, [], None, [])

        if inet.settings and inet.settings.ipv6:
-            ipv6_setting = IpSetting(
+            ipv6_setting = Ip6Setting(
                method=Interface._map_nm_method(inet.settings.ipv6.method),
+                addr_gen_mode=Interface._map_nm_addr_gen_mode(
+                    inet.settings.ipv6.addr_gen_mode
+                ),
+                ip6_privacy=Interface._map_nm_ip6_privacy(
+                    inet.settings.ipv6.ip6_privacy
+                ),
                address=[
                    IPv6Interface(f"{ip.address}/{ip.prefix}")
                    for ip in inet.settings.ipv6.address_data
@@ -134,26 +163,26 @@ class Interface:
                else [],
            )
        else:
-            ipv6_setting = IpSetting(InterfaceMethod.DISABLED, [], None, [])
+            ipv6_setting = Ip6Setting(InterfaceMethod.DISABLED, [], None, [])

        ipv4_ready = (
-            bool(inet.connection)
+            inet.connection is not None
            and ConnectionStateFlags.IP4_READY in inet.connection.state_flags
        )
        ipv6_ready = (
-            bool(inet.connection)
+            inet.connection is not None
            and ConnectionStateFlags.IP6_READY in inet.connection.state_flags
        )

        return Interface(
-            inet.name,
-            inet.hw_address,
-            inet.path,
-            inet.settings is not None,
-            Interface._map_nm_connected(inet.connection),
-            inet.primary,
-            Interface._map_nm_type(inet.type),
-            IpConfig(
+            name=inet.interface_name,
+            mac=inet.hw_address,
+            path=inet.path,
+            enabled=inet.settings is not None,
+            connected=Interface._map_nm_connected(inet.connection),
+            primary=inet.primary,
+            type=Interface._map_nm_type(inet.type),
+            ipv4=IpConfig(
                address=inet.connection.ipv4.address
                if inet.connection.ipv4.address
                else [],
@@ -165,8 +194,8 @@ class Interface:
            )
            if inet.connection and inet.connection.ipv4
            else IpConfig([], None, [], ipv4_ready),
-            ipv4_setting,
-            IpConfig(
+            ipv4setting=ipv4_setting,
+            ipv6=IpConfig(
                address=inet.connection.ipv6.address
                if inet.connection.ipv6.address
                else [],
@@ -178,22 +207,42 @@ class Interface:
            )
            if inet.connection and inet.connection.ipv6
            else IpConfig([], None, [], ipv6_ready),
-            ipv6_setting,
-            Interface._map_nm_wifi(inet),
-            Interface._map_nm_vlan(inet),
+            ipv6setting=ipv6_setting,
+            wifi=Interface._map_nm_wifi(inet),
+            vlan=Interface._map_nm_vlan(inet),
        )

    @staticmethod
-    def _map_nm_method(method: str) -> InterfaceMethod:
+    def _map_nm_method(method: str | None) -> InterfaceMethod:
        """Map IP interface method."""
+        match method:
+            case NMInterfaceMethod.AUTO.value:
+                return InterfaceMethod.AUTO
+            case NMInterfaceMethod.MANUAL:
+                return InterfaceMethod.STATIC
+        return InterfaceMethod.DISABLED
+
+    @staticmethod
+    def _map_nm_addr_gen_mode(addr_gen_mode: int) -> InterfaceAddrGenMode:
+        """Map IPv6 interface addr_gen_mode."""
        mapping = {
-            NMInterfaceMethod.AUTO: InterfaceMethod.AUTO,
-            NMInterfaceMethod.DISABLED: InterfaceMethod.DISABLED,
-            NMInterfaceMethod.MANUAL: InterfaceMethod.STATIC,
-            NMInterfaceMethod.LINK_LOCAL: InterfaceMethod.DISABLED,
+            NMInterfaceAddrGenMode.EUI64.value: InterfaceAddrGenMode.EUI64,
+            NMInterfaceAddrGenMode.STABLE_PRIVACY.value: InterfaceAddrGenMode.STABLE_PRIVACY,
+            NMInterfaceAddrGenMode.DEFAULT_OR_EUI64.value: InterfaceAddrGenMode.DEFAULT_OR_EUI64,
        }

-        return mapping.get(method, InterfaceMethod.DISABLED)
+        return mapping.get(addr_gen_mode, InterfaceAddrGenMode.DEFAULT)
+
+    @staticmethod
+    def _map_nm_ip6_privacy(ip6_privacy: int) -> InterfaceIp6Privacy:
+        """Map IPv6 interface ip6_privacy."""
+        mapping = {
+            NMInterfaceIp6Privacy.DISABLED.value: InterfaceIp6Privacy.DISABLED,
+            NMInterfaceIp6Privacy.ENABLED_PREFER_PUBLIC.value: InterfaceIp6Privacy.ENABLED_PREFER_PUBLIC,
+            NMInterfaceIp6Privacy.ENABLED.value: InterfaceIp6Privacy.ENABLED,
+        }
+
+        return mapping.get(ip6_privacy, InterfaceIp6Privacy.DEFAULT)

    @staticmethod
    def _map_nm_connected(connection: NetworkConnection | None) -> bool:
@@ -208,12 +257,14 @@ class Interface:

    @staticmethod
    def _map_nm_type(device_type: int) -> InterfaceType:
-        mapping = {
-            DeviceType.ETHERNET: InterfaceType.ETHERNET,
-            DeviceType.WIRELESS: InterfaceType.WIRELESS,
-            DeviceType.VLAN: InterfaceType.VLAN,
-        }
-        return mapping[device_type]
+        match device_type:
+            case DeviceType.ETHERNET.value:
+                return InterfaceType.ETHERNET
+            case DeviceType.WIRELESS.value:
+                return InterfaceType.WIRELESS
+            case DeviceType.VLAN.value:
+                return InterfaceType.VLAN
+        raise ValueError(f"Invalid device type: {device_type}")

    @staticmethod
    def _map_nm_wifi(inet: NetworkInterface) -> WifiConfig | None:
@@ -222,15 +273,22 @@ class Interface:
            return None

        # Authentication and PSK
-        auth = None
+        auth = AuthMethod.OPEN
        psk = None
-        if not inet.settings.wireless_security:
-            auth = AuthMethod.OPEN
-        elif inet.settings.wireless_security.key_mgmt == "none":
-            auth = AuthMethod.WEP
-        elif inet.settings.wireless_security.key_mgmt == "wpa-psk":
-            auth = AuthMethod.WPA_PSK
-            psk = inet.settings.wireless_security.psk
+        if inet.settings.wireless_security:
+            match inet.settings.wireless_security.key_mgmt:
+                case "none":
+                    auth = AuthMethod.WEP
+                case "wpa-psk":
+                    auth = AuthMethod.WPA_PSK
+                    psk = inet.settings.wireless_security.psk
+                case _:
+                    _LOGGER.warning(
+                        "Auth method %s for network interface %s unsupported, skipping",
+                        inet.settings.wireless_security.key_mgmt,
+                        inet.interface_name,
+                    )
+                    return None

        # WifiMode
        mode = WifiMode.INFRASTRUCTURE
@@ -244,17 +302,17 @@ class Interface:
            signal = None

        return WifiConfig(
-            mode,
-            inet.settings.wireless.ssid,
-            auth,
-            psk,
-            signal,
+            mode=mode,
+            ssid=inet.settings.wireless.ssid if inet.settings.wireless else "",
+            auth=auth,
+            psk=psk,
+            signal=signal,
        )

    @staticmethod
-    def _map_nm_vlan(inet: NetworkInterface) -> WifiConfig | None:
+    def _map_nm_vlan(inet: NetworkInterface) -> VlanConfig | None:
        """Create mapping to nm vlan property."""
-        if inet.type != DeviceType.VLAN or not inet.settings:
+        if inet.type != DeviceType.VLAN or not inet.settings or not inet.settings.vlan:
            return None

        return VlanConfig(inet.settings.vlan.id, inet.settings.vlan.parent)

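[Note] The configuration hunks above convert dict-based NetworkManager mappings into match statements that compare against the .value of the D-Bus enums and fail loudly on unknown device types. A generic sketch of that shape, with stand-in enums rather than the real D-Bus constants:

    from enum import IntEnum, StrEnum

    class DeviceType(IntEnum):  # stand-in for the D-Bus constant
        ETHERNET = 1
        WIRELESS = 2

    class InterfaceType(StrEnum):
        ETHERNET = "ethernet"
        WIRELESS = "wireless"

    def map_type(device_type: int) -> InterfaceType:
        match device_type:
            case DeviceType.ETHERNET.value:
                return InterfaceType.ETHERNET
            case DeviceType.WIRELESS.value:
                return InterfaceType.WIRELESS
        # Unknown types surface immediately instead of raising KeyError later.
        raise ValueError(f"Invalid device type: {device_type}")

    print(map_type(2))
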
@@ -15,6 +15,24 @@ class InterfaceMethod(StrEnum):
    AUTO = "auto"


+class InterfaceAddrGenMode(StrEnum):
+    """Configuration of an interface."""
+
+    EUI64 = "eui64"
+    STABLE_PRIVACY = "stable-privacy"
+    DEFAULT_OR_EUI64 = "default-or-eui64"
+    DEFAULT = "default"
+
+
+class InterfaceIp6Privacy(StrEnum):
+    """Configuration of an interface."""
+
+    DEFAULT = "default"
+    DISABLED = "disabled"
+    ENABLED_PREFER_PUBLIC = "enabled-prefer-public"
+    ENABLED = "enabled"
+
+
class InterfaceType(StrEnum):
    """Configuration of an interface."""

@@ -62,6 +80,7 @@ class LogFormat(StrEnum):

    JOURNAL = "application/vnd.fdo.journal"
    JSON = "application/json"
+    JSON_SEQ = "application/json-seq"
    TEXT = "text/plain"

@@ -133,7 +133,7 @@ class InfoCenter(CoreSysAttributes):
            self.coresys.config.path_supervisor,
        )

-    async def disk_life_time(self) -> float:
+    async def disk_life_time(self) -> float | None:
        """Return the estimated life-time usage (in %) of the SSD storing the data directory."""
        return await self.sys_run_in_executor(
            self.sys_hardware.disk.get_disk_life_time,

@@ -2,12 +2,13 @@

from __future__ import annotations

-from collections.abc import AsyncGenerator
+from collections.abc import AsyncGenerator, Mapping
from contextlib import asynccontextmanager
import json
import logging
import os
from pathlib import Path
+import re
from typing import Self

from aiohttp import ClientError, ClientSession, ClientTimeout
@@ -24,6 +25,7 @@ from ..exceptions import (
    HostServiceError,
)
from ..utils.json import read_json_file
+from ..utils.systemd_journal import journal_boots_reader
from .const import PARAM_BOOT_ID, PARAM_SYSLOG_IDENTIFIER, LogFormat

_LOGGER: logging.Logger = logging.getLogger(__name__)
@@ -34,6 +36,8 @@ SYSLOG_IDENTIFIERS_JSON: Path = (
)
# pylint: enable=no-member

+SYSTEMD_JOURNAL_GATEWAYD_LINES_MAX = (1 << 64) - 1
+
SYSTEMD_JOURNAL_GATEWAYD_SOCKET: Path = Path("/run/systemd-journal-gatewayd.sock")

# From systemd catalog for message IDs (`journalctl --dump-catalog``)
@@ -42,6 +46,10 @@ SYSTEMD_JOURNAL_GATEWAYD_SOCKET: Path = Path("/run/systemd-journal-gatewayd.sock
# Defined-By: systemd
BOOT_IDS_QUERY = {"MESSAGE_ID": "b07a249cd024414a82dd00cd181378ff"}

+RE_ENTRIES_HEADER = re.compile(
+    r"^entries=(?P<cursor>[^:]*):(?P<num_skip>-?\d+):(?P<num_lines>\d*)$"
+)
+

class LogsControl(CoreSysAttributes):
    """Handle systemd-journal logs."""
@@ -101,12 +109,8 @@ class LogsControl(CoreSysAttributes):

        return boot_ids[offset]

-    async def get_boot_ids(self) -> list[str]:
-        """Get boot IDs from oldest to newest."""
-        if self._boot_ids:
-            # Doesn't change without a reboot, no reason to query again once cached
-            return self._boot_ids
-
+    async def _get_boot_ids_legacy(self) -> list[str]:
+        """Get boots IDs using suboptimal method where /boots is not available."""
        try:
            async with self.journald_logs(
                params=BOOT_IDS_QUERY,
@@ -135,13 +139,51 @@ class LogsControl(CoreSysAttributes):
                _LOGGER.error,
            ) from err

-        self._boot_ids = []
+        _boot_ids = []
        for entry in text.split("\n"):
-            if (
-                entry
-                and (boot_id := json.loads(entry)[PARAM_BOOT_ID]) not in self._boot_ids
-            ):
-                self._boot_ids.append(boot_id)
+            if entry and (boot_id := json.loads(entry)[PARAM_BOOT_ID]) not in _boot_ids:
+                _boot_ids.append(boot_id)
+
+        return _boot_ids
+
+    async def _get_boot_ids_native(self):
+        """Get boot IDs using /boots endpoint."""
+        try:
+            async with self.journald_logs(
+                path="/boots",
+                accept=LogFormat.JSON_SEQ,
+                timeout=ClientTimeout(total=20),
+            ) as resp:
+                if resp.status != 200:
+                    raise HostLogError(
+                        f"Got HTTP {resp.status} from /boots.",
+                        _LOGGER.debug,
+                    )
+                # Don't rely solely on the order of boots in the response,
+                # sort the boots by index returned in the response.
+                boot_id_tuples = [boot async for boot in journal_boots_reader(resp)]
+                return [
+                    boot_id for _, boot_id in sorted(boot_id_tuples, key=lambda x: x[0])
+                ]
+        except (ClientError, TimeoutError) as err:
+            raise HostLogError(
+                "Could not get a list of boot IDs from systemd-journal-gatewayd",
+                _LOGGER.error,
+            ) from err
+
+    async def get_boot_ids(self) -> list[str]:
+        """Get boot IDs from oldest to newest."""
+        if self._boot_ids:
+            # Doesn't change without a reboot, no reason to query again once cached
+            return self._boot_ids
+
+        try:
+            self._boot_ids = await self._get_boot_ids_native()
+        except HostLogError:
+            _LOGGER.info(
+                "Could not get /boots from systemd-journal-gatewayd, using fallback."
+            )
+            self._boot_ids = await self._get_boot_ids_legacy()

        return self._boot_ids

@@ -163,7 +205,7 @@ class LogsControl(CoreSysAttributes):
    async def journald_logs(
        self,
        path: str = "/entries",
-        params: dict[str, str | list[str]] | None = None,
+        params: Mapping[str, str | list[str]] | None = None,
        range_header: str | None = None,
        accept: LogFormat = LogFormat.TEXT,
        timeout: ClientTimeout | None = None,
@@ -184,8 +226,18 @@ class LogsControl(CoreSysAttributes):
        base_url = "http://localhost/"
        connector = UnixConnector(path=str(SYSTEMD_JOURNAL_GATEWAYD_SOCKET))
        async with ClientSession(base_url=base_url, connector=connector) as session:
-            headers = {ACCEPT: accept}
+            headers = {ACCEPT: accept.value}
            if range_header:
-                if range_header.endswith(":"):
+                # Make sure that num_entries is always set - before Systemd v256 it was
+                # possible to omit it, which made sense when the "follow" option was used,
+                # but this syntax is now invalid and triggers HTTP 400.
+                # See: https://github.com/systemd/systemd/issues/37172
+                if not (matches := re.match(RE_ENTRIES_HEADER, range_header)):
+                    raise HostNotSupportedError(
+                        f"Invalid range header: {range_header}"
+                    )
+                range_header = f"entries={matches.group('cursor')}:{matches.group('num_skip')}:{SYSTEMD_JOURNAL_GATEWAYD_LINES_MAX}"
                headers[RANGE] = range_header
            async with session.get(
                f"{path}",

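[Note] The journald hunk above rewrites Range headers so the line count is always explicit, since systemd v256 rejects an `entries=cursor:skip:` header with the count omitted. A standalone sketch of that rewrite, using the same regex shape as the diff:

    import re

    RE_ENTRIES_HEADER = re.compile(
        r"^entries=(?P<cursor>[^:]*):(?P<num_skip>-?\d+):(?P<num_lines>\d*)$"
    )
    LINES_MAX = (1 << 64) - 1

    def normalize(range_header: str) -> str:
        if not (m := RE_ENTRIES_HEADER.match(range_header)):
            raise ValueError(f"Invalid range header: {range_header}")
        # Force an explicit (effectively unbounded) line count when omitted.
        return f"entries={m.group('cursor')}:{m.group('num_skip')}:{LINES_MAX}"

    print(normalize("entries=cursor123:-5:"))
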
@ -8,11 +8,11 @@ from typing import Any
|
||||
from ..const import ATTR_HOST_INTERNET
|
||||
from ..coresys import CoreSys, CoreSysAttributes
|
||||
from ..dbus.const import (
|
||||
DBUS_ATTR_CONFIGURATION,
|
||||
DBUS_ATTR_CONNECTION_ENABLED,
|
||||
DBUS_ATTR_CONNECTIVITY,
|
||||
DBUS_ATTR_PRIMARY_CONNECTION,
|
||||
DBUS_IFACE_DNS,
|
||||
DBUS_IFACE_NM,
|
||||
DBUS_OBJECT_BASE,
|
||||
DBUS_SIGNAL_NM_CONNECTION_ACTIVE_CHANGED,
|
||||
ConnectionStateType,
|
||||
ConnectivityState,
|
||||
@@ -46,6 +46,8 @@ class NetworkManager(CoreSysAttributes):
         """Initialize system center handling."""
         self.coresys: CoreSys = coresys
         self._connectivity: bool | None = None
+        # No event need on initial change (NetworkManager initializes with empty list)
+        self._dns_configuration: list = []
 
     @property
     def connectivity(self) -> bool | None:
@@ -87,7 +89,7 @@ class NetworkManager(CoreSysAttributes):
         for config in self.sys_dbus.network.dns.configuration:
             if config.vpn or not config.nameservers:
                 continue
-            servers.extend(config.nameservers)
+            servers.extend([str(ns) for ns in config.nameservers])
 
         return list(dict.fromkeys(servers))
 
@@ -138,8 +140,12 @@ class NetworkManager(CoreSysAttributes):
             ]
         )
 
-        self.sys_dbus.network.dbus.properties.on_properties_changed(
-            self._check_connectivity_changed
+        self.sys_dbus.network.dbus.properties.on(
+            "properties_changed", self._check_connectivity_changed
+        )
+
+        self.sys_dbus.network.dns.dbus.properties.on(
+            "properties_changed", self._check_dns_changed
         )
 
     async def _check_connectivity_changed(
@@ -152,15 +158,6 @@ class NetworkManager(CoreSysAttributes):
         connectivity_check: bool | None = changed.get(DBUS_ATTR_CONNECTION_ENABLED)
         connectivity: int | None = changed.get(DBUS_ATTR_CONNECTIVITY)
 
-        # This potentially updated the DNS configuration. Make sure the DNS plug-in
-        # picks up the latest settings.
-        if (
-            DBUS_ATTR_PRIMARY_CONNECTION in changed
-            and changed[DBUS_ATTR_PRIMARY_CONNECTION]
-            and changed[DBUS_ATTR_PRIMARY_CONNECTION] != DBUS_OBJECT_BASE
-        ):
-            await self.sys_plugins.dns.restart()
-
         if (
             connectivity_check is True
             or DBUS_ATTR_CONNECTION_ENABLED in invalidated
@@ -174,6 +171,20 @@ class NetworkManager(CoreSysAttributes):
         elif connectivity is not None:
             self.connectivity = connectivity == ConnectivityState.CONNECTIVITY_FULL
 
+    async def _check_dns_changed(
+        self, interface: str, changed: dict[str, Any], invalidated: list[str]
+    ):
+        """Check if DNS properties have changed."""
+        if interface != DBUS_IFACE_DNS:
+            return
+
+        if (
+            DBUS_ATTR_CONFIGURATION in changed
+            and self._dns_configuration != changed[DBUS_ATTR_CONFIGURATION]
+        ):
+            self._dns_configuration = changed[DBUS_ATTR_CONFIGURATION]
+            self.sys_plugins.dns.notify_locals_changed()
+
     async def update(self, *, force_connectivity_check: bool = False):
         """Update properties over dbus."""
         _LOGGER.info("Updating local network information")
@@ -196,10 +207,16 @@ class NetworkManager(CoreSysAttributes):
         with suppress(NetworkInterfaceNotFound):
             inet = self.sys_dbus.network.get(interface.name)
 
-        con: NetworkConnection = None
+        con: NetworkConnection | None = None
 
         # Update exist configuration
-        if inet and interface.equals_dbus_interface(inet) and interface.enabled:
+        if (
+            inet
+            and inet.settings
+            and inet.settings.connection
+            and interface.equals_dbus_interface(inet)
+            and interface.enabled
+        ):
             _LOGGER.debug("Updating existing configuration for %s", interface.name)
             settings = get_connection_from_interface(
                 interface,
@@ -210,12 +227,12 @@ class NetworkManager(CoreSysAttributes):
 
             try:
                 await inet.settings.update(settings)
-                con = await self.sys_dbus.network.activate_connection(
+                con = activated = await self.sys_dbus.network.activate_connection(
                     inet.settings.object_path, inet.object_path
                 )
                 _LOGGER.debug(
                     "activate_connection returns %s",
-                    con.object_path,
+                    activated.object_path,
                 )
             except DBusError as err:
                 raise HostNetworkError(
@@ -235,12 +252,16 @@ class NetworkManager(CoreSysAttributes):
             settings = get_connection_from_interface(interface, self.sys_dbus.network)
 
             try:
-                settings, con = await self.sys_dbus.network.add_and_activate_connection(
+                (
+                    settings,
+                    activated,
+                ) = await self.sys_dbus.network.add_and_activate_connection(
                     settings, inet.object_path
                 )
+                con = activated
                 _LOGGER.debug(
                     "add_and_activate_connection returns %s",
-                    con.object_path,
+                    activated.object_path,
                 )
             except DBusError as err:
                 raise HostNetworkError(
@@ -276,7 +297,7 @@ class NetworkManager(CoreSysAttributes):
             )
 
         if con:
-            async with con.dbus.signal(
+            async with con.connected_dbus.signal(
                 DBUS_SIGNAL_NM_CONNECTION_ACTIVE_CHANGED
             ) as signal:
                 # From this point we monitor signals. However, it might be that
@@ -302,7 +323,7 @@ class NetworkManager(CoreSysAttributes):
         """Scan on Interface for AccessPoint."""
         inet = self.sys_dbus.network.get(interface.name)
 
-        if inet.type != DeviceType.WIRELESS:
+        if inet.type != DeviceType.WIRELESS or not inet.wireless:
            raise HostNotSupportedError(
                 f"Can only scan with wireless card - {interface.name}", _LOGGER.error
             )
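
The `_check_dns_changed` handler added above follows a guard-then-compare pattern: ignore signals from unrelated interfaces, and only notify the DNS plug-in when the configuration payload actually differs from the cached copy. A stand-alone sketch of that pattern (the class, constant, and callback here are illustrative stand-ins, not Supervisor APIs):

from typing import Any, Callable

DNS_IFACE = "org.freedesktop.NetworkManager.DnsManager"  # assumed interface name

class DnsWatcher:
    def __init__(self, notify: Callable[[], None]) -> None:
        self._configuration: list = []  # cache of the last-seen value
        self._notify = notify

    def on_properties_changed(self, interface: str, changed: dict[str, Any]) -> None:
        # Ignore signals from unrelated interfaces.
        if interface != DNS_IFACE:
            return
        # Only react when the configuration payload really differs.
        if "Configuration" in changed and self._configuration != changed["Configuration"]:
            self._configuration = changed["Configuration"]
            self._notify()

watcher = DnsWatcher(notify=lambda: print("DNS configuration changed"))
watcher.on_properties_changed(DNS_IFACE, {"Configuration": [{"nameservers": ["1.1.1.1"]}]})
# The identical payload below produces no second notification.
watcher.on_properties_changed(DNS_IFACE, {"Configuration": [{"nameservers": ["1.1.1.1"]}]})
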
@@ -12,6 +12,7 @@ from .const import (
     ATTR_SESSION_DATA,
     FILE_HASSIO_INGRESS,
     IngressSessionData,
+    IngressSessionDataDict,
 )
 from .coresys import CoreSys, CoreSysAttributes
 from .utils import check_port
@@ -35,7 +36,7 @@ class Ingress(FileConfiguration, CoreSysAttributes):
         """Return addon they have this ingress token."""
         if token not in self.tokens:
             return None
-        return self.sys_addons.get(self.tokens[token], local_only=True)
+        return self.sys_addons.get_local_only(self.tokens[token])
 
     def get_session_data(self, session_id: str) -> IngressSessionData | None:
         """Return complementary data of current session or None."""
@@ -49,7 +50,7 @@ class Ingress(FileConfiguration, CoreSysAttributes):
         return self._data[ATTR_SESSION]
 
     @property
-    def sessions_data(self) -> dict[str, dict[str, str | None]]:
+    def sessions_data(self) -> dict[str, IngressSessionDataDict]:
         """Return sessions_data."""
         return self._data[ATTR_SESSION_DATA]
 
@@ -89,7 +90,7 @@ class Ingress(FileConfiguration, CoreSysAttributes):
         now = utcnow()
 
         sessions = {}
-        sessions_data: dict[str, dict[str, str | None]] = {}
+        sessions_data: dict[str, IngressSessionDataDict] = {}
         for session, valid in self.sessions.items():
             # check if timestamp valid, to avoid crash on malformed timestamp
             try:
@@ -118,7 +119,8 @@ class Ingress(FileConfiguration, CoreSysAttributes):
 
         # Read all ingress token and build a map
         for addon in self.addons:
-            self.tokens[addon.ingress_token] = addon.slug
+            if addon.ingress_token:
+                self.tokens[addon.ingress_token] = addon.slug
 
     def create_session(self, data: IngressSessionData | None = None) -> str:
         """Create new session."""
@@ -141,7 +143,7 @@ class Ingress(FileConfiguration, CoreSysAttributes):
         try:
             valid_until = utc_from_timestamp(self.sessions[session])
         except OverflowError:
-            self.sessions[session] = utcnow() + timedelta(minutes=15)
+            self.sessions[session] = (utcnow() + timedelta(minutes=15)).timestamp()
            return True
 
         # Is still valid?
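
The session fix above matters because the session store persists POSIX timestamps (floats); the earlier fallback stored a datetime object where a float was expected. A minimal stand-alone sketch of the corrected validation logic, assuming the same float-timestamp storage (function names are illustrative):

from datetime import datetime, timedelta, timezone

def utcnow() -> datetime:
    return datetime.now(timezone.utc)

def validate_session(sessions: dict[str, float], session: str) -> bool:
    if session not in sessions:
        return False
    try:
        valid_until = datetime.fromtimestamp(sessions[session], timezone.utc)
    except (OverflowError, OSError, ValueError):
        # Malformed timestamp: reset to a short validity window, stored as a float.
        sessions[session] = (utcnow() + timedelta(minutes=15)).timestamp()
        return True
    return valid_until > utcnow()

sessions = {"abc": (utcnow() + timedelta(minutes=5)).timestamp()}
print(validate_session(sessions, "abc"))  # True
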
@@ -1,13 +1,13 @@
 """Supervisor job manager."""
 
 import asyncio
-from collections.abc import Awaitable, Callable
-from contextlib import contextmanager
+from collections.abc import Callable, Coroutine, Generator
+from contextlib import contextmanager, suppress
 from contextvars import Context, ContextVar, Token
 from dataclasses import dataclass
 from datetime import datetime
 import logging
-from typing import Any
+from typing import Any, Self
 from uuid import uuid4
 
 from attrs import Attribute, define, field
@@ -27,7 +27,7 @@ from .validate import SCHEMA_JOBS_CONFIG
 # When a new asyncio task is started the current context is copied over.
 # Modifications to it in one task are not visible to others though.
 # This allows us to track what job is currently in progress in each task.
-_CURRENT_JOB: ContextVar[str] = ContextVar("current_job")
+_CURRENT_JOB: ContextVar[str | None] = ContextVar("current_job", default=None)
 
 _LOGGER: logging.Logger = logging.getLogger(__name__)
 
@@ -75,7 +75,7 @@ class SupervisorJobError:
     message: str = "Unknown error, see supervisor logs"
     stage: str | None = None
 
-    def as_dict(self) -> dict[str, str]:
+    def as_dict(self) -> dict[str, str | None]:
         """Return dictionary representation."""
         return {
             "type": self.type_.__name__,
@@ -101,9 +101,7 @@ class SupervisorJob:
     stage: str | None = field(
         default=None, validator=[_invalid_if_done], on_setattr=_on_change
     )
-    parent_id: str | None = field(
-        factory=lambda: _CURRENT_JOB.get(None), on_setattr=frozen
-    )
+    parent_id: str | None = field(factory=_CURRENT_JOB.get, on_setattr=frozen)
     done: bool | None = field(init=False, default=None, on_setattr=_on_change)
     on_change: Callable[["SupervisorJob", Attribute, Any], None] | None = field(
         default=None, on_setattr=frozen
@@ -137,7 +135,7 @@ class SupervisorJob:
         self.errors += [new_error]
 
     @contextmanager
-    def start(self):
+    def start(self) -> Generator[Self]:
         """Start the job in the current task.
 
         This can only be called if the parent ID matches the job running in the current task.
@@ -146,11 +144,11 @@ class SupervisorJob:
         """
         if self.done is not None:
             raise JobStartException("Job has already been started")
-        if _CURRENT_JOB.get(None) != self.parent_id:
+        if _CURRENT_JOB.get() != self.parent_id:
             raise JobStartException("Job has a different parent from current job")
 
         self.done = False
-        token: Token[str] | None = None
+        token: Token[str | None] | None = None
         try:
             token = _CURRENT_JOB.set(self.uuid)
             yield self
@@ -193,17 +191,15 @@ class JobManager(FileConfiguration, CoreSysAttributes):
 
         Must be called from within a job. Raises RuntimeError if there is no current job.
         """
-        try:
-            return self.get_job(_CURRENT_JOB.get())
-        except (LookupError, JobNotFound):
-            raise RuntimeError(
-                "No job for the current asyncio task!", _LOGGER.critical
-            ) from None
+        if job_id := _CURRENT_JOB.get():
+            with suppress(JobNotFound):
+                return self.get_job(job_id)
+        raise RuntimeError("No job for the current asyncio task!", _LOGGER.critical)
 
     @property
     def is_job(self) -> bool:
         """Return true if there is an active job for the current asyncio task."""
-        return bool(_CURRENT_JOB.get(None))
+        return _CURRENT_JOB.get() is not None
 
     def _notify_on_job_change(
         self, job: SupervisorJob, attribute: Attribute, value: Any
@@ -265,7 +261,7 @@ class JobManager(FileConfiguration, CoreSysAttributes):
 
     def schedule_job(
         self,
-        job_method: Callable[..., Awaitable[Any]],
+        job_method: Callable[..., Coroutine],
         options: JobSchedulerOptions,
         *args,
         **kwargs,
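
A short, runnable illustration of the ContextVar semantics the comment above relies on: each new asyncio task receives a copy of the current context, so a job id set in a parent task is visible to the children it spawns, while a child's changes never leak back to the parent.

import asyncio
from contextvars import ContextVar

CURRENT_JOB: ContextVar[str | None] = ContextVar("current_job", default=None)

async def child() -> None:
    print("child sees:", CURRENT_JOB.get())  # inherits "job-1" from the parent
    CURRENT_JOB.set("job-2")                 # local to this task's context copy

async def parent() -> None:
    CURRENT_JOB.set("job-1")
    await asyncio.create_task(child())
    print("parent still sees:", CURRENT_JOB.get())  # "job-1", unaffected

asyncio.run(parent())
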
@@ -1,12 +1,12 @@
 """Job decorator."""
 
 import asyncio
-from collections.abc import Callable
+from collections.abc import Awaitable, Callable
 from contextlib import suppress
 from datetime import datetime, timedelta
 from functools import wraps
 import logging
-from typing import Any
+from typing import Any, cast
 
 from ..const import CoreState
 from ..coresys import CoreSys, CoreSysAttributes
@@ -43,7 +43,22 @@ class Job(CoreSysAttributes):
         throttle_max_calls: int | None = None,
         internal: bool = False,
     ):
-        """Initialize the Job class."""
+        """Initialize the Job decorator.
+
+        Args:
+            name (str): Unique name for the job. Must not be duplicated.
+            conditions (list[JobCondition] | None): List of conditions that must be met before the job runs.
+            cleanup (bool): Whether to clean up the job after execution. Defaults to True. If set to False, the job will remain accessible through the Supervisor API until the next restart.
+            on_condition (type[JobException] | None): Exception type to raise if a job condition fails. If None, logs the failure.
+            limit (JobExecutionLimit | None): Execution limit policy for the job (e.g., throttle, once, group-based).
+            throttle_period (timedelta | Callable | None): Throttle period as a timedelta or a callable returning a timedelta (for rate-limited jobs).
+            throttle_max_calls (int | None): Maximum number of calls allowed within the throttle period (for rate-limited jobs).
+            internal (bool): Whether the job is internal (not exposed through the Supervisor API). Defaults to False.
+
+        Raises:
+            RuntimeError: If job name is not unique, or required throttle parameters are missing for the selected limit.
+
+        """
         if name in _JOB_NAMES:
             raise RuntimeError(f"A job already exists with name {name}!")
 
@@ -54,11 +69,10 @@ class Job(CoreSysAttributes):
         self.on_condition = on_condition
         self.limit = limit
         self._throttle_period = throttle_period
-        self.throttle_max_calls = throttle_max_calls
+        self._throttle_max_calls = throttle_max_calls
         self._lock: asyncio.Semaphore | None = None
-        self._method = None
         self._last_call: dict[str | None, datetime] = {}
-        self._rate_limited_calls: dict[str, list[datetime]] | None = None
+        self._rate_limited_calls: dict[str | None, list[datetime]] | None = None
         self._internal = internal
 
         # Validate Options
@@ -82,13 +96,29 @@ class Job(CoreSysAttributes):
             JobExecutionLimit.THROTTLE_RATE_LIMIT,
             JobExecutionLimit.GROUP_THROTTLE_RATE_LIMIT,
         ):
-            if self.throttle_max_calls is None:
+            if self._throttle_max_calls is None:
                 raise RuntimeError(
                     f"Job {name} is using execution limit {limit} without throttle max calls!"
                 )
 
             self._rate_limited_calls = {}
 
+    @property
+    def throttle_max_calls(self) -> int:
+        """Return max calls for throttle."""
+        if self._throttle_max_calls is None:
+            raise RuntimeError("No throttle max calls set for job!")
+        return self._throttle_max_calls
+
+    @property
+    def lock(self) -> asyncio.Semaphore:
+        """Return lock for limits."""
+        # asyncio.Semaphore objects must be created in event loop
+        # Since this is sync code it is not safe to create if missing here
+        if not self._lock:
+            raise RuntimeError("Lock has not been created yet!")
+        return self._lock
+
     def last_call(self, group_name: str | None = None) -> datetime:
         """Return last call datetime."""
         return self._last_call.get(group_name, datetime.min)
@@ -97,12 +127,12 @@ class Job(CoreSysAttributes):
         """Set last call datetime."""
         self._last_call[group_name] = value
 
-    def rate_limited_calls(
-        self, group_name: str | None = None
-    ) -> list[datetime] | None:
+    def rate_limited_calls(self, group_name: str | None = None) -> list[datetime]:
         """Return rate limited calls if used."""
         if self._rate_limited_calls is None:
-            return None
+            raise RuntimeError(
+                f"Rate limited calls not available for limit type {self.limit}"
+            )
 
         return self._rate_limited_calls.get(group_name, [])
 
@@ -131,10 +161,10 @@ class Job(CoreSysAttributes):
 
         self._rate_limited_calls[group_name] = value
 
-    def throttle_period(self, group_name: str | None = None) -> timedelta | None:
+    def throttle_period(self, group_name: str | None = None) -> timedelta:
         """Return throttle period."""
         if self._throttle_period is None:
-            return None
+            raise RuntimeError("No throttle period set for Job!")
 
         if isinstance(self._throttle_period, timedelta):
             return self._throttle_period
@@ -142,7 +172,7 @@ class Job(CoreSysAttributes):
         return self._throttle_period(
             self.coresys,
             self.last_call(group_name),
-            self.rate_limited_calls(group_name),
+            self.rate_limited_calls(group_name) if self._rate_limited_calls else None,
         )
 
     def _post_init(self, obj: JobGroup | CoreSysAttributes) -> JobGroup | None:
@@ -158,12 +188,12 @@ class Job(CoreSysAttributes):
         self._lock = asyncio.Semaphore()
 
         # Job groups
-        try:
-            is_job_group = obj.acquire and obj.release
-        except AttributeError:
-            is_job_group = False
+        job_group: JobGroup | None = None
+        with suppress(AttributeError):
+            if obj.acquire and obj.release:  # type: ignore
+                job_group = cast(JobGroup, obj)
 
-        if not is_job_group and self.limit in (
+        if not job_group and self.limit in (
             JobExecutionLimit.GROUP_ONCE,
             JobExecutionLimit.GROUP_WAIT,
             JobExecutionLimit.GROUP_THROTTLE,
@@ -174,7 +204,7 @@ class Job(CoreSysAttributes):
                 f"Job on {self.name} need to be a JobGroup to use group based limits!"
             ) from None
 
-        return obj if is_job_group else None
+        return job_group
 
     def _handle_job_condition_exception(self, err: JobConditionException) -> None:
         """Handle a job condition failure."""
@@ -184,9 +214,8 @@ class Job(CoreSysAttributes):
             return
         raise self.on_condition(error_msg, _LOGGER.warning) from None
 
-    def __call__(self, method):
+    def __call__(self, method: Callable[..., Awaitable]):
         """Call the wrapper logic."""
-        self._method = method
 
         @wraps(method)
         async def wrapper(
@@ -221,7 +250,7 @@ class Job(CoreSysAttributes):
             if self.conditions:
                 try:
                     await Job.check_conditions(
-                        self, set(self.conditions), self._method.__qualname__
+                        self, set(self.conditions), method.__qualname__
                     )
                 except JobConditionException as err:
                     return self._handle_job_condition_exception(err)
@@ -237,7 +266,7 @@ class Job(CoreSysAttributes):
                 JobExecutionLimit.GROUP_WAIT,
             ):
                 try:
-                    await obj.acquire(
+                    await cast(JobGroup, job_group).acquire(
                         job, self.limit == JobExecutionLimit.GROUP_WAIT
                     )
                 except JobGroupExecutionLimitExceeded as err:
@@ -296,12 +325,12 @@ class Job(CoreSysAttributes):
             with job.start():
                 try:
                     self.set_last_call(datetime.now(), group_name)
-                    if self.rate_limited_calls(group_name) is not None:
+                    if self._rate_limited_calls is not None:
                         self.add_rate_limited_call(
                             self.last_call(group_name), group_name
                         )
 
-                    return await self._method(obj, *args, **kwargs)
+                    return await method(obj, *args, **kwargs)
 
                 # If a method has a conditional JobCondition, they must check it in the method
                 # These should be handled like normal JobConditions as much as possible
@@ -317,11 +346,11 @@ class Job(CoreSysAttributes):
                     raise JobException() from err
                 finally:
                     self._release_exception_limits()
-                    if self.limit in (
+                    if job_group and self.limit in (
                         JobExecutionLimit.GROUP_ONCE,
                         JobExecutionLimit.GROUP_WAIT,
                     ):
-                        obj.release()
+                        job_group.release()
 
             # Jobs that weren't started are always cleaned up. Also clean up done jobs if required
             finally:
@@ -473,13 +502,13 @@ class Job(CoreSysAttributes):
         ):
             return
 
-        if self.limit == JobExecutionLimit.ONCE and self._lock.locked():
+        if self.limit == JobExecutionLimit.ONCE and self.lock.locked():
             on_condition = (
                 JobException if self.on_condition is None else self.on_condition
             )
             raise on_condition("Another job is running")
 
-        await self._lock.acquire()
+        await self.lock.acquire()
 
     def _release_exception_limits(self) -> None:
         """Release possible exception limits."""
@@ -490,4 +519,4 @@ class Job(CoreSysAttributes):
             JobExecutionLimit.GROUP_THROTTLE_WAIT,
         ):
             return
-        self._lock.release()
+        self.lock.release()
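
The throttle knobs documented in the docstring above (throttle_period, throttle_max_calls) boil down to per-group call bookkeeping: record call times per group, drop entries older than the period, and refuse a call once the cap is reached. A simplified, self-contained sketch of that accounting, not the decorator's actual implementation:

from datetime import datetime, timedelta

class RateLimiter:
    def __init__(self, period: timedelta, max_calls: int) -> None:
        self.period = period
        self.max_calls = max_calls
        self._calls: dict[str | None, list[datetime]] = {}

    def try_acquire(self, group: str | None = None) -> bool:
        now = datetime.now()
        # Prune calls that fell out of the throttle window.
        calls = [t for t in self._calls.get(group, []) if now - t < self.period]
        if len(calls) >= self.max_calls:
            self._calls[group] = calls
            return False
        calls.append(now)
        self._calls[group] = calls
        return True

limiter = RateLimiter(period=timedelta(seconds=60), max_calls=2)
print([limiter.try_acquire() for _ in range(3)])  # [True, True, False]
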
@@ -41,7 +41,7 @@ class JobGroup(CoreSysAttributes):
     def has_lock(self) -> bool:
         """Return true if current task has the lock on this job group."""
         return (
-            self.active_job
+            self.active_job is not None
             and self.sys_jobs.is_job
             and self.active_job == self.sys_jobs.current
         )
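
The `is not None` change above distinguishes presence from truthiness: static checkers can narrow an optional attribute, and an object that happens to be falsy is still treated as present. A small illustration:

class Jobs(list):
    """A list subclass: an empty instance is falsy but not None."""

active_job: Jobs | None = Jobs()

if active_job:              # wrong question: tests emptiness, not presence
    print("truthy check ran")
if active_job is not None:  # right question: only filters out the None case
    print("identity check ran")
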
@@ -3,11 +3,13 @@
 import ipaddress
 import os
 import re
+from typing import cast
 
 from aiohttp import hdrs
 import attr
+from sentry_sdk.types import Event, Hint
 
-from ..const import DOCKER_NETWORK_MASK, HEADER_TOKEN, HEADER_TOKEN_OLD, CoreState
+from ..const import DOCKER_IPV4_NETWORK_MASK, HEADER_TOKEN, HEADER_TOKEN_OLD, CoreState
 from ..coresys import CoreSys
 from ..exceptions import AddonConfigurationError
 
@@ -19,7 +21,7 @@ def sanitize_host(host: str) -> str:
     try:
         # Allow internal URLs
         ip = ipaddress.ip_address(host)
-        if ip in ipaddress.ip_network(DOCKER_NETWORK_MASK):
+        if ip in ipaddress.ip_network(DOCKER_IPV4_NETWORK_MASK):
             return host
     except ValueError:
         pass
@@ -39,7 +41,7 @@ def sanitize_url(url: str) -> str:
     return f"{match.group(1)}{host}{match.group(3)}"
 
 
-def filter_data(coresys: CoreSys, event: dict, hint: dict) -> dict:
+def filter_data(coresys: CoreSys, event: Event, hint: Hint) -> Event | None:
     """Filter event data before sending to sentry."""
     # Ignore some exceptions
     if "exc_info" in hint:
@@ -53,11 +55,12 @@ def filter_data(coresys: CoreSys, event: dict, hint: dict) -> dict:
 
     event.setdefault("extra", {}).update({"os.environ": dict(os.environ)})
     event.setdefault("user", {}).update({"id": coresys.machine_id})
-    event.setdefault("tags", {}).update(
-        {
-            "machine": coresys.machine,
-        }
-    )
+    if coresys.machine:
+        event.setdefault("tags", {}).update(
+            {
+                "machine": coresys.machine,
+            }
+        )
 
     # Not full startup - missing information
     if coresys.core.state in (CoreState.INITIALIZE, CoreState.SETUP):
@@ -122,22 +125,22 @@ def filter_data(coresys: CoreSys, event: dict, hint: dict) -> dict:
         }
     )
 
-    if event.get("request"):
-        if event["request"].get("url"):
-            event["request"]["url"] = sanitize_url(event["request"]["url"])
+    if request := event.get("request"):
+        if request.get("url"):
+            request["url"] = sanitize_url(cast(str, request["url"]))
 
-        headers = event["request"].get("headers", {})
-        if hdrs.REFERER in headers:
-            headers[hdrs.REFERER] = sanitize_url(headers[hdrs.REFERER])
-        if HEADER_TOKEN in headers:
-            headers[HEADER_TOKEN] = "XXXXXXXXXXXXXXXXXXX"
-        if HEADER_TOKEN_OLD in headers:
-            headers[HEADER_TOKEN_OLD] = "XXXXXXXXXXXXXXXXXXX"
-        if hdrs.HOST in headers:
-            headers[hdrs.HOST] = sanitize_host(headers[hdrs.HOST])
-        if hdrs.X_FORWARDED_HOST in headers:
-            headers[hdrs.X_FORWARDED_HOST] = sanitize_host(
-                headers[hdrs.X_FORWARDED_HOST]
-            )
+        if headers := cast(dict, request.get("headers")):
+            if hdrs.REFERER in headers:
+                headers[hdrs.REFERER] = sanitize_url(headers[hdrs.REFERER])
+            if HEADER_TOKEN in headers:
+                headers[HEADER_TOKEN] = "XXXXXXXXXXXXXXXXXXX"
+            if HEADER_TOKEN_OLD in headers:
+                headers[HEADER_TOKEN_OLD] = "XXXXXXXXXXXXXXXXXXX"
+            if hdrs.HOST in headers:
+                headers[hdrs.HOST] = sanitize_host(headers[hdrs.HOST])
+            if hdrs.X_FORWARDED_HOST in headers:
+                headers[hdrs.X_FORWARDED_HOST] = sanitize_host(
+                    headers[hdrs.X_FORWARDED_HOST]
+                )
 
     return event
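
filter_data above is the kind of callable that plugs into Sentry's before_send hook, which may mutate the event in place or return None to drop it entirely. A reduced sketch of such a hook, assuming the sentry_sdk package; the scrub list and header name are illustrative, and the event is typed loosely as a dict:

import sentry_sdk

SENSITIVE_HEADERS = ("X-Supervisor-Token", "Authorization")  # assumed names

def before_send(event: dict, hint: dict) -> dict | None:
    # Mask sensitive request headers before the event leaves the process.
    if request := event.get("request"):
        if headers := request.get("headers"):
            for header in SENSITIVE_HEADERS:
                if header in headers:
                    headers[header] = "XXXXXXXXXXXXXXXXXXX"
    return event  # return None instead to drop the event entirely

sentry_sdk.init(dsn="", before_send=before_send)  # empty DSN disables sending
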
@@ -1,13 +1,12 @@
 """A collection of tasks."""
 
-import asyncio
-from collections.abc import Awaitable
 from datetime import datetime, timedelta
 import logging
+from typing import cast
 
 from ..addons.const import ADDON_UPDATE_CONDITIONS
-from ..backups.const import LOCATION_CLOUD_BACKUP
-from ..const import AddonState
+from ..backups.const import LOCATION_CLOUD_BACKUP, LOCATION_TYPE
+from ..const import ATTR_TYPE, AddonState
 from ..coresys import CoreSysAttributes
 from ..exceptions import (
     AddonsError,
@@ -15,7 +14,7 @@ from ..exceptions import (
     HomeAssistantError,
     ObserverError,
 )
-from ..homeassistant.const import LANDINGPAGE
+from ..homeassistant.const import LANDINGPAGE, WSType
 from ..jobs.decorator import Job, JobCondition, JobExecutionLimit
 from ..plugins.const import PLUGIN_UPDATE_CONDITIONS
 from ..utils.dt import utcnow
@@ -106,7 +105,6 @@ class Tasks(CoreSysAttributes):
     )
     async def _update_addons(self):
         """Check if an update is available for an Add-on and update it."""
-        start_tasks: list[Awaitable[None]] = []
         for addon in self.sys_addons.all:
             if not addon.is_installed or not addon.auto_update:
                 continue
@@ -124,6 +122,12 @@ class Tasks(CoreSysAttributes):
                 continue
+            # Delay auto-updates for a day in case of issues
+            if utcnow() < addon.latest_version_timestamp + timedelta(days=1):
+                _LOGGER.debug(
+                    "Not updating add-on %s from %s to %s as the latest version is less than a day old",
+                    addon.slug,
+                    addon.version,
+                    addon.latest_version,
+                )
+                continue
             if not addon.test_update_schema():
                 _LOGGER.warning(
@@ -131,16 +135,21 @@ class Tasks(CoreSysAttributes):
                 )
                 continue
 
-            # Run Add-on update sequential
-            # avoid issue on slow IO
             _LOGGER.info("Add-on auto update process %s", addon.slug)
-            try:
-                if start_task := await self.sys_addons.update(addon.slug, backup=True):
-                    start_tasks.append(start_task)
-            except AddonsError:
-                _LOGGER.error("Can't auto update Add-on %s", addon.slug)
-
-        await asyncio.gather(*start_tasks)
+            # Call Home Assistant Core to update add-on to make sure that backups
+            # get created through the Home Assistant Core API (categorized correctly).
+            # Ultimately auto updates should be handled by Home Assistant Core itself
+            # through a update entity feature.
+            message = {
+                ATTR_TYPE: WSType.HASSIO_UPDATE_ADDON,
+                "addon": addon.slug,
+                "backup": True,
+            }
+            _LOGGER.debug(
+                "Sending update add-on WebSocket command to Home Assistant Core: %s",
+                message,
+            )
+            await self.sys_homeassistant.websocket.async_send_command(message)
 
     @Job(
         name="tasks_update_supervisor",
@@ -370,6 +379,8 @@ class Tasks(CoreSysAttributes):
         ]
         for backup in old_backups:
             try:
-                await self.sys_backups.remove(backup, [LOCATION_CLOUD_BACKUP])
+                await self.sys_backups.remove(
+                    backup, [cast(LOCATION_TYPE, LOCATION_CLOUD_BACKUP)]
+                )
             except BackupFileNotFoundError as err:
                 _LOGGER.debug("Can't remove backup %s: %s", backup.slug, err)
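
The day-long auto-update delay added above is a plain timestamp comparison; a stand-alone sketch (the function name is illustrative):

from datetime import datetime, timedelta, timezone

def should_auto_update(latest_version_timestamp: datetime) -> bool:
    # Only auto-update once the latest release is at least a day old.
    now = datetime.now(timezone.utc)
    return now >= latest_version_timestamp + timedelta(days=1)

fresh = datetime.now(timezone.utc) - timedelta(hours=2)
aged = datetime.now(timezone.utc) - timedelta(days=2)
print(should_auto_update(fresh), should_auto_update(aged))  # False True
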
@@ -56,7 +56,7 @@ class MountManager(FileConfiguration, CoreSysAttributes):
     async def load_config(self) -> Self:
         """Load config in executor."""
         await super().load_config()
-        self._mounts: dict[str, Mount] = {
+        self._mounts = {
             mount[ATTR_NAME]: Mount.from_dict(self.coresys, mount)
             for mount in self._data[ATTR_MOUNTS]
         }
@@ -172,12 +172,12 @@ class MountManager(FileConfiguration, CoreSysAttributes):
         errors = await asyncio.gather(*mount_tasks, return_exceptions=True)
 
         for i in range(len(errors)):  # pylint: disable=consider-using-enumerate
-            if not errors[i]:
+            if not (err := errors[i]):
                 continue
             if mounts[i].failed_issue in self.sys_resolution.issues:
                 continue
-            if not isinstance(errors[i], MountError):
-                await async_capture_exception(errors[i])
+            if not isinstance(err, MountError):
+                await async_capture_exception(err)
 
             self.sys_resolution.add_issue(
                 evolve(mounts[i].failed_issue),
@@ -219,7 +219,7 @@ class MountManager(FileConfiguration, CoreSysAttributes):
         conditions=[JobCondition.MOUNT_AVAILABLE],
         on_condition=MountJobError,
     )
-    async def remove_mount(self, name: str, *, retain_entry: bool = False) -> None:
+    async def remove_mount(self, name: str, *, retain_entry: bool = False) -> Mount:
         """Remove a mount."""
         # Add mount name to job
         self.sys_jobs.current.reference = name
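
The loop above relies on asyncio.gather(..., return_exceptions=True), which returns one result-or-exception per task in input order, so failures can be matched back to their inputs by index. A runnable sketch of the same triage pattern:

import asyncio

async def mount(name: str) -> None:
    if name == "bad":
        raise OSError(f"cannot mount {name}")

async def main() -> None:
    names = ["good", "bad"]
    # Each slot is None on success or the raised exception on failure.
    errors = await asyncio.gather(*(mount(n) for n in names), return_exceptions=True)
    for i in range(len(errors)):
        if not (err := errors[i]):
            continue  # None means the task succeeded
        print(f"mount {names[i]} failed: {err}")

asyncio.run(main())
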
@@ -2,6 +2,7 @@
 
 from abc import ABC, abstractmethod
 import asyncio
+from collections.abc import Callable
 from functools import cached_property
 import logging
 from pathlib import Path, PurePath
@@ -9,14 +10,6 @@ from pathlib import Path, PurePath
 from dbus_fast import Variant
 from voluptuous import Coerce
 
-from ..const import (
-    ATTR_NAME,
-    ATTR_PASSWORD,
-    ATTR_PORT,
-    ATTR_TYPE,
-    ATTR_USERNAME,
-    ATTR_VERSION,
-)
 from ..coresys import CoreSys, CoreSysAttributes
 from ..dbus.const import (
     DBUS_ATTR_ACTIVE_STATE,
@@ -41,22 +34,13 @@ from ..exceptions import (
 from ..resolution.const import ContextType, IssueType
 from ..resolution.data import Issue
 from ..utils.sentry import async_capture_exception
-from .const import (
-    ATTR_PATH,
-    ATTR_READ_ONLY,
-    ATTR_SERVER,
-    ATTR_SHARE,
-    ATTR_USAGE,
-    MountCifsVersion,
-    MountType,
-    MountUsage,
-)
+from .const import MountCifsVersion, MountType, MountUsage
 from .validate import MountData
 
 _LOGGER: logging.Logger = logging.getLogger(__name__)
 
-COERCE_MOUNT_TYPE = Coerce(MountType)
-COERCE_MOUNT_USAGE = Coerce(MountUsage)
+COERCE_MOUNT_TYPE: Callable[[str], MountType] = Coerce(MountType)
+COERCE_MOUNT_USAGE: Callable[[str], MountUsage] = Coerce(MountUsage)
 
 
 class Mount(CoreSysAttributes, ABC):
@@ -80,7 +64,7 @@ class Mount(CoreSysAttributes, ABC):
         if cls not in [Mount, NetworkMount]:
             return cls(coresys, data)
 
-        type_ = COERCE_MOUNT_TYPE(data[ATTR_TYPE])
+        type_ = COERCE_MOUNT_TYPE(data["type"])
         if type_ == MountType.CIFS:
             return CIFSMount(coresys, data)
         if type_ == MountType.NFS:
@@ -90,32 +74,33 @@ class Mount(CoreSysAttributes, ABC):
     def to_dict(self, *, skip_secrets: bool = True) -> MountData:
         """Return dictionary representation."""
         return MountData(
-            name=self.name, type=self.type, usage=self.usage, read_only=self.read_only
+            name=self.name,
+            type=self.type,
+            usage=self.usage and self.usage.value,
+            read_only=self.read_only,
         )
 
     @property
     def name(self) -> str:
         """Get name."""
-        return self._data[ATTR_NAME]
+        return self._data["name"]
 
     @property
     def type(self) -> MountType:
         """Get mount type."""
-        return COERCE_MOUNT_TYPE(self._data[ATTR_TYPE])
+        return COERCE_MOUNT_TYPE(self._data["type"])
 
     @property
     def usage(self) -> MountUsage | None:
         """Get mount usage."""
-        return (
-            COERCE_MOUNT_USAGE(self._data[ATTR_USAGE])
-            if ATTR_USAGE in self._data
-            else None
-        )
+        if self._data["usage"] is None:
+            return None
+        return COERCE_MOUNT_USAGE(self._data["usage"])
 
     @property
     def read_only(self) -> bool:
         """Is mount read-only."""
-        return self._data.get(ATTR_READ_ONLY, False)
+        return self._data.get("read_only", False)
 
     @property
     @abstractmethod
@@ -186,20 +171,20 @@ class Mount(CoreSysAttributes, ABC):
     async def load(self) -> None:
         """Initialize object."""
         # If there's no mount unit, mount it to make one
-        if not await self._update_unit():
+        if not (unit := await self._update_unit()):
             await self.mount()
             return
 
-        await self._update_state_await(not_state=UnitActiveState.ACTIVATING)
+        await self._update_state_await(unit, not_state=UnitActiveState.ACTIVATING)
 
         # If mount is not available, try to reload it
         if not await self.is_mounted():
             await self.reload()
 
-    async def _update_state(self) -> UnitActiveState | None:
+    async def _update_state(self, unit: SystemdUnit) -> None:
         """Update mount unit state."""
         try:
-            self._state = await self.unit.get_active_state()
+            self._state = await unit.get_active_state()
         except DBusError as err:
             await async_capture_exception(err)
             raise MountError(
@@ -220,10 +205,10 @@ class Mount(CoreSysAttributes, ABC):
 
     async def update(self) -> bool:
         """Update info about mount from dbus. Return true if it is mounted and available."""
-        if not await self._update_unit():
+        if not (unit := await self._update_unit()):
             return False
 
-        await self._update_state()
+        await self._update_state(unit)
 
         # If active, dismiss corresponding failed mount issue if found
         if (
@@ -235,16 +220,14 @@ class Mount(CoreSysAttributes, ABC):
 
     async def _update_state_await(
         self,
+        unit: SystemdUnit,
         expected_states: list[UnitActiveState] | None = None,
         not_state: UnitActiveState = UnitActiveState.ACTIVATING,
     ) -> None:
         """Update state info about mount from dbus. Wait for one of expected_states to appear or state to change from not_state."""
-        if not self.unit:
-            return
-
         try:
-            async with asyncio.timeout(30), self.unit.properties_changed() as signal:
-                await self._update_state()
+            async with asyncio.timeout(30), unit.properties_changed() as signal:
+                await self._update_state(unit)
                 while (
                     expected_states
                     and self.state not in expected_states
@@ -312,8 +295,8 @@ class Mount(CoreSysAttributes, ABC):
                 f"Could not mount {self.name} due to: {err!s}", _LOGGER.error
             ) from err
 
-        if await self._update_unit():
-            await self._update_state_await(not_state=UnitActiveState.ACTIVATING)
+        if unit := await self._update_unit():
+            await self._update_state_await(unit, not_state=UnitActiveState.ACTIVATING)
 
         if not await self.is_mounted():
             raise MountActivationError(
@@ -323,17 +306,17 @@ class Mount(CoreSysAttributes, ABC):
 
     async def unmount(self) -> None:
         """Unmount using systemd."""
-        if not await self._update_unit():
+        if not (unit := await self._update_unit()):
             _LOGGER.info("Mount %s is not mounted, skipping unmount", self.name)
             return
 
-        await self._update_state()
+        await self._update_state(unit)
         try:
             if self.state != UnitActiveState.FAILED:
                 await self.sys_dbus.systemd.stop_unit(self.unit_name, StopUnitMode.FAIL)
 
             await self._update_state_await(
-                [UnitActiveState.INACTIVE, UnitActiveState.FAILED]
+                unit, [UnitActiveState.INACTIVE, UnitActiveState.FAILED]
             )
 
             if self.state == UnitActiveState.FAILED:
@@ -360,8 +343,10 @@ class Mount(CoreSysAttributes, ABC):
                 f"Could not reload mount {self.name} due to: {err!s}", _LOGGER.error
             ) from err
         else:
-            if await self._update_unit():
-                await self._update_state_await(not_state=UnitActiveState.ACTIVATING)
+            if unit := await self._update_unit():
+                await self._update_state_await(
+                    unit, not_state=UnitActiveState.ACTIVATING
+                )
 
         if not await self.is_mounted():
             raise MountActivationError(
@@ -381,18 +366,18 @@ class NetworkMount(Mount, ABC):
         """Return dictionary representation."""
         out = MountData(server=self.server, **super().to_dict())
         if self.port is not None:
-            out[ATTR_PORT] = self.port
+            out["port"] = self.port
         return out
 
     @property
     def server(self) -> str:
         """Get server."""
-        return self._data[ATTR_SERVER]
+        return self._data["server"]
 
     @property
     def port(self) -> int | None:
         """Get port, returns none if using the protocol default."""
-        return self._data.get(ATTR_PORT)
+        return self._data.get("port")
 
     @property
     def where(self) -> PurePath:
@@ -420,31 +405,31 @@ class CIFSMount(NetworkMount):
     def to_dict(self, *, skip_secrets: bool = True) -> MountData:
         """Return dictionary representation."""
         out = MountData(share=self.share, **super().to_dict())
-        if not skip_secrets and self.username is not None:
-            out[ATTR_USERNAME] = self.username
-            out[ATTR_PASSWORD] = self.password
-        out[ATTR_VERSION] = self.version
+        if not skip_secrets and self.username is not None and self.password is not None:
+            out["username"] = self.username
+            out["password"] = self.password
+        out["version"] = self.version
         return out
 
     @property
     def share(self) -> str:
         """Get share."""
-        return self._data[ATTR_SHARE]
+        return self._data["share"]
 
     @property
     def username(self) -> str | None:
         """Get username, returns none if auth is not used."""
-        return self._data.get(ATTR_USERNAME)
+        return self._data.get("username")
 
     @property
     def password(self) -> str | None:
         """Get password, returns none if auth is not used."""
-        return self._data.get(ATTR_PASSWORD)
+        return self._data.get("password")
 
     @property
     def version(self) -> str | None:
-        """Get password, returns none if auth is not used."""
-        version = self._data.get(ATTR_VERSION)
+        """Get cifs version, returns none if using default."""
+        version = self._data.get("version")
         if version == MountCifsVersion.LEGACY_1_0:
             return "1.0"
         if version == MountCifsVersion.LEGACY_2_0:
@@ -513,7 +498,7 @@ class NFSMount(NetworkMount):
     @property
     def path(self) -> PurePath:
         """Get path."""
-        return PurePath(self._data[ATTR_PATH])
+        return PurePath(self._data["path"])
 
     @property
     def what(self) -> str:
@@ -543,7 +528,7 @@ class BindMount(Mount):
     def create(
         coresys: CoreSys,
         name: str,
-        path: Path,
+        path: PurePath,
         usage: MountUsage | None = None,
         where: PurePath | None = None,
         read_only: bool = False,
@@ -568,7 +553,7 @@ class BindMount(Mount):
     @property
     def path(self) -> PurePath:
         """Get path."""
-        return PurePath(self._data[ATTR_PATH])
+        return PurePath(self._data["path"])
 
     @property
     def what(self) -> str:
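
The recurring `(unit := await self._update_unit())` refactor above threads the fetched unit through helpers explicitly, instead of having each helper re-read and re-check a nullable attribute. A self-contained sketch of the shape (all names here are illustrative):

import asyncio

class Unit:
    async def get_active_state(self) -> str:
        return "active"

async def find_unit(name: str) -> Unit | None:
    # Stand-in for looking up a systemd unit over D-Bus.
    return Unit() if name == "data.mount" else None

async def update_state(unit: Unit) -> str:
    # The caller already proved `unit` exists; no None-check needed here.
    return await unit.get_active_state()

async def load(name: str) -> None:
    if not (unit := await find_unit(name)):
        print(f"{name}: no unit, mounting fresh")
        return
    print(f"{name}:", await update_state(unit))

asyncio.run(load("data.mount"))
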
@@ -103,7 +103,7 @@ class MountData(TypedDict):
     name: str
     type: str
     read_only: bool
-    usage: NotRequired[str]
+    usage: str | None
 
     # CIFS and NFS fields
     server: NotRequired[str]
@@ -113,6 +113,7 @@ class MountData(TypedDict):
     share: NotRequired[str]
     username: NotRequired[str]
     password: NotRequired[str]
+    version: NotRequired[str | None]
 
     # NFS and Bind fields
     path: NotRequired[str]
@@ -5,7 +5,7 @@ from contextlib import suppress
 from dataclasses import dataclass
 import logging
 from pathlib import Path
-from typing import Any, Final
+from typing import Any, Final, cast
 
 from awesomeversion import AwesomeVersion
 
@@ -24,6 +24,7 @@ from ..exceptions import (
 )
 from ..jobs.const import JobCondition, JobExecutionLimit
 from ..jobs.decorator import Job
+from ..resolution.checks.base import CheckBase
 from ..resolution.checks.disabled_data_disk import CheckDisabledDataDisk
 from ..resolution.checks.multiple_data_disks import CheckMultipleDataDisks
 from ..utils.sentry import async_capture_exception
@@ -149,7 +150,7 @@ class DataDisk(CoreSysAttributes):
         Available disks are drives where nothing on it has been mounted
         and it can be formatted.
         """
-        available: list[UDisks2Drive] = []
+        available: list[Disk] = []
         for drive in self.sys_dbus.udisks2.drives:
             block_devices = self._get_block_devices_for_drive(drive)
             primary = _get_primary_block_device(block_devices)
@@ -166,12 +167,16 @@ class DataDisk(CoreSysAttributes):
     @property
     def check_multiple_data_disks(self) -> CheckMultipleDataDisks:
         """Resolution center check for multiple data disks."""
-        return self.sys_resolution.check.get("multiple_data_disks")
+        return cast(
+            CheckMultipleDataDisks, self.sys_resolution.check.get("multiple_data_disks")
+        )
 
     @property
     def check_disabled_data_disk(self) -> CheckDisabledDataDisk:
         """Resolution center check for disabled data disk."""
-        return self.sys_resolution.check.get("disabled_data_disk")
+        return cast(
+            CheckDisabledDataDisk, self.sys_resolution.check.get("disabled_data_disk")
+        )
 
     def _get_block_devices_for_drive(self, drive: UDisks2Drive) -> list[UDisks2Block]:
         """Get block devices for a drive."""
@@ -361,7 +366,7 @@ class DataDisk(CoreSysAttributes):
 
         try:
             partition_block = await UDisks2Block.new(
-                partition, self.sys_dbus.bus, sync_properties=False
+                partition, self.sys_dbus.connected_bus, sync_properties=False
             )
         except DBusError as err:
             raise HassOSDataDiskError(
@@ -388,7 +393,7 @@ class DataDisk(CoreSysAttributes):
             properties[DBUS_IFACE_BLOCK][DBUS_ATTR_ID_LABEL]
             == FILESYSTEM_LABEL_DATA_DISK
         ):
-            check = self.check_multiple_data_disks
+            check: CheckBase = self.check_multiple_data_disks
         elif (
             properties[DBUS_IFACE_BLOCK][DBUS_ATTR_ID_LABEL]
             == FILESYSTEM_LABEL_DISABLED_DATA_DISK
@@ -411,7 +416,7 @@ class DataDisk(CoreSysAttributes):
             and issue.context == self.check_multiple_data_disks.context
             for issue in self.sys_resolution.issues
         ):
-            check = self.check_multiple_data_disks
+            check: CheckBase = self.check_multiple_data_disks
         elif any(
             issue.type == self.check_disabled_data_disk.issue
             and issue.context == self.check_disabled_data_disk.context
@@ -1,11 +1,11 @@
 """OS support on supervisor."""
 
-from collections.abc import Awaitable
 from dataclasses import dataclass
 from datetime import datetime
 import errno
 import logging
 from pathlib import Path, PurePath
+from typing import cast
 
 import aiohttp
 from awesomeversion import AwesomeVersion, AwesomeVersionException
@@ -61,8 +61,8 @@ class SlotStatus:
             device=PurePath(data["device"]),
             bundle_compatible=data.get("bundle.compatible"),
             sha256=data.get("sha256"),
-            size=data.get("size"),
-            installed_count=data.get("installed.count"),
+            size=cast(int | None, data.get("size")),
+            installed_count=cast(int | None, data.get("installed.count")),
             bundle_version=AwesomeVersion(data["bundle.version"])
             if "bundle.version" in data
             else None,
@@ -70,51 +70,17 @@ class SlotStatus:
             if "installed.timestamp" in data
             else None,
             status=data.get("status"),
-            activated_count=data.get("activated.count"),
+            activated_count=cast(int | None, data.get("activated.count")),
             activated_timestamp=datetime.fromisoformat(data["activated.timestamp"])
             if "activated.timestamp" in data
             else None,
-            boot_status=data.get("boot-status"),
+            boot_status=RaucState(data["boot-status"])
+            if "boot-status" in data
+            else None,
             bootname=data.get("bootname"),
             parent=data.get("parent"),
         )
 
-    def to_dict(self) -> SlotStatusDataType:
-        """Get dictionary representation."""
-        out: SlotStatusDataType = {
-            "class": self.class_,
-            "type": self.type_,
-            "state": self.state,
-            "device": self.device.as_posix(),
-        }
-
-        if self.bundle_compatible is not None:
-            out["bundle.compatible"] = self.bundle_compatible
-        if self.sha256 is not None:
-            out["sha256"] = self.sha256
-        if self.size is not None:
-            out["size"] = self.size
-        if self.installed_count is not None:
-            out["installed.count"] = self.installed_count
-        if self.bundle_version is not None:
-            out["bundle.version"] = str(self.bundle_version)
-        if self.installed_timestamp is not None:
-            out["installed.timestamp"] = str(self.installed_timestamp)
-        if self.status is not None:
-            out["status"] = self.status
-        if self.activated_count is not None:
-            out["activated.count"] = self.activated_count
-        if self.activated_timestamp:
-            out["activated.timestamp"] = str(self.activated_timestamp)
-        if self.boot_status:
-            out["boot-status"] = self.boot_status
-        if self.bootname is not None:
-            out["bootname"] = self.bootname
-        if self.parent is not None:
-            out["parent"] = self.parent
-
-        return out
-
 
 class OSManager(CoreSysAttributes):
     """OS interface inside supervisor."""
@@ -148,7 +114,11 @@ class OSManager(CoreSysAttributes):
     def need_update(self) -> bool:
         """Return true if a HassOS update is available."""
         try:
-            return self.version < self.latest_version
+            return (
+                self.version is not None
+                and self.latest_version is not None
+                and self.version < self.latest_version
+            )
         except (AwesomeVersionException, TypeError):
             return False
 
@@ -176,6 +146,9 @@ class OSManager(CoreSysAttributes):
 
     def get_slot_name(self, boot_name: str) -> str:
         """Get slot name from boot name."""
+        if not self._slots:
+            raise HassOSSlotNotFound()
+
         for name, status in self._slots.items():
             if status.bootname == boot_name:
                 return name
@@ -288,11 +261,8 @@ class OSManager(CoreSysAttributes):
         conditions=[JobCondition.HAOS],
         on_condition=HassOSJobError,
     )
-    async def config_sync(self) -> Awaitable[None]:
-        """Trigger a host config reload from usb.
-
-        Return a coroutine.
-        """
+    async def config_sync(self) -> None:
+        """Trigger a host config reload from usb."""
         _LOGGER.info(
             "Synchronizing configuration from USB with Home Assistant Operating System."
         )
@@ -314,6 +284,10 @@ class OSManager(CoreSysAttributes):
         version = version or self.latest_version
 
         # Check installed version
+        if not version:
+            raise HassOSUpdateError(
+                "No version information available, cannot update", _LOGGER.error
+            )
         if version == self.version:
             raise HassOSUpdateError(
                 f"Version {version!s} is already installed", _LOGGER.warning
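
need_update above now guards both operands before comparing, so an unknown current or latest version means "no update" rather than a TypeError. A stand-alone sketch of the same guard, using plain tuple comparison in place of AwesomeVersion:

def need_update(version: str | None, latest: str | None) -> bool:
    try:
        # Compare only when both versions are known; any parse or
        # comparison error is treated as "no update available".
        return (
            version is not None
            and latest is not None
            and tuple(map(int, version.split("."))) < tuple(map(int, latest.split(".")))
        )
    except (TypeError, ValueError):
        return False

print(need_update("15.0", "15.2"), need_update(None, "15.2"))  # True False
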
@@ -22,6 +22,7 @@ from ..exceptions import (
     AudioUpdateError,
     ConfigurationFileError,
     DockerError,
+    PluginError,
 )
 from ..jobs.const import JobExecutionLimit
 from ..jobs.decorator import Job
@@ -127,7 +128,7 @@ class PluginAudio(PluginBase):
         """Update Audio plugin."""
         try:
             await super().update(version)
-        except DockerError as err:
+        except (DockerError, PluginError) as err:
             raise AudioUpdateError("Audio update failed", _LOGGER.error) from err
 
     async def restart(self) -> None:
@@ -63,7 +63,11 @@ class PluginBase(ABC, FileConfiguration, CoreSysAttributes):
     def need_update(self) -> bool:
         """Return True if an update is available."""
         try:
-            return self.version < self.latest_version
+            return (
+                self.version is not None
+                and self.latest_version is not None
+                and self.version < self.latest_version
+            )
         except (AwesomeVersionException, TypeError):
             return False
 
@@ -153,6 +157,10 @@ class PluginBase(ABC, FileConfiguration, CoreSysAttributes):
     async def start(self) -> None:
         """Start system plugin."""
 
+    @abstractmethod
+    async def stop(self) -> None:
+        """Stop system plugin."""
+
     async def load(self) -> None:
         """Load system plugin."""
         self.start_watchdog()
@@ -160,14 +168,14 @@ class PluginBase(ABC, FileConfiguration, CoreSysAttributes):
         # Check plugin state
         try:
             # Evaluate Version if we lost this information
-            if not self.version:
-                self.version = await self.instance.get_latest_version()
+            if self.version:
+                version = self.version
+            else:
+                self.version = version = await self.instance.get_latest_version()
 
-            await self.instance.attach(
-                version=self.version, skip_state_event_if_down=True
-            )
+            await self.instance.attach(version=version, skip_state_event_if_down=True)
 
-            await self.instance.check_image(self.version, self.default_image)
+            await self.instance.check_image(version, self.default_image)
         except DockerError:
             _LOGGER.info(
                 "No %s plugin Docker image %s found.", self.slug, self.instance.image
@@ -177,7 +185,7 @@ class PluginBase(ABC, FileConfiguration, CoreSysAttributes):
             with suppress(PluginError):
                 await self.install()
         else:
-            self.version = self.instance.version
+            self.version = self.instance.version or version
             self.image = self.default_image
             await self.save_data()
 
@@ -194,11 +202,10 @@ class PluginBase(ABC, FileConfiguration, CoreSysAttributes):
             if not self.latest_version:
                 await self.sys_updater.reload()
 
-            if self.latest_version:
+            if to_version := self.latest_version:
                 with suppress(DockerError):
-                    await self.instance.install(
-                        self.latest_version, image=self.default_image
-                    )
+                    await self.instance.install(to_version, image=self.default_image)
+                    self.version = self.instance.version or to_version
                     break
             _LOGGER.warning(
                 "Error on installing %s plugin, retrying in 30sec", self.slug
@@ -206,23 +213,28 @@ class PluginBase(ABC, FileConfiguration, CoreSysAttributes):
             await asyncio.sleep(30)
 
         _LOGGER.info("%s plugin now installed", self.slug)
-        self.version = self.instance.version
         self.image = self.default_image
         await self.save_data()
 
     async def update(self, version: str | None = None) -> None:
         """Update system plugin."""
-        version = version or self.latest_version
+        to_version = AwesomeVersion(version) if version else self.latest_version
+        if not to_version:
+            raise PluginError(
+                f"Cannot determine latest version of plugin {self.slug} for update",
+                _LOGGER.error,
+            )
+
         old_image = self.image
 
-        if version == self.version:
+        if to_version == self.version:
             _LOGGER.warning(
-                "Version %s is already installed for %s", version, self.slug
+                "Version %s is already installed for %s", to_version, self.slug
            )
            return
 
-        await self.instance.update(version, image=self.default_image)
-        self.version = self.instance.version
+        await self.instance.update(to_version, image=self.default_image)
+        self.version = self.instance.version or to_version
         self.image = self.default_image
         await self.save_data()
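
update() above resolves its target version before doing any work: the explicit argument wins, then the latest known version, and it fails early when neither exists. A minimal sketch of that resolution order (the function name is illustrative):

def resolve_target_version(requested: str | None, latest: str | None) -> str:
    # Prefer an explicit version, fall back to the latest known one,
    # and refuse to proceed with no version information at all.
    to_version = requested or latest
    if not to_version:
        raise RuntimeError("Cannot determine version for update")
    return to_version

print(resolve_target_version(None, "2025.6.0"))  # falls back to latest
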
@@ -14,7 +14,7 @@ from ..coresys import CoreSys
 from ..docker.cli import DockerCli
 from ..docker.const import ContainerState
 from ..docker.stats import DockerStats
-from ..exceptions import CliError, CliJobError, CliUpdateError, DockerError
+from ..exceptions import CliError, CliJobError, CliUpdateError, DockerError, PluginError
 from ..jobs.const import JobExecutionLimit
 from ..jobs.decorator import Job
 from ..utils.sentry import async_capture_exception
@@ -53,7 +53,7 @@ class PluginCli(PluginBase):
         return self.sys_updater.version_cli
 
     @property
-    def supervisor_token(self) -> str:
+    def supervisor_token(self) -> str | None:
         """Return an access token for the Supervisor API."""
         return self._data.get(ATTR_ACCESS_TOKEN)
 
@@ -66,7 +66,7 @@ class PluginCli(PluginBase):
         """Update local HA cli."""
         try:
             await super().update(version)
-        except DockerError as err:
+        except (DockerError, PluginError) as err:
             raise CliUpdateError("CLI update failed", _LOGGER.error) from err
 
     async def start(self) -> None:
@ -15,7 +15,8 @@ from awesomeversion import AwesomeVersion
|
||||
import jinja2
|
||||
import voluptuous as vol
|
||||
|
||||
from ..const import ATTR_SERVERS, DNS_SUFFIX, LogLevel
|
||||
from ..bus import EventListener
|
||||
from ..const import ATTR_SERVERS, DNS_SUFFIX, BusEvent, LogLevel
|
||||
from ..coresys import CoreSys
|
||||
from ..dbus.const import MulticastProtocolEnabled
|
||||
from ..docker.const import ContainerState
|
||||
@ -28,6 +29,7 @@ from ..exceptions import (
|
||||
CoreDNSJobError,
|
||||
CoreDNSUpdateError,
|
||||
DockerError,
|
||||
PluginError,
|
||||
)
|
||||
from ..jobs.const import JobExecutionLimit
|
||||
from ..jobs.decorator import Job
|
||||
@ -71,11 +73,17 @@ class PluginDns(PluginBase):
|
||||
self.slug = "dns"
|
||||
self.coresys: CoreSys = coresys
|
||||
self.instance: DockerDNS = DockerDNS(coresys)
|
||||
self.resolv_template: jinja2.Template | None = None
|
||||
self.hosts_template: jinja2.Template | None = None
|
||||
self._resolv_template: jinja2.Template | None = None
|
||||
self._hosts_template: jinja2.Template | None = None
|
||||
|
||||
self._hosts: list[HostEntry] = []
|
||||
self._loop: bool = False
|
||||
self._cached_locals: list[str] | None = None
|
||||
|
||||
# Debouncing system for rapid local changes
|
||||
self._locals_changed_handle: asyncio.TimerHandle | None = None
|
||||
self._restart_after_locals_change_handle: asyncio.Task | None = None
|
||||
self._connectivity_check_listener: EventListener | None = None
|
||||
|
||||
@property
|
||||
def hosts(self) -> Path:
|
||||
@ -90,6 +98,12 @@ class PluginDns(PluginBase):
|
||||
@property
|
||||
def locals(self) -> list[str]:
|
||||
"""Return list of local system DNS servers."""
|
||||
if self._cached_locals is None:
|
||||
self._cached_locals = self._compute_locals()
|
||||
return self._cached_locals
|
||||
|
||||
def _compute_locals(self) -> list[str]:
|
||||
"""Compute list of local system DNS servers."""
|
||||
servers: list[str] = []
|
||||
for server in [
|
||||
f"dns://{server!s}" for server in self.sys_host.network.dns_servers
|
||||
@ -99,6 +113,52 @@ class PluginDns(PluginBase):
|
||||
|
||||
return servers
|
||||
|
||||
async def _on_dns_container_running(self, event: DockerContainerStateEvent) -> None:
|
||||
"""Handle DNS container state change to running and trigger connectivity check."""
|
||||
if event.name == self.instance.name and event.state == ContainerState.RUNNING:
|
||||
# Wait before CoreDNS actually becomes available
|
||||
await asyncio.sleep(5)
|
||||
|
||||
_LOGGER.debug("CoreDNS started, checking connectivity")
|
||||
await self.sys_supervisor.check_connectivity()
|
||||
|
||||
async def _restart_dns_after_locals_change(self) -> None:
|
||||
"""Restart DNS after a debounced delay for local changes."""
|
||||
old_locals = self._cached_locals
|
||||
new_locals = self._compute_locals()
|
||||
if old_locals == new_locals:
|
||||
return
|
||||
|
||||
_LOGGER.debug("DNS locals changed from %s to %s", old_locals, new_locals)
|
||||
self._cached_locals = new_locals
|
||||
if not await self.instance.is_running():
|
||||
return
|
||||
|
||||
await self.restart()
|
||||
self._restart_after_locals_change_handle = None
|
||||
|
||||
def _trigger_restart_dns_after_locals_change(self) -> None:
|
||||
"""Trigger a restart of DNS after local changes."""
|
||||
# Cancel existing restart task if any
|
||||
if self._restart_after_locals_change_handle:
|
||||
self._restart_after_locals_change_handle.cancel()
|
||||
|
||||
self._restart_after_locals_change_handle = self.sys_create_task(
|
||||
self._restart_dns_after_locals_change()
|
||||
)
|
||||
self._locals_changed_handle = None
|
||||
|
||||
def notify_locals_changed(self) -> None:
|
||||
"""Schedule a debounced DNS restart for local changes."""
|
||||
# Cancel existing timer if any
|
||||
if self._locals_changed_handle:
|
||||
self._locals_changed_handle.cancel()
|
||||
|
||||
# Schedule new timer with 1 second delay
|
||||
self._locals_changed_handle = self.sys_call_later(
|
||||
1.0, self._trigger_restart_dns_after_locals_change
|
||||
)
|
||||
|
||||
@property
|
||||
def servers(self) -> list[str]:
|
||||
"""Return list of DNS servers."""
|
||||
@ -147,11 +207,25 @@ class PluginDns(PluginBase):
|
||||
"""Set fallback DNS enabled."""
|
||||
self._data[ATTR_FALLBACK] = value
|
||||
|
||||
@property
|
||||
def hosts_template(self) -> jinja2.Template:
|
||||
"""Get hosts jinja template."""
|
||||
if not self._hosts_template:
|
||||
raise RuntimeError("Hosts template not set!")
|
||||
return self._hosts_template
|
||||
|
||||
@property
|
||||
def resolv_template(self) -> jinja2.Template:
|
||||
"""Get resolv jinja template."""
|
||||
if not self._resolv_template:
|
||||
raise RuntimeError("Resolv template not set!")
|
||||
return self._resolv_template
|
||||
|
||||
async def load(self) -> None:
|
||||
"""Load DNS setup."""
|
||||
# Initialize CoreDNS Template
|
||||
try:
|
||||
self.resolv_template = jinja2.Template(
|
||||
self._resolv_template = jinja2.Template(
|
||||
await self.sys_run_in_executor(RESOLV_TMPL.read_text, encoding="utf-8")
|
||||
)
|
||||
except OSError as err:
|
||||
@ -162,7 +236,7 @@ class PluginDns(PluginBase):
|
||||
_LOGGER.error("Can't read resolve.tmpl: %s", err)
|
||||
|
||||
try:
|
||||
self.hosts_template = jinja2.Template(
|
||||
self._hosts_template = jinja2.Template(
|
||||
await self.sys_run_in_executor(HOSTS_TMPL.read_text, encoding="utf-8")
|
||||
)
|
||||
except OSError as err:
|
||||
@@ -173,11 +247,26 @@ class PluginDns(PluginBase):
             _LOGGER.error("Can't read hosts.tmpl: %s", err)
 
         await self._init_hosts()
 
+        # Register Docker event listener for connectivity checks
+        if not self._connectivity_check_listener:
+            self._connectivity_check_listener = self.sys_bus.register_event(
+                BusEvent.DOCKER_CONTAINER_STATE_CHANGE, self._on_dns_container_running
+            )
+
         await super().load()
 
         # Update supervisor
-        await self._write_resolv(HOST_RESOLV)
-        await self.sys_supervisor.check_connectivity()
+        # Resolv template should always be set but just in case don't fail load
+        if self._resolv_template:
+            await self._write_resolv(HOST_RESOLV)
+
+        # Reinitializing aiohttp.ClientSession after DNS setup makes sure that
+        # aiodns is using the right DNS servers (see #5857).
+        # At this point it should be fairly safe to replace the session since
+        # we only use the session synchronously during setup and not through the
+        # API which previously caused issues (see #5851).
+        await self.coresys.init_websession()
 
     async def install(self) -> None:
         """Install CoreDNS."""
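The comment block captures the ordering constraint: an aiohttp session built before CoreDNS is configured may carry resolver state pointing at stale DNS servers, so the session is rebuilt once DNS is up. `init_websession()` is Supervisor-internal; the sketch below only illustrates the underlying idea with stock aiohttp names (`AsyncResolver` requires the aiodns package):

import aiohttp
from aiohttp.resolver import AsyncResolver  # backed by aiodns

async def recreate_session(
    old: aiohttp.ClientSession | None,
) -> aiohttp.ClientSession:
    """Return a fresh ClientSession whose resolver sees the new DNS config."""
    if old is not None:
        await old.close()  # drop the old connector and its resolver state
    return aiohttp.ClientSession(
        connector=aiohttp.TCPConnector(resolver=AsyncResolver())
    )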
@@ -195,7 +284,7 @@ class PluginDns(PluginBase):
         """Update CoreDNS plugin."""
         try:
             await super().update(version)
-        except DockerError as err:
+        except (DockerError, PluginError) as err:
             raise CoreDNSUpdateError("CoreDNS update failed", _LOGGER.error) from err
 
     async def restart(self) -> None:
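The same one-line change recurs in the multicast and observer plugins below: `super().update()` can now surface `PluginError` in addition to `DockerError`, and each plugin folds both into its own update error so callers keep seeing a single exception type. A standalone sketch with illustrative exception classes (the real `CoreDNSUpdateError` also takes a logger argument):

import asyncio

class PluginError(Exception):
    """Base error for plugin operations."""

class DockerError(Exception):
    """Error from the Docker layer."""

class CoreDNSUpdateError(PluginError):
    """CoreDNS update failed."""

async def update_base(version: str) -> None:
    raise PluginError(f"no image found for {version}")

async def update(version: str) -> None:
    try:
        await update_base(version)
    except (DockerError, PluginError) as err:
        # Both failure modes collapse into the plugin-specific error,
        # keeping the original exception chained on __cause__.
        raise CoreDNSUpdateError("CoreDNS update failed") from err

async def main() -> None:
    try:
        await update("2025.05.0")
    except CoreDNSUpdateError as err:
        print(err, "| caused by:", repr(err.__cause__))

asyncio.run(main())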
@@ -205,7 +294,7 @@ class PluginDns(PluginBase):
         try:
             await self.instance.restart()
         except DockerError as err:
-            raise CoreDNSError("Can't start CoreDNS plugin", _LOGGER.error) from err
+            raise CoreDNSError("Can't restart CoreDNS plugin", _LOGGER.error) from err
 
     async def start(self) -> None:
         """Run CoreDNS."""
@@ -220,6 +309,16 @@ class PluginDns(PluginBase):
 
     async def stop(self) -> None:
         """Stop CoreDNS."""
+        # Cancel any pending locals change timer
+        if self._locals_changed_handle:
+            self._locals_changed_handle.cancel()
+            self._locals_changed_handle = None
+
+        # Cancel any pending restart before stopping
+        if self._restart_after_locals_change_handle:
+            self._restart_after_locals_change_handle.cancel()
+            self._restart_after_locals_change_handle = None
+
         _LOGGER.info("Stopping CoreDNS plugin")
         try:
             await self.instance.stop()
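`stop()` now tears down both debounce handles so no restart can fire on a plugin that is going down: the `TimerHandle` from `sys_call_later` and the `Task` from `sys_create_task` each expose `cancel()`, but a cancelled task must still be awaited to observe the cancellation. A minimal asyncio sketch of that shutdown path, names illustrative:

import asyncio

async def demo() -> None:
    loop = asyncio.get_running_loop()

    async def restart() -> None:
        print("restart!")  # must never print on the shutdown path

    # The two handles the DNS plugin tracks.
    timer = loop.call_later(1.0, lambda: loop.create_task(restart()))
    task = loop.create_task(restart())

    # Shutdown: cancel the pending timer, then the in-flight restart task.
    timer.cancel()  # TimerHandle.cancel() stops the callback from firing
    task.cancel()   # Task.cancel() injects CancelledError into the coroutine
    try:
        await task
    except asyncio.CancelledError:
        pass
    print("stopped cleanly")

asyncio.run(demo())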
@@ -422,12 +521,6 @@ class PluginDns(PluginBase):
 
     async def _write_resolv(self, resolv_conf: Path) -> None:
         """Update/Write resolv.conf file."""
-        if not self.resolv_template:
-            _LOGGER.warning(
-                "Resolv template is missing, cannot write/update %s", resolv_conf
-            )
-            return
-
         nameservers = [str(self.sys_docker.network.dns), "127.0.0.11"]
 
         # Read resolv config
supervisor/plugins/multicast.py

@@ -16,6 +16,7 @@ from ..exceptions import (
     MulticastError,
     MulticastJobError,
     MulticastUpdateError,
+    PluginError,
 )
 from ..jobs.const import JobExecutionLimit
 from ..jobs.decorator import Job
@@ -63,7 +64,7 @@ class PluginMulticast(PluginBase):
         """Update Multicast plugin."""
         try:
             await super().update(version)
-        except DockerError as err:
+        except (DockerError, PluginError) as err:
             raise MulticastUpdateError(
                 "Multicast update failed", _LOGGER.error
             ) from err
supervisor/plugins/observer.py

@@ -19,6 +19,7 @@ from ..exceptions import (
     ObserverError,
     ObserverJobError,
     ObserverUpdateError,
+    PluginError,
 )
 from ..jobs.const import JobExecutionLimit
 from ..jobs.decorator import Job
@@ -58,7 +59,7 @@ class PluginObserver(PluginBase):
         return self.sys_updater.version_observer
 
     @property
-    def supervisor_token(self) -> str:
+    def supervisor_token(self) -> str | None:
         """Return an access token for the Observer API."""
         return self._data.get(ATTR_ACCESS_TOKEN)
 
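The annotation change is a correctness fix rather than a behavior change: `dict.get()` returns `None` for a missing key, so `str` alone understated the return type. A tiny self-contained illustration, with the property simplified to a plain function:

ATTR_ACCESS_TOKEN = "access_token"

def supervisor_token(data: dict[str, str]) -> str | None:
    """dict.get yields None when the key is absent, hence `str | None`."""
    return data.get(ATTR_ACCESS_TOKEN)

assert supervisor_token({}) is None
assert supervisor_token({ATTR_ACCESS_TOKEN: "abc123"}) == "abc123"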
@@ -71,7 +72,7 @@ class PluginObserver(PluginBase):
         """Update local HA observer."""
         try:
             await super().update(version)
-        except DockerError as err:
+        except (DockerError, PluginError) as err:
             raise ObserverUpdateError(
                 "HA observer update failed", _LOGGER.error
             ) from err
@@ -90,6 +91,10 @@ class PluginObserver(PluginBase):
             _LOGGER.error("Can't start observer plugin")
             raise ObserverError() from err
 
+    async def stop(self) -> None:
+        """Raise. Supervisor should not stop observer."""
+        raise RuntimeError("Stopping observer without a restart is not supported!")
+
     async def stats(self) -> DockerStats:
         """Return stats of observer."""
         try: