Mirror of https://github.com/home-assistant/supervisor.git (synced 2025-07-31 04:56:31 +00:00)
Compare commits
136 Commits
6871ea4b81, cf77ab2290, ceeffa3284, 31f2f70cd9, deac85bddb, 7dcf5ba631, a004830131, a8cc6c416d,
74b26642b0, 5e26ab5f4a, a841cb8282, 3b1b03c8a7, 680428f304, f34128c37e, 2ed0682b34, fbb0915ef8,
780ae1e15c, c617358855, b679c4f4d8, c946c421f2, aeabf7ea25, 365b838abf, 99c040520e, eefe2f2e06,
a366e36b37, 27a2fde9e1, 9a0f530a2f, baf9695cf7, 7873c457d5, cbc48c381f, 11e37011bd, cfda559a90,
806bd9f52c, 953f7d01d7, 381e719a0e, 296071067d, 8336537f51, 5c90a00263, 1f2bf77784, 9aa4f381b8,
ae036ceffe, f0ea0d4a44, abc44946bb, 3e20a0937d, 6cebf52249, bc57deb474, 38750d74a8, d1c1a2d418,
cf32f036c0, b8852872fe, 779f47e25d, be8b36b560, 8378d434d4, 0b79e09bc0, d747a59696, 3ee7c082ec,
3f921e50b3, 0370320f75, 1e19e26ef3, e1a18eeba8, b030879efd, dfa1602ac6, bbda943583, aea15b65b7,
5c04249e41, 456cec7ed1, 52a519e55c, fcb20d0ae8, 9b3f2b17bd, 3d026b9534, 0e8ace949a, 1fe6f8ad99,
9ef2352d12, 2543bcae29, ad9de9f73c, a5556651ae, ac28deff6d, 82ee4bc441, bdbd09733a, d5b5a328d7,
52b24e177f, e10c58c424, 9682870c2c, fd0b894d6a, 697515b81f, d912c234fa, e8445ae8f2, 6710439ce5,
95eec03c91, 9b686a2d9a, 063d69da90, baaf04981f, bdb25a7ff8, ad2d6a3156, 42f885595e, 2a88cb9339,
4d1a5e2dc2, 705e76abe3, 7f54383147, 63fde3b410, 5285e60cd3, 2a1e32bb36, a2251e0729, 1efee641ba,
bbb8fa0b92, 7593f857e8, 87232cf1e4, 9e6a4d65cd, c80fbd77c8, a452969ffe, 89fa5c9c7a, 73069b628e,
8251b6c61c, 1faf529b42, 86c016b35d, 4f35759fe3, 3b575eedba, 6e6fe5ba39, b5a7e521ae, bac7c21fe8,
2eb9ec20d6, 406348c068, 5e3f4e8ff3, 31a67bc642, d0d11db7b1, cbf4b4e27e, c855eaab52, 6bac751c4c,
da0ae75e8e, 154aeaee87, b9bbb99f37, ff849ce692, 24456efb6b, 0cd9d04e63, 39bd20c0e7, 481bbc5be8
.github/ISSUE_TEMPLATE.md (vendored, 69 lines removed)
@@ -1,69 +0,0 @@
----
-name: Report a bug with the Supervisor on a supported System
-about: Report an issue related to the Home Assistant Supervisor.
-labels: bug
----
-
-<!-- READ THIS FIRST:
-- If you need additional help with this template please refer to https://www.home-assistant.io/help/reporting_issues/
-- This is for bugs only. Feature and enhancement requests should go in our community forum: https://community.home-assistant.io/c/feature-requests
-- Provide as many details as possible. Paste logs, configuration sample and code into the backticks. Do not delete any text from this template!
-- If you have a problem with an add-on, make an issue in it's repository.
--->
-
-<!--
-Important: You can only fill a bug repport for an supported system! If you run an unsupported installation. This report would be closed without comment.
--->
-
-### Describe the issue
-
-<!-- Provide as many details as possible. -->
-
-### Steps to reproduce
-
-<!-- What do you do to encounter the issue. -->
-
-1. ...
-2. ...
-3. ...
-
-### Enviroment details
-
-<!-- You can find these details in the system tab of the supervisor panel, or by using the `ha` CLI. -->
-
-- **Operating System:**: xxx
-- **Supervisor version:**: xxx
-- **Home Assistant version**: xxx
-
-### Supervisor logs
-
-<details>
-<summary>Supervisor logs</summary>
-<!--
-- Frontend -> Supervisor -> System
-- Or use this command: ha supervisor logs
-- Logs are more than just errors, even if you don't think it's important, it is.
--->
-
-```
-Paste supervisor logs here
-
-```
-
-</details>
-
-### System Information
-
-<details>
-<summary>System Information</summary>
-<!--
-- Use this command: ha info
--->
-
-```
-Paste system info here
-
-```
-
-</details>
-
.github/ISSUE_TEMPLATE/bug_report.yml (vendored, 9 lines changed)
@@ -1,6 +1,5 @@
-name: Bug Report Form
+name: Report an issue with Home Assistant Supervisor
 description: Report an issue related to the Home Assistant Supervisor.
-labels: bug
 body:
   - type: markdown
     attributes:

@@ -9,7 +8,7 @@ body:

        If you have a feature or enhancement request, please use the [feature request][fr] section of our [Community Forum][fr].

-        [fr]: https://community.home-assistant.io/c/feature-requests
+        [fr]: https://github.com/orgs/home-assistant/discussions
  - type: textarea
    validations:
      required: true

@@ -76,7 +75,7 @@ body:
      description: >
        The System information can be found in [Settings -> System -> Repairs -> (three dot menu) -> System Information](https://my.home-assistant.io/redirect/system_health/).
        Click the copy button at the bottom of the pop-up and paste it here.

        [](https://my.home-assistant.io/redirect/system_health/)
  - type: textarea
    attributes:

@@ -86,7 +85,7 @@ body:
        Supervisor diagnostics can be found in [Settings -> Devices & services](https://my.home-assistant.io/redirect/integrations/).
        Find the card that says `Home Assistant Supervisor`, open it, and select the three dot menu of the Supervisor integration entry
        and select 'Download diagnostics'.

        **Please drag-and-drop the downloaded file into the textbox below. Do not copy and paste its contents.**
  - type: textarea
    attributes:
.github/ISSUE_TEMPLATE/config.yml (vendored, 2 lines changed)
@@ -13,7 +13,7 @@ contact_links:
    about: Our documentation has its own issue tracker. Please report issues with the website there.

  - name: Request a feature for the Supervisor
-    url: https://community.home-assistant.io/c/feature-requests
+    url: https://github.com/orgs/home-assistant/discussions
    about: Request an new feature for the Supervisor.

  - name: I have a question or need support
.github/ISSUE_TEMPLATE/task.yml (vendored, new file, 53 lines added)
name: Task
description: For staff only - Create a task
type: Task
body:
  - type: markdown
    attributes:
      value: |
        ## ⚠️ RESTRICTED ACCESS

        **This form is restricted to Open Home Foundation staff and authorized contributors only.**

        If you are a community member wanting to contribute, please:
        - For bug reports: Use the [bug report form](https://github.com/home-assistant/supervisor/issues/new?template=bug_report.yml)
        - For feature requests: Submit to [Feature Requests](https://github.com/orgs/home-assistant/discussions)

        ---

        ### For authorized contributors

        Use this form to create tasks for development work, improvements, or other actionable items that need to be tracked.
  - type: textarea
    id: description
    attributes:
      label: Description
      description: |
        Provide a clear and detailed description of the task that needs to be accomplished.

        Be specific about what needs to be done, why it's important, and any constraints or requirements.
      placeholder: |
        Describe the task, including:
        - What needs to be done
        - Why this task is needed
        - Expected outcome
        - Any constraints or requirements
    validations:
      required: true
  - type: textarea
    id: additional_context
    attributes:
      label: Additional context
      description: |
        Any additional information, links, research, or context that would be helpful.

        Include links to related issues, research, prototypes, roadmap opportunities etc.
      placeholder: |
        - Roadmap opportunity: [link]
        - Epic: [link]
        - Feature request: [link]
        - Technical design documents: [link]
        - Prototype/mockup: [link]
        - Dependencies: [links]
    validations:
      required: false
.github/copilot-instructions.md (vendored, new file, 288 lines added)
# GitHub Copilot & Claude Code Instructions

This repository contains the Home Assistant Supervisor, a Python 3 based container
orchestration and management system for Home Assistant.

## Supervisor Capabilities & Features

### Architecture Overview

Home Assistant Supervisor is a Python-based container orchestration system that
communicates with the Docker daemon to manage containerized components. It is tightly
integrated with the underlying Operating System and core Operating System components
through D-Bus.

**Managed Components:**
- **Home Assistant Core**: The main home automation application running in its own
  container (also provides the web interface)
- **Add-ons**: Third-party applications and services (each add-on runs in its own
  container)
- **Plugins**: Built-in system services like DNS, Audio, CLI, Multicast, and Observer
- **Host System Integration**: OS-level operations and hardware access via D-Bus
- **Container Networking**: Internal Docker network management and external
  connectivity
- **Storage & Backup**: Data persistence and backup management across all containers

**Key Dependencies:**
- **Docker Engine**: Required for all container operations
- **D-Bus**: System-level communication with the host OS
- **systemd**: Service management for host system operations
- **NetworkManager**: Network configuration and management

### Add-on System

**Add-on Architecture**: Add-ons are containerized applications available through
add-on stores. Each store contains multiple add-ons, and each add-on includes metadata
that tells Supervisor the version, startup configuration (permissions), and available
user configurable options. Add-on metadata typically references a container image that
Supervisor fetches during installation. If not, the Supervisor builds the container
image from a Dockerfile.

**Built-in Stores**: Supervisor comes with several pre-configured stores:
- **Core Add-ons**: Official add-ons maintained by the Home Assistant team
- **Community Add-ons**: Popular third-party add-ons repository
- **ESPHome**: Add-ons for ESPHome ecosystem integration
- **Music Assistant**: Audio and music-related add-ons
- **Local Development**: Local folder for testing custom add-ons during development

**Store Management**: Stores are Git-based repositories that are periodically updated.
When updates are available, users receive notifications.

**Add-on Lifecycle**:
- **Installation**: Supervisor fetches or builds container images based on add-on
  metadata
- **Configuration**: Schema-validated options with integrated UI management
- **Runtime**: Full container lifecycle management, health monitoring
- **Updates**: Automatic or manual version management

### Update System

**Core Components**: Supervisor, Home Assistant Core, HAOS, and built-in plugins
receive version information from a central JSON file fetched from
`https://version.home-assistant.io/{channel}.json`. The `Updater` class handles
fetching this data, validating signatures, and updating internal version tracking.

**Update Channels**: Three channels (`stable`/`beta`/`dev`) determine which version
JSON file is fetched, allowing users to opt into different release streams.

**Add-on Updates**: Add-on version information comes from store repository updates, not
the central JSON file. When repositories are refreshed via the store system, add-ons
compare their local versions against repository versions to determine update
availability.
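
To make the channel mechanics concrete, here is a minimal fetch sketch. It is an
illustration only, not the actual `Updater` implementation, which also validates
signatures and updates internal version tracking:

```python
# Illustration only: fetch the central version manifest for one channel.
import asyncio

import aiohttp

URL_VERSION = "https://version.home-assistant.io/{channel}.json"


async def fetch_version_manifest(channel: str = "stable") -> dict:
    """Return the parsed version JSON for the given update channel."""
    async with aiohttp.ClientSession() as session:
        async with session.get(URL_VERSION.format(channel=channel)) as resp:
            resp.raise_for_status()
            return await resp.json()


# Example: print the Supervisor version advertised on the stable channel
print(asyncio.run(fetch_version_manifest())["supervisor"])
```
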
### Backup & Recovery System

**Backup Capabilities**:
- **Full Backups**: Complete system state capture including all add-ons,
  configuration, and data
- **Partial Backups**: Selective backup of specific components (Home Assistant,
  add-ons, folders)
- **Encrypted Backups**: Optional backup encryption with user-provided passwords
- **Multiple Storage Locations**: Local storage and remote backup destinations

**Recovery Features**:
- **One-click Restore**: Simple restoration from backup files
- **Selective Restore**: Choose specific components to restore
- **Automatic Recovery**: Self-healing for common system issues

---

## Supervisor Development

### Python Requirements

- **Compatibility**: Python 3.13+
- **Language Features**: Use modern Python features:
  - Type hints with `typing` module
  - f-strings (preferred over `%` or `.format()`)
  - Dataclasses and enum classes
  - Async/await patterns
  - Pattern matching where appropriate

### Code Quality Standards

- **Formatting**: Ruff
- **Linting**: PyLint and Ruff
- **Type Checking**: MyPy
- **Testing**: pytest with asyncio support
- **Language**: American English for all code, comments, and documentation

### Code Organization

**Core Structure**:
```
supervisor/
├── __init__.py           # Package initialization
├── const.py              # Constants and enums
├── coresys.py            # Core system management
├── bootstrap.py          # System initialization
├── exceptions.py         # Custom exception classes
├── api/                  # REST API endpoints
├── addons/               # Add-on management
├── backups/              # Backup system
├── docker/               # Docker integration
├── host/                 # Host system interface
├── homeassistant/        # Home Assistant Core management
├── dbus/                 # D-Bus system integration
├── hardware/             # Hardware detection and management
├── plugins/              # Plugin system
├── resolution/           # Issue detection and resolution
├── security/             # Security management
├── services/             # Service discovery and management
├── store/                # Add-on store management
└── utils/                # Utility functions
```

**Shared Constants**: Use constants from `supervisor/const.py` instead of hardcoding
values. Define new constants following existing patterns and group related constants
together.

### Supervisor Architecture Patterns

**CoreSysAttributes Inheritance Pattern**: Nearly all major classes in Supervisor
inherit from `CoreSysAttributes`, providing access to the centralized system state
via `self.coresys` and convenient `sys_*` properties.

```python
# Standard Supervisor class pattern
class MyManager(CoreSysAttributes):
    """Manage my functionality."""

    def __init__(self, coresys: CoreSys):
        """Initialize manager."""
        self.coresys: CoreSys = coresys
        self._component: MyComponent = MyComponent(coresys)

    @property
    def component(self) -> MyComponent:
        """Return component handler."""
        return self._component

    # Access system components via inherited properties
    async def do_something(self):
        await self.sys_docker.containers.get("my_container")
        self.sys_bus.fire_event(BusEvent.MY_EVENT, {"data": "value"})
```

**Key Inherited Properties from CoreSysAttributes**:
- `self.sys_docker` - Docker API access
- `self.sys_run_in_executor()` - Execute blocking operations
- `self.sys_create_task()` - Create async tasks
- `self.sys_bus` - Event bus for system events
- `self.sys_config` - System configuration
- `self.sys_homeassistant` - Home Assistant Core management
- `self.sys_addons` - Add-on management
- `self.sys_host` - Host system access
- `self.sys_dbus` - D-Bus system interface

**Load Pattern**: Many components implement a `load()` method which effectively
initializes the component from external sources (containers, files, D-Bus services),
as sketched below.
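
A minimal sketch of this pattern, extending the hypothetical `MyManager` from the
example above (illustrative, not actual Supervisor code):

```python
class MyManager(CoreSysAttributes):
    """Manage my functionality (hypothetical example)."""

    def __init__(self, coresys: CoreSys):
        """Initialize manager. Synchronous setup only, no I/O here."""
        self.coresys: CoreSys = coresys
        self._state: dict[str, str] = {}

    async def load(self) -> None:
        """Initialize the component from external sources.

        Called during Supervisor startup; file, container, or D-Bus
        access belongs here rather than in __init__.
        """
        # Blocking file I/O is pushed to the executor (see Async Programming)
        self._state = await self.sys_run_in_executor(self._read_state)

    def _read_state(self) -> dict[str, str]:
        """Blocking helper that runs in the default executor."""
        return {}  # a real component would read and parse state from disk
```
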
### API Development

**REST API Structure**:
- **Base Path**: `/api/` for all endpoints
- **Authentication**: Bearer token authentication
- **Consistent Response Format**: `{"result": "ok", "data": {...}}` or
  `{"result": "error", "message": "..."}`
- **Validation**: Use voluptuous schemas with `api_validate()`

**Use `@api_process` Decorator**: This decorator handles all standard error handling
and response formatting automatically. The decorator catches `APIError`, `HassioError`,
and other exceptions, returning appropriate HTTP responses.

```python
from ..api.utils import api_process, api_validate

@api_process
async def backup_full(self, request: web.Request) -> dict[str, Any]:
    """Create full backup."""
    body = await api_validate(SCHEMA_BACKUP_FULL, request)
    job = await self.sys_backups.do_backup_full(**body)
    return {ATTR_JOB_ID: job.uuid}
```

### Docker Integration

- **Container Management**: Use Supervisor's Docker manager instead of direct
  Docker API
- **Networking**: Supervisor manages internal Docker networks with predefined IP
  ranges
- **Security**: AppArmor profiles, capability restrictions, and user namespace
  isolation
- **Health Checks**: Implement health monitoring for all managed containers

### D-Bus Integration

- **Use dbus-fast**: Async D-Bus library for system integration
- **Service Management**: systemd, NetworkManager, hostname management
- **Error Handling**: Wrap D-Bus exceptions in Supervisor-specific exceptions

### Async Programming

- **All I/O operations must be async**: File operations, network calls, subprocess
  execution
- **Use asyncio patterns**: Prefer `asyncio.gather()` over sequential awaits
- **Executor jobs**: Use `self.sys_run_in_executor()` for blocking operations
- **Two-phase initialization**: `__init__` for sync setup, `post_init()` for async
  initialization (see the sketch after this list)
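
A short sketch combining these rules, with hypothetical component and method names:

```python
import asyncio
from pathlib import Path


class MyManager(CoreSysAttributes):
    """Hypothetical component illustrating the async rules above."""

    async def _refresh_store(self) -> None:
        """Placeholder for one independent async operation."""

    async def _refresh_plugins(self) -> None:
        """Placeholder for another independent async operation."""

    async def refresh_all(self) -> None:
        """Run independent operations concurrently, not sequentially."""
        await asyncio.gather(self._refresh_store(), self._refresh_plugins())

    async def read_config(self, path: Path) -> str:
        """Blocking file I/O goes through the executor, never inline."""
        return await self.sys_run_in_executor(path.read_text)
```
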
### Testing

- **Location**: `tests/` directory with module mirroring
- **Fixtures**: Extensive use of pytest fixtures for CoreSys setup
- **Mocking**: Mock external dependencies (Docker, D-Bus, network calls)
- **Coverage**: Minimum 90% test coverage, 100% for security-sensitive code
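
For example, a test for the hypothetical `MyManager.load()` above might look like
this sketch (assuming the repository's `coresys` fixture from `tests/conftest.py`
and pytest-asyncio picking up the coroutine test):

```python
from unittest.mock import patch


async def test_my_manager_load(coresys):
    """Verify load() populates state without real file I/O."""
    manager = MyManager(coresys)

    # Mock the blocking helper instead of touching the filesystem
    with patch.object(MyManager, "_read_state", return_value={"a": "b"}):
        await manager.load()

    assert manager._state == {"a": "b"}
```
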
### Error Handling

- **Custom Exceptions**: Defined in `exceptions.py` with clear inheritance hierarchy
- **Error Propagation**: Use `from` clause for exception chaining
- **API Errors**: Use `APIError` with appropriate HTTP status codes
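
A small sketch of these conventions (the exception names here are illustrative; the
real hierarchy lives in `supervisor/exceptions.py`):

```python
import json


class HassioError(Exception):
    """Illustrative stand-in for the root Supervisor exception."""


class MyComponentError(HassioError):
    """Raised when my hypothetical component fails."""


def parse_state(raw: str) -> dict:
    """Wrap a low-level failure and keep the original traceback."""
    try:
        return json.loads(raw)
    except ValueError as err:
        # `from err` chains the exception so the root cause stays visible
        raise MyComponentError(f"Invalid state payload: {err}") from err
```
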
### Security Considerations

- **Container Security**: AppArmor profiles mandatory for add-ons, minimal
  capabilities
- **Authentication**: Token-based API authentication with role-based access
- **Data Protection**: Backup encryption, secure secret management, comprehensive
  input validation

### Development Commands

```bash
# Run tests, adjust paths as necessary
pytest -qsx tests/

# Linting and formatting
ruff check supervisor/
ruff format supervisor/

# Type checking
mypy --ignore-missing-imports supervisor/

# Pre-commit hooks
pre-commit run --all-files
```

Always run the pre-commit hooks at the end of code editing.

### Common Patterns to Follow

**✅ Use These Patterns**:
- Inherit from `CoreSysAttributes` for system access
- Use `@api_process` decorator for API endpoints
- Use `self.sys_run_in_executor()` for blocking operations
- Access Docker via `self.sys_docker` not direct Docker API
- Use constants from `const.py` instead of hardcoding
- Store types in (per-module) `const.py` (e.g. supervisor/store/const.py)

**❌ Avoid These Patterns**:
- Direct Docker API usage - use Supervisor's Docker manager
- Blocking operations in async context (use asyncio alternatives)
- Hardcoded values - use constants from `const.py`
- Manual error handling in API endpoints - let `@api_process` handle it

This guide provides the foundation for contributing to Home Assistant Supervisor.
Follow these patterns and guidelines to ensure code quality, security, and
maintainability.
.github/workflows/builder.yml (vendored, 4 lines changed)
@@ -131,9 +131,9 @@ jobs:

      - name: Install Cosign
        if: needs.init.outputs.publish == 'true'
-        uses: sigstore/cosign-installer@v3.8.2
+        uses: sigstore/cosign-installer@v3.9.2
        with:
-          cosign-release: "v2.4.0"
+          cosign-release: "v2.4.3"

      - name: Install dirhash and calc hash
        if: needs.init.outputs.publish == 'true'
.github/workflows/ci.yaml (vendored, 53 lines changed)
@@ -10,6 +10,7 @@ on:
env:
  DEFAULT_PYTHON: "3.13"
  PRE_COMMIT_CACHE: ~/.cache/pre-commit
+  MYPY_CACHE_VERSION: 1

concurrency:
  group: "${{ github.workflow }}-${{ github.ref }}"

@@ -286,6 +287,52 @@ jobs:
        . venv/bin/activate
        pylint supervisor tests

+  mypy:
+    name: Check mypy
+    runs-on: ubuntu-latest
+    needs: prepare
+    steps:
+      - name: Check out code from GitHub
+        uses: actions/checkout@v4.2.2
+      - name: Set up Python ${{ needs.prepare.outputs.python-version }}
+        uses: actions/setup-python@v5.6.0
+        id: python
+        with:
+          python-version: ${{ needs.prepare.outputs.python-version }}
+      - name: Generate partial mypy restore key
+        id: generate-mypy-key
+        run: |
+          mypy_version=$(cat requirements_test.txt | grep mypy | cut -d '=' -f 3)
+          echo "version=$mypy_version" >> $GITHUB_OUTPUT
+          echo "key=mypy-${{ env.MYPY_CACHE_VERSION }}-$mypy_version-$(date -u '+%Y-%m-%dT%H:%M:%s')" >> $GITHUB_OUTPUT
+      - name: Restore Python virtual environment
+        id: cache-venv
+        uses: actions/cache@v4.2.3
+        with:
+          path: venv
+          key: >-
+            ${{ runner.os }}-venv-${{ needs.prepare.outputs.python-version }}-${{ hashFiles('requirements.txt') }}-${{ hashFiles('requirements_tests.txt') }}
+      - name: Fail job if Python cache restore failed
+        if: steps.cache-venv.outputs.cache-hit != 'true'
+        run: |
+          echo "Failed to restore Python virtual environment from cache"
+          exit 1
+      - name: Restore mypy cache
+        uses: actions/cache@v4.2.3
+        with:
+          path: .mypy_cache
+          key: >-
+            ${{ runner.os }}-mypy-${{ needs.prepare.outputs.python-version }}-${{ steps.generate-mypy-key.outputs.key }}
+          restore-keys: >-
+            ${{ runner.os }}-venv-${{ needs.prepare.outputs.python-version }}-mypy-${{ env.MYPY_CACHE_VERSION }}-${{ steps.generate-mypy-key.outputs.version }}
+      - name: Register mypy problem matcher
+        run: |
+          echo "::add-matcher::.github/workflows/matchers/mypy.json"
+      - name: Run mypy
+        run: |
+          . venv/bin/activate
+          mypy --ignore-missing-imports supervisor
+
  pytest:
    runs-on: ubuntu-latest
    needs: prepare

@@ -299,9 +346,9 @@ jobs:
        with:
          python-version: ${{ needs.prepare.outputs.python-version }}
      - name: Install Cosign
-        uses: sigstore/cosign-installer@v3.8.2
+        uses: sigstore/cosign-installer@v3.9.2
        with:
-          cosign-release: "v2.4.0"
+          cosign-release: "v2.4.3"
      - name: Restore Python virtual environment
        id: cache-venv
        uses: actions/cache@v4.2.3

@@ -378,4 +425,4 @@ jobs:
          coverage report
          coverage xml
      - name: Upload coverage to Codecov
-        uses: codecov/codecov-action@v5.4.2
+        uses: codecov/codecov-action@v5.4.3
.github/workflows/matchers/mypy.json (vendored, new file, 16 lines added)
{
  "problemMatcher": [
    {
      "owner": "mypy",
      "pattern": [
        {
          "regexp": "^(.+):(\\d+):\\s(error|warning):\\s(.+)$",
          "file": 1,
          "line": 2,
          "severity": 3,
          "message": 4
        }
      ]
    }
  ]
}
.github/workflows/restrict-task-creation.yml (vendored, new file, 58 lines added)
name: Restrict task creation

# yamllint disable-line rule:truthy
on:
  issues:
    types: [opened]

jobs:
  check-authorization:
    runs-on: ubuntu-latest
    # Only run if this is a Task issue type (from the issue form)
    if: github.event.issue.issue_type == 'Task'
    steps:
      - name: Check if user is authorized
        uses: actions/github-script@v7
        with:
          script: |
            const issueAuthor = context.payload.issue.user.login;

            // Check if user is an organization member
            try {
              await github.rest.orgs.checkMembershipForUser({
                org: 'home-assistant',
                username: issueAuthor
              });
              console.log(`✅ ${issueAuthor} is an organization member`);
              return; // Authorized
            } catch (error) {
              console.log(`❌ ${issueAuthor} is not authorized to create Task issues`);
            }

            // Close the issue with a comment
            await github.rest.issues.createComment({
              owner: context.repo.owner,
              repo: context.repo.repo,
              issue_number: context.issue.number,
              body: `Hi @${issueAuthor}, thank you for your contribution!\n\n` +
                    `Task issues are restricted to Open Home Foundation staff and authorized contributors.\n\n` +
                    `If you would like to:\n` +
                    `- Report a bug: Please use the [bug report form](https://github.com/home-assistant/supervisor/issues/new?template=bug_report.yml)\n` +
                    `- Request a feature: Please submit to [Feature Requests](https://github.com/orgs/home-assistant/discussions)\n\n` +
                    `If you believe you should have access to create Task issues, please contact the maintainers.`
            });

            await github.rest.issues.update({
              owner: context.repo.owner,
              repo: context.repo.repo,
              issue_number: context.issue.number,
              state: 'closed'
            });

            // Add a label to indicate this was auto-closed
            await github.rest.issues.addLabels({
              owner: context.repo.owner,
              repo: context.repo.repo,
              issue_number: context.issue.number,
              labels: ['auto-closed']
            });
.github/workflows/sentry.yaml (vendored, 2 lines changed)
@@ -12,7 +12,7 @@ jobs:
      - name: Check out code from GitHub
        uses: actions/checkout@v4.2.2
      - name: Sentry Release
-        uses: getsentry/action-release@v3.1.1
+        uses: getsentry/action-release@v3.2.0
        env:
          SENTRY_AUTH_TOKEN: ${{ secrets.SENTRY_AUTH_TOKEN }}
          SENTRY_ORG: ${{ secrets.SENTRY_ORG }}
.pre-commit-config.yaml

@@ -1,6 +1,6 @@
repos:
  - repo: https://github.com/astral-sh/ruff-pre-commit
-    rev: v0.9.1
+    rev: v0.11.10
    hooks:
      - id: ruff
        args:

@@ -13,3 +13,15 @@ repos:
      - id: check-executables-have-shebangs
        stages: [manual]
      - id: check-json
+  - repo: local
+    hooks:
+      # Run mypy through our wrapper script in order to get the possible
+      # pyenv and/or virtualenv activated; it may not have been e.g. if
+      # committing from a GUI tool that was not launched from an activated
+      # shell.
+      - id: mypy
+        name: mypy
+        entry: script/run-in-env.sh mypy --ignore-missing-imports
+        language: script
+        types_or: [python, pyi]
+        files: ^supervisor/.+\.(py|pyi)$
build.yaml

@@ -12,7 +12,7 @@ cosign:
  base_identity: https://github.com/home-assistant/docker-base/.*
  identity: https://github.com/home-assistant/supervisor/.*
args:
-  COSIGN_VERSION: 2.4.0
+  COSIGN_VERSION: 2.4.3
labels:
  io.hass.type: supervisor
  org.opencontainers.image.title: Home Assistant Supervisor
pyproject.toml

@@ -1,5 +1,5 @@
[build-system]
-requires = ["setuptools~=80.3.1", "wheel~=0.46.1"]
+requires = ["setuptools~=80.9.0", "wheel~=0.46.1"]
build-backend = "setuptools.build_meta"

[project]
requirements.txt

@@ -1,30 +1,30 @@
-aiodns==3.3.0
+aiodns==3.5.0
-aiohttp==3.11.18
+aiohttp==3.12.15
 atomicwrites-homeassistant==1.4.1
 attrs==25.3.0
-awesomeversion==24.6.0
+awesomeversion==25.5.0
-blockbuster==1.5.24
+blockbuster==1.5.25
 brotli==1.1.0
 ciso8601==2.3.2
 colorlog==6.9.0
 cpe==1.3.1
-cryptography==44.0.3
+cryptography==45.0.5
-debugpy==1.8.14
+debugpy==1.8.15
 deepmerge==2.0
 dirhash==0.5.0
 docker==7.1.0
 faust-cchardet==2.1.19
-gitpython==3.1.44
+gitpython==3.1.45
 jinja2==3.1.6
 log-rate-limit==1.4.2
-orjson==3.10.18
+orjson==3.11.1
 pulsectl==24.12.0
 pyudev==0.24.3
 PyYAML==6.0.2
-requests==2.32.3
+requests==2.32.4
 securetar==2025.2.1
-sentry-sdk==2.27.0
+sentry-sdk==2.34.0
-setuptools==80.3.1
+setuptools==80.9.0
 voluptuous==0.15.2
-dbus-fast==2.44.1
+dbus-fast==2.44.2
 zlib-fast==0.2.1
requirements_tests.txt

@@ -1,12 +1,16 @@
-astroid==3.3.9
+astroid==3.3.11
-coverage==7.8.0
+coverage==7.10.1
+mypy==1.17.0
 pre-commit==4.2.0
 pylint==3.3.7
 pytest-aiohttp==1.1.0
 pytest-asyncio==0.25.2
-pytest-cov==6.1.1
+pytest-cov==6.2.1
 pytest-timeout==2.4.0
-pytest==8.3.5
+pytest==8.4.1
-ruff==0.11.8
+ruff==0.12.7
 time-machine==2.16.0
-urllib3==2.4.0
+types-docker==7.1.0.20250705
+types-pyyaml==6.0.12.20250516
+types-requests==2.32.4.20250611
+urllib3==2.5.0
script/run-in-env.sh (new executable file, 30 lines added)
#!/usr/bin/env sh
set -eu

# Used in venv activate script.
# Would be an error if undefined.
OSTYPE="${OSTYPE-}"

# Activate pyenv and virtualenv if present, then run the specified command

# pyenv, pyenv-virtualenv
if [ -s .python-version ]; then
    PYENV_VERSION=$(head -n 1 .python-version)
    export PYENV_VERSION
fi

if [ -n "${VIRTUAL_ENV-}" ] && [ -f "${VIRTUAL_ENV}/bin/activate" ]; then
    . "${VIRTUAL_ENV}/bin/activate"
else
    # other common virtualenvs
    my_path=$(git rev-parse --show-toplevel)

    for venv in venv .venv .; do
        if [ -f "${my_path}/${venv}/bin/activate" ]; then
            . "${my_path}/${venv}/bin/activate"
            break
        fi
    done
fi

exec "$@"
supervisor/__main__.py

@@ -13,7 +13,7 @@ zlib_fast.enable()

 # pylint: disable=wrong-import-position
 from supervisor import bootstrap  # noqa: E402
-from supervisor.utils.blockbuster import activate_blockbuster  # noqa: E402
+from supervisor.utils.blockbuster import BlockBusterManager  # noqa: E402
 from supervisor.utils.logging import activate_log_queue_handler  # noqa: E402

 # pylint: enable=wrong-import-position

@@ -55,7 +55,7 @@ if __name__ == "__main__":
     coresys = loop.run_until_complete(bootstrap.initialize_coresys())
     loop.set_debug(coresys.config.debug)
     if coresys.config.detect_blocking_io:
-        activate_blockbuster()
+        BlockBusterManager.activate()
     loop.run_until_complete(coresys.core.connect())

     loop.run_until_complete(bootstrap.supervisor_debugger(coresys))

@@ -66,8 +66,15 @@ if __name__ == "__main__":
     _LOGGER.info("Setting up Supervisor")
     loop.run_until_complete(coresys.core.setup())

-    loop.call_soon_threadsafe(loop.create_task, coresys.core.start())
-    loop.call_soon_threadsafe(bootstrap.reg_signal, loop, coresys)
+    bootstrap.register_signal_handlers(loop, coresys)
+
+    try:
+        loop.run_until_complete(coresys.core.start())
+    except Exception as err:  # pylint: disable=broad-except
+        # Supervisor itself is running at this point, just something didn't
+        # start as expected. Log with traceback to get more insights for
+        # such cases.
+        _LOGGER.critical("Supervisor start failed: %s", err, exc_info=True)

     try:
         _LOGGER.info("Running Supervisor")
supervisor/addons/addon.py

@@ -360,7 +360,7 @@ class Addon(AddonModel):
     @property
     def auto_update(self) -> bool:
         """Return if auto update is enable."""
-        return self.persist.get(ATTR_AUTO_UPDATE, super().auto_update)
+        return self.persist.get(ATTR_AUTO_UPDATE, False)

     @auto_update.setter
     def auto_update(self, value: bool) -> None:

@@ -1322,8 +1322,8 @@ class Addon(AddonModel):
                 arcname="data",
             )

-            # Backup config
-            if addon_config_used:
+            # Backup config (if used and existing, restore handles this gracefully)
+            if addon_config_used and self.path_config.is_dir():
                 atomic_contents_add(
                     backup,
                     self.path_config,

@@ -1359,9 +1359,7 @@ class Addon(AddonModel):
             )
             _LOGGER.info("Finish backup for addon %s", self.slug)
         except (tarfile.TarError, OSError, AddFileError) as err:
-            raise AddonsError(
-                f"Can't write tarfile {tar_file}: {err}", _LOGGER.error
-            ) from err
+            raise AddonsError(f"Can't write tarfile: {err}", _LOGGER.error) from err
         finally:
             if was_running:
                 wait_for_start = await self.end_backup()
supervisor/addons/build.py

@@ -15,6 +15,7 @@ from ..const import (
     ATTR_SQUASH,
     FILE_SUFFIX_CONFIGURATION,
     META_ADDON,
+    SOCKET_DOCKER,
 )
 from ..coresys import CoreSys, CoreSysAttributes
 from ..docker.interface import MAP_ARCH

@@ -121,39 +122,64 @@ class AddonBuild(FileConfiguration, CoreSysAttributes):
         except HassioArchNotFound:
             return False

-    def get_docker_args(self, version: AwesomeVersion, image: str | None = None):
-        """Create a dict with Docker build arguments.
-
-        Must be run in executor.
-        """
-        args: dict[str, Any] = {
-            "path": str(self.addon.path_location),
-            "tag": f"{image or self.addon.image}:{version!s}",
-            "dockerfile": str(self.get_dockerfile()),
-            "pull": True,
-            "forcerm": not self.sys_dev,
-            "squash": self.squash,
-            "platform": MAP_ARCH[self.arch],
-            "labels": {
-                "io.hass.version": version,
-                "io.hass.arch": self.arch,
-                "io.hass.type": META_ADDON,
-                "io.hass.name": self._fix_label("name"),
-                "io.hass.description": self._fix_label("description"),
-                **self.additional_labels,
-            },
-            "buildargs": {
-                "BUILD_FROM": self.base_image,
-                "BUILD_VERSION": version,
-                "BUILD_ARCH": self.sys_arch.default,
-                **self.additional_args,
-            },
-        }
-
-        if self.addon.url:
-            args["labels"]["io.hass.url"] = self.addon.url
-
-        return args
+    def get_docker_args(
+        self, version: AwesomeVersion, image_tag: str
+    ) -> dict[str, Any]:
+        """Create a dict with Docker run args."""
+        dockerfile_path = self.get_dockerfile().relative_to(self.addon.path_location)
+
+        build_cmd = [
+            "docker",
+            "buildx",
+            "build",
+            ".",
+            "--tag",
+            image_tag,
+            "--file",
+            str(dockerfile_path),
+            "--platform",
+            MAP_ARCH[self.arch],
+            "--pull",
+        ]
+
+        labels = {
+            "io.hass.version": version,
+            "io.hass.arch": self.arch,
+            "io.hass.type": META_ADDON,
+            "io.hass.name": self._fix_label("name"),
+            "io.hass.description": self._fix_label("description"),
+            **self.additional_labels,
+        }
+
+        if self.addon.url:
+            labels["io.hass.url"] = self.addon.url
+
+        for key, value in labels.items():
+            build_cmd.extend(["--label", f"{key}={value}"])
+
+        build_args = {
+            "BUILD_FROM": self.base_image,
+            "BUILD_VERSION": version,
+            "BUILD_ARCH": self.sys_arch.default,
+            **self.additional_args,
+        }
+
+        for key, value in build_args.items():
+            build_cmd.extend(["--build-arg", f"{key}={value}"])
+
+        # The addon path will be mounted from the host system
+        addon_extern_path = self.sys_config.local_to_extern_path(
+            self.addon.path_location
+        )
+
+        return {
+            "command": build_cmd,
+            "volumes": {
+                SOCKET_DOCKER: {"bind": "/var/run/docker.sock", "mode": "rw"},
+                addon_extern_path: {"bind": "/addon", "mode": "ro"},
+            },
+            "working_dir": "/addon",
+        }

     def _fix_label(self, label_name: str) -> str:
         """Remove characters they are not supported."""
supervisor/addons/manager.py

@@ -67,6 +67,10 @@ class AddonManager(CoreSysAttributes):
             return self.store.get(addon_slug)
         return None

+    def get_local_only(self, addon_slug: str) -> Addon | None:
+        """Return an installed add-on from slug."""
+        return self.local.get(addon_slug)
+
     def from_token(self, token: str) -> Addon | None:
         """Return an add-on from Supervisor token."""
         for addon in self.installed:

@@ -262,7 +266,7 @@ class AddonManager(CoreSysAttributes):
         ],
         on_condition=AddonsJobError,
     )
-    async def rebuild(self, slug: str) -> asyncio.Task | None:
+    async def rebuild(self, slug: str, *, force: bool = False) -> asyncio.Task | None:
         """Perform a rebuild of local build add-on.

         Returns a Task that completes when addon has state 'started' (see addon.start)

@@ -285,7 +289,7 @@ class AddonManager(CoreSysAttributes):
             raise AddonsError(
                 "Version changed, use Update instead Rebuild", _LOGGER.error
             )
-        if not addon.need_build:
+        if not force and not addon.need_build:
             raise AddonsNotSupportedError(
                 "Can't rebuild a image based add-on", _LOGGER.error
             )
supervisor/addons/model.py

@@ -664,12 +664,16 @@ class AddonModel(JobGroup, ABC):
         """Validate if addon is available for current system."""
         return self._validate_availability(self.data, logger=_LOGGER.error)

-    def __eq__(self, other):
-        """Compaired add-on objects."""
+    def __eq__(self, other: Any) -> bool:
+        """Compare add-on objects."""
         if not isinstance(other, AddonModel):
             return False
         return self.slug == other.slug

+    def __hash__(self) -> int:
+        """Hash for add-on objects."""
+        return hash(self.slug)
+
     def _validate_availability(
         self, config, *, logger: Callable[..., None] | None = None
     ) -> None:
supervisor/api/__init__.py

@@ -8,7 +8,7 @@ from typing import Any

 from aiohttp import hdrs, web

-from ..const import AddonState
+from ..const import SUPERVISOR_DOCKER_NAME, AddonState
 from ..coresys import CoreSys, CoreSysAttributes
 from ..exceptions import APIAddonNotInstalled, HostNotSupportedError
 from ..utils.sentry import async_capture_exception

@@ -426,7 +426,7 @@ class RestAPI(CoreSysAttributes):
         async def get_supervisor_logs(*args, **kwargs):
             try:
                 return await self._api_host.advanced_logs_handler(
-                    *args, identifier="hassio_supervisor", **kwargs
+                    *args, identifier=SUPERVISOR_DOCKER_NAME, **kwargs
                 )
             except Exception as err:  # pylint: disable=broad-exception-caught
                 # Supervisor logs are critical, so catch everything, log the exception

@@ -789,6 +789,7 @@ class RestAPI(CoreSysAttributes):
         self.webapp.add_routes(
             [
                 web.get("/docker/info", api_docker.info),
+                web.post("/docker/options", api_docker.options),
                 web.get("/docker/registries", api_docker.registries),
                 web.post("/docker/registries", api_docker.create_registry),
                 web.delete("/docker/registries/{hostname}", api_docker.remove_registry),
supervisor/api/addons.py

@@ -36,6 +36,7 @@ from ..const import (
     ATTR_DNS,
     ATTR_DOCKER_API,
     ATTR_DOCUMENTATION,
+    ATTR_FORCE,
     ATTR_FULL_ACCESS,
     ATTR_GPIO,
     ATTR_HASSIO_API,

@@ -139,6 +140,8 @@ SCHEMA_SECURITY = vol.Schema({vol.Optional(ATTR_PROTECTED): vol.Boolean()})
 SCHEMA_UNINSTALL = vol.Schema(
     {vol.Optional(ATTR_REMOVE_CONFIG, default=False): vol.Boolean()}
 )
+
+SCHEMA_REBUILD = vol.Schema({vol.Optional(ATTR_FORCE, default=False): vol.Boolean()})
 # pylint: enable=no-value-for-parameter


@@ -461,7 +464,11 @@ class APIAddons(CoreSysAttributes):
     async def rebuild(self, request: web.Request) -> None:
         """Rebuild local build add-on."""
         addon = self.get_addon_for_request(request)
-        if start_task := await asyncio.shield(self.sys_addons.rebuild(addon.slug)):
+        body: dict[str, Any] = await api_validate(SCHEMA_REBUILD, request)
+
+        if start_task := await asyncio.shield(
+            self.sys_addons.rebuild(addon.slug, force=body[ATTR_FORCE])
+        ):
             await start_task

     @api_process
supervisor/api/auth.py

@@ -3,11 +3,13 @@
 import asyncio
 from collections.abc import Awaitable
 import logging
-from typing import Any
+from typing import Any, cast

 from aiohttp import BasicAuth, web
 from aiohttp.hdrs import AUTHORIZATION, CONTENT_TYPE, WWW_AUTHENTICATE
+from aiohttp.web import FileField
 from aiohttp.web_exceptions import HTTPUnauthorized
+from multidict import MultiDictProxy
 import voluptuous as vol

 from ..addons.addon import Addon

@@ -51,7 +53,10 @@ class APIAuth(CoreSysAttributes):
         return self.sys_auth.check_login(addon, auth.login, auth.password)

     def _process_dict(
-        self, request: web.Request, addon: Addon, data: dict[str, str]
+        self,
+        request: web.Request,
+        addon: Addon,
+        data: dict[str, Any] | MultiDictProxy[str | bytes | FileField],
     ) -> Awaitable[bool]:
         """Process login with dict data.

@@ -60,7 +65,15 @@ class APIAuth(CoreSysAttributes):
         username = data.get("username") or data.get("user")
         password = data.get("password")

-        return self.sys_auth.check_login(addon, username, password)
+        # Test that we did receive strings and not something else, raise if so
+        try:
+            _ = username.encode and password.encode  # type: ignore
+        except AttributeError:
+            raise HTTPUnauthorized(headers=REALM_HEADER) from None
+
+        return self.sys_auth.check_login(
+            addon, cast(str, username), cast(str, password)
+        )

     @api_process
     async def auth(self, request: web.Request) -> bool:

@@ -79,13 +92,18 @@ class APIAuth(CoreSysAttributes):
         # Json
         if request.headers.get(CONTENT_TYPE) == CONTENT_TYPE_JSON:
             data = await request.json(loads=json_loads)
-            return await self._process_dict(request, addon, data)
+            if not await self._process_dict(request, addon, data):
+                raise HTTPUnauthorized()
+            return True

         # URL encoded
         if request.headers.get(CONTENT_TYPE) == CONTENT_TYPE_URL:
             data = await request.post()
-            return await self._process_dict(request, addon, data)
+            if not await self._process_dict(request, addon, data):
+                raise HTTPUnauthorized()
+            return True

+        # Advertise Basic authentication by default
         raise HTTPUnauthorized(headers=REALM_HEADER)

     @api_process
@ -87,4 +87,4 @@ class DetectBlockingIO(StrEnum):
|
|||||||
|
|
||||||
OFF = "off"
|
OFF = "off"
|
||||||
ON = "on"
|
ON = "on"
|
||||||
ON_AT_STARTUP = "on_at_startup"
|
ON_AT_STARTUP = "on-at-startup"
|
||||||
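The enum hunk above only swaps the serialized value to kebab-case. Because `DetectBlockingIO` is a `StrEnum`, members compare equal to their string value, so API payloads must now send `on-at-startup`. A quick self-contained check:

```python
from enum import StrEnum

class DetectBlockingIO(StrEnum):
    OFF = "off"
    ON = "on"
    ON_AT_STARTUP = "on-at-startup"

# StrEnum lookup and equality both use the kebab-case string after this change
assert DetectBlockingIO("on-at-startup") is DetectBlockingIO.ON_AT_STARTUP
assert DetectBlockingIO.ON_AT_STARTUP == "on-at-startup"
```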
@@ -1,7 +1,7 @@
 """Init file for Supervisor network RESTful API."""

 import logging
-from typing import Any, cast
+from typing import Any

 from aiohttp import web
 import voluptuous as vol
@@ -56,8 +56,8 @@ class APIDiscovery(CoreSysAttributes):
             }
             for message in self.sys_discovery.list_messages
             if (
-                discovered := cast(
-                    Addon, self.sys_addons.get(message.addon, local_only=True)
+                discovered := self.sys_addons.get_local_only(
+                    message.addon,
                 )
             )
             and discovered.state == AddonState.STARTED
@@ -6,7 +6,10 @@ from typing import Any
 from aiohttp import web
 import voluptuous as vol

+from supervisor.resolution.const import ContextType, IssueType, SuggestionType
+
 from ..const import (
+    ATTR_ENABLE_IPV6,
     ATTR_HOSTNAME,
     ATTR_LOGGING,
     ATTR_PASSWORD,
@@ -30,10 +33,48 @@ SCHEMA_DOCKER_REGISTRY = vol.Schema(
     }
 )

+# pylint: disable=no-value-for-parameter
+SCHEMA_OPTIONS = vol.Schema({vol.Optional(ATTR_ENABLE_IPV6): vol.Maybe(vol.Boolean())})
+
+
 class APIDocker(CoreSysAttributes):
     """Handle RESTful API for Docker configuration."""

+    @api_process
+    async def info(self, request: web.Request):
+        """Get docker info."""
+        data_registries = {}
+        for hostname, registry in self.sys_docker.config.registries.items():
+            data_registries[hostname] = {
+                ATTR_USERNAME: registry[ATTR_USERNAME],
+            }
+        return {
+            ATTR_VERSION: self.sys_docker.info.version,
+            ATTR_ENABLE_IPV6: self.sys_docker.config.enable_ipv6,
+            ATTR_STORAGE: self.sys_docker.info.storage,
+            ATTR_LOGGING: self.sys_docker.info.logging,
+            ATTR_REGISTRIES: data_registries,
+        }
+
+    @api_process
+    async def options(self, request: web.Request) -> None:
+        """Set docker options."""
+        body = await api_validate(SCHEMA_OPTIONS, request)
+
+        if (
+            ATTR_ENABLE_IPV6 in body
+            and self.sys_docker.config.enable_ipv6 != body[ATTR_ENABLE_IPV6]
+        ):
+            self.sys_docker.config.enable_ipv6 = body[ATTR_ENABLE_IPV6]
+            _LOGGER.info("Host system reboot required to apply new IPv6 configuration")
+            self.sys_resolution.create_issue(
+                IssueType.REBOOT_REQUIRED,
+                ContextType.SYSTEM,
+                suggestions=[SuggestionType.EXECUTE_REBOOT],
+            )
+
+        await self.sys_docker.config.save_data()
+
     @api_process
     async def registries(self, request) -> dict[str, Any]:
         """Return the list of registries."""
@@ -64,18 +105,3 @@ class APIDocker(CoreSysAttributes):

         del self.sys_docker.config.registries[hostname]
         await self.sys_docker.config.save_data()
-
-    @api_process
-    async def info(self, request: web.Request):
-        """Get docker info."""
-        data_registries = {}
-        for hostname, registry in self.sys_docker.config.registries.items():
-            data_registries[hostname] = {
-                ATTR_USERNAME: registry[ATTR_USERNAME],
-            }
-        return {
-            ATTR_VERSION: self.sys_docker.info.version,
-            ATTR_STORAGE: self.sys_docker.info.storage,
-            ATTR_LOGGING: self.sys_docker.info.logging,
-            ATTR_REGISTRIES: data_registries,
-        }
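The new `options` handler validates `enable_ipv6` and flags a host reboot when the value changes. A hedged sketch of exercising it from an aiohttp client, assuming the handler is routed as `POST /docker/options` on the internal Supervisor host (the route wiring and hostname are not part of this diff):

```python
import asyncio
import aiohttp

async def enable_ipv6(token: str) -> None:
    # Assumption: Supervisor is reachable at http://supervisor and expects
    # a bearer token, as add-ons typically use; adjust for your setup.
    async with aiohttp.ClientSession(
        base_url="http://supervisor",
        headers={"Authorization": f"Bearer {token}"},
    ) as session:
        async with session.post(
            "/docker/options", json={"enable_ipv6": True}
        ) as resp:
            resp.raise_for_status()

# asyncio.run(enable_ipv6("SUPERVISOR_TOKEN"))
```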
@@ -269,6 +269,13 @@ class APIHost(CoreSysAttributes):
                     err,
                 )
                 break
+            except ConnectionError as err:
+                _LOGGER.warning(
+                    "%s raised when returning journal logs: %s",
+                    type(err).__name__,
+                    err,
+                )
+                break
             except (ConnectionResetError, ClientPayloadError) as ex:
                 # ClientPayloadError is most likely caused by the closing the connection
                 raise APIError(
@@ -309,9 +309,9 @@ class APIIngress(CoreSysAttributes):

 def _init_header(
     request: web.Request, addon: Addon, session_data: IngressSessionData | None
-) -> CIMultiDict | dict[str, str]:
+) -> CIMultiDict[str]:
     """Create initial header."""
-    headers = {}
+    headers = CIMultiDict[str]()

     if session_data is not None:
         headers[HEADER_REMOTE_USER_ID] = session_data.user.id
@@ -337,7 +337,7 @@ def _init_header(
             istr(HEADER_REMOTE_USER_DISPLAY_NAME),
         ):
             continue
-        headers[name] = value
+        headers.add(name, value)

     # Update X-Forwarded-For
     if request.transport:
@@ -348,9 +348,9 @@ def _init_header(
     return headers


-def _response_header(response: aiohttp.ClientResponse) -> dict[str, str]:
+def _response_header(response: aiohttp.ClientResponse) -> CIMultiDict[str]:
     """Create response header."""
-    headers = {}
+    headers = CIMultiDict[str]()

     for name, value in response.headers.items():
         if name in (
@@ -360,7 +360,7 @@ def _response_header(response: aiohttp.ClientResponse) -> dict[str, str]:
             hdrs.CONTENT_ENCODING,
         ):
             continue
-        headers[name] = value
+        headers.add(name, value)

     return headers

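The switch from a plain `dict` to `CIMultiDict[str]` plus `.add()` matters for proxied headers: a dict keyed by name silently overwrites duplicates such as repeated `Set-Cookie`, while a case-insensitive multidict keeps every occurrence. A small demonstration:

```python
from multidict import CIMultiDict

headers = CIMultiDict[str]()
headers.add("Set-Cookie", "a=1")
headers.add("set-cookie", "b=2")  # case-insensitive key, both values kept

assert headers.getall("Set-Cookie") == ["a=1", "b=2"]
assert len(headers) == 2  # a plain dict would have collapsed this to one entry
```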
@@ -10,6 +10,7 @@ import voluptuous as vol

 from ..const import (
     ATTR_ACCESSPOINTS,
+    ATTR_ADDR_GEN_MODE,
     ATTR_ADDRESS,
     ATTR_AUTH,
     ATTR_CONNECTED,
@@ -22,6 +23,7 @@ from ..const import (
     ATTR_ID,
     ATTR_INTERFACE,
     ATTR_INTERFACES,
+    ATTR_IP6_PRIVACY,
     ATTR_IPV4,
     ATTR_IPV6,
     ATTR_MAC,
@@ -38,15 +40,18 @@ from ..const import (
     ATTR_TYPE,
     ATTR_VLAN,
     ATTR_WIFI,
+    DOCKER_IPV4_NETWORK_MASK,
     DOCKER_NETWORK,
-    DOCKER_NETWORK_MASK,
 )
 from ..coresys import CoreSysAttributes
 from ..exceptions import APIError, APINotFound, HostNetworkNotFound
 from ..host.configuration import (
     AccessPoint,
     Interface,
+    InterfaceAddrGenMode,
+    InterfaceIp6Privacy,
     InterfaceMethod,
+    Ip6Setting,
     IpConfig,
     IpSetting,
     VlanConfig,
@@ -68,6 +73,8 @@ _SCHEMA_IPV6_CONFIG = vol.Schema(
     {
         vol.Optional(ATTR_ADDRESS): [vol.Coerce(IPv6Interface)],
         vol.Optional(ATTR_METHOD): vol.Coerce(InterfaceMethod),
+        vol.Optional(ATTR_ADDR_GEN_MODE): vol.Coerce(InterfaceAddrGenMode),
+        vol.Optional(ATTR_IP6_PRIVACY): vol.Coerce(InterfaceIp6Privacy),
         vol.Optional(ATTR_GATEWAY): vol.Coerce(IPv6Address),
         vol.Optional(ATTR_NAMESERVERS): [vol.Coerce(IPv6Address)],
     }
@@ -94,8 +101,8 @@ SCHEMA_UPDATE = vol.Schema(
 )


-def ipconfig_struct(config: IpConfig, setting: IpSetting) -> dict[str, Any]:
-    """Return a dict with information about ip configuration."""
+def ip4config_struct(config: IpConfig, setting: IpSetting) -> dict[str, Any]:
+    """Return a dict with information about IPv4 configuration."""
     return {
         ATTR_METHOD: setting.method,
         ATTR_ADDRESS: [address.with_prefixlen for address in config.address],
@@ -105,6 +112,19 @@ def ipconfig_struct(config: IpConfig, setting: IpSetting) -> dict[str, Any]:
     }


+def ip6config_struct(config: IpConfig, setting: Ip6Setting) -> dict[str, Any]:
+    """Return a dict with information about IPv6 configuration."""
+    return {
+        ATTR_METHOD: setting.method,
+        ATTR_ADDR_GEN_MODE: setting.addr_gen_mode,
+        ATTR_IP6_PRIVACY: setting.ip6_privacy,
+        ATTR_ADDRESS: [address.with_prefixlen for address in config.address],
+        ATTR_NAMESERVERS: [str(address) for address in config.nameservers],
+        ATTR_GATEWAY: str(config.gateway) if config.gateway else None,
+        ATTR_READY: config.ready,
+    }
+
+
 def wifi_struct(config: WifiConfig) -> dict[str, Any]:
     """Return a dict with information about wifi configuration."""
     return {
@@ -132,10 +152,10 @@ def interface_struct(interface: Interface) -> dict[str, Any]:
         ATTR_CONNECTED: interface.connected,
         ATTR_PRIMARY: interface.primary,
         ATTR_MAC: interface.mac,
-        ATTR_IPV4: ipconfig_struct(interface.ipv4, interface.ipv4setting)
+        ATTR_IPV4: ip4config_struct(interface.ipv4, interface.ipv4setting)
         if interface.ipv4 and interface.ipv4setting
         else None,
-        ATTR_IPV6: ipconfig_struct(interface.ipv6, interface.ipv6setting)
+        ATTR_IPV6: ip6config_struct(interface.ipv6, interface.ipv6setting)
         if interface.ipv6 and interface.ipv6setting
         else None,
         ATTR_WIFI: wifi_struct(interface.wifi) if interface.wifi else None,
@@ -183,7 +203,7 @@ class APINetwork(CoreSysAttributes):
             ],
             ATTR_DOCKER: {
                 ATTR_INTERFACE: DOCKER_NETWORK,
-                ATTR_ADDRESS: str(DOCKER_NETWORK_MASK),
+                ATTR_ADDRESS: str(DOCKER_IPV4_NETWORK_MASK),
                 ATTR_GATEWAY: str(self.sys_docker.network.gateway),
                 ATTR_DNS: str(self.sys_docker.network.dns),
             },
@@ -212,25 +232,31 @@ class APINetwork(CoreSysAttributes):
         for key, config in body.items():
             if key == ATTR_IPV4:
                 interface.ipv4setting = IpSetting(
-                    config.get(ATTR_METHOD, InterfaceMethod.STATIC),
-                    config.get(ATTR_ADDRESS, []),
-                    config.get(ATTR_GATEWAY),
-                    config.get(ATTR_NAMESERVERS, []),
+                    method=config.get(ATTR_METHOD, InterfaceMethod.STATIC),
+                    address=config.get(ATTR_ADDRESS, []),
+                    gateway=config.get(ATTR_GATEWAY),
+                    nameservers=config.get(ATTR_NAMESERVERS, []),
                 )
             elif key == ATTR_IPV6:
-                interface.ipv6setting = IpSetting(
-                    config.get(ATTR_METHOD, InterfaceMethod.STATIC),
-                    config.get(ATTR_ADDRESS, []),
-                    config.get(ATTR_GATEWAY),
-                    config.get(ATTR_NAMESERVERS, []),
+                interface.ipv6setting = Ip6Setting(
+                    method=config.get(ATTR_METHOD, InterfaceMethod.STATIC),
+                    addr_gen_mode=config.get(
+                        ATTR_ADDR_GEN_MODE, InterfaceAddrGenMode.DEFAULT
+                    ),
+                    ip6_privacy=config.get(
+                        ATTR_IP6_PRIVACY, InterfaceIp6Privacy.DEFAULT
+                    ),
+                    address=config.get(ATTR_ADDRESS, []),
+                    gateway=config.get(ATTR_GATEWAY),
+                    nameservers=config.get(ATTR_NAMESERVERS, []),
                 )
             elif key == ATTR_WIFI:
                 interface.wifi = WifiConfig(
-                    config.get(ATTR_MODE, WifiMode.INFRASTRUCTURE),
-                    config.get(ATTR_SSID, ""),
-                    config.get(ATTR_AUTH, AuthMethod.OPEN),
-                    config.get(ATTR_PSK, None),
-                    None,
+                    mode=config.get(ATTR_MODE, WifiMode.INFRASTRUCTURE),
+                    ssid=config.get(ATTR_SSID, ""),
+                    auth=config.get(ATTR_AUTH, AuthMethod.OPEN),
+                    psk=config.get(ATTR_PSK, None),
+                    signal=None,
                 )
             elif key == ATTR_ENABLED:
                 interface.enabled = config
@@ -277,19 +303,25 @@ class APINetwork(CoreSysAttributes):
         ipv4_setting = None
         if ATTR_IPV4 in body:
             ipv4_setting = IpSetting(
-                body[ATTR_IPV4].get(ATTR_METHOD, InterfaceMethod.AUTO),
-                body[ATTR_IPV4].get(ATTR_ADDRESS, []),
-                body[ATTR_IPV4].get(ATTR_GATEWAY, None),
-                body[ATTR_IPV4].get(ATTR_NAMESERVERS, []),
+                method=body[ATTR_IPV4].get(ATTR_METHOD, InterfaceMethod.AUTO),
+                address=body[ATTR_IPV4].get(ATTR_ADDRESS, []),
+                gateway=body[ATTR_IPV4].get(ATTR_GATEWAY, None),
+                nameservers=body[ATTR_IPV4].get(ATTR_NAMESERVERS, []),
             )

         ipv6_setting = None
         if ATTR_IPV6 in body:
-            ipv6_setting = IpSetting(
-                body[ATTR_IPV6].get(ATTR_METHOD, InterfaceMethod.AUTO),
-                body[ATTR_IPV6].get(ATTR_ADDRESS, []),
-                body[ATTR_IPV6].get(ATTR_GATEWAY, None),
-                body[ATTR_IPV6].get(ATTR_NAMESERVERS, []),
+            ipv6_setting = Ip6Setting(
+                method=body[ATTR_IPV6].get(ATTR_METHOD, InterfaceMethod.AUTO),
+                addr_gen_mode=body[ATTR_IPV6].get(
+                    ATTR_ADDR_GEN_MODE, InterfaceAddrGenMode.DEFAULT
+                ),
+                ip6_privacy=body[ATTR_IPV6].get(
+                    ATTR_IP6_PRIVACY, InterfaceIp6Privacy.DEFAULT
+                ),
+                address=body[ATTR_IPV6].get(ATTR_ADDRESS, []),
+                gateway=body[ATTR_IPV6].get(ATTR_GATEWAY, None),
+                nameservers=body[ATTR_IPV6].get(ATTR_NAMESERVERS, []),
            )

         vlan_interface = Interface(
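With `_SCHEMA_IPV6_CONFIG` extended above, an interface update body can now carry the address generation mode and privacy extensions. A hedged example payload; the exact enum strings are defined by `InterfaceAddrGenMode` and `InterfaceIp6Privacy` in `supervisor.host.configuration` and may differ from the placeholders used here:

```python
# Hypothetical request body for updating an interface's IPv6 settings.
body = {
    "ipv6": {
        "method": "static",
        "address": ["2001:db8::2/64"],
        "gateway": "2001:db8::1",
        "nameservers": ["2001:db8::53"],
        "addr_gen_mode": "eui64",   # assumption: one InterfaceAddrGenMode value
        "ip6_privacy": "enabled",   # assumption: one InterfaceIp6Privacy value
    }
}
```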
@@ -17,6 +17,7 @@ from ..const import (
     ATTR_ICON,
     ATTR_LOGGING,
     ATTR_MACHINE,
+    ATTR_MACHINE_ID,
     ATTR_NAME,
     ATTR_OPERATING_SYSTEM,
     ATTR_STATE,
@@ -48,6 +49,7 @@ class APIRoot(CoreSysAttributes):
             ATTR_OPERATING_SYSTEM: self.sys_host.info.operating_system,
             ATTR_FEATURES: self.sys_host.features,
             ATTR_MACHINE: self.sys_machine,
+            ATTR_MACHINE_ID: self.sys_machine_id,
             ATTR_ARCH: self.sys_arch.default,
             ATTR_STATE: self.sys_core.state,
             ATTR_SUPPORTED_ARCH: self.sys_arch.supported,
@@ -126,9 +126,7 @@ class APIStore(CoreSysAttributes):
         """Generate addon information."""

         installed = (
-            cast(Addon, self.sys_addons.get(addon.slug, local_only=True))
-            if addon.is_installed
-            else None
+            self.sys_addons.get_local_only(addon.slug) if addon.is_installed else None
         )

         data = {
@@ -49,11 +49,7 @@ from ..const import (
 from ..coresys import CoreSysAttributes
 from ..exceptions import APIError
 from ..store.validate import repositories
-from ..utils.blockbuster import (
-    activate_blockbuster,
-    blockbuster_enabled,
-    deactivate_blockbuster,
-)
+from ..utils.blockbuster import BlockBusterManager
 from ..utils.sentry import close_sentry, init_sentry
 from ..utils.validate import validate_timezone
 from ..validate import version_tag, wait_boot
@@ -110,7 +106,7 @@ class APISupervisor(CoreSysAttributes):
             ATTR_DEBUG_BLOCK: self.sys_config.debug_block,
             ATTR_DIAGNOSTICS: self.sys_config.diagnostics,
             ATTR_AUTO_UPDATE: self.sys_updater.auto_update,
-            ATTR_DETECT_BLOCKING_IO: blockbuster_enabled(),
+            ATTR_DETECT_BLOCKING_IO: BlockBusterManager.is_enabled(),
             ATTR_COUNTRY: self.sys_config.country,
             # Depricated
             ATTR_WAIT_BOOT: self.sys_config.wait_boot,
@@ -180,10 +176,10 @@ class APISupervisor(CoreSysAttributes):
             detect_blocking_io = DetectBlockingIO.ON

         if detect_blocking_io == DetectBlockingIO.ON:
-            activate_blockbuster()
+            BlockBusterManager.activate()
         elif detect_blocking_io == DetectBlockingIO.OFF:
             self.sys_config.detect_blocking_io = False
-            deactivate_blockbuster()
+            BlockBusterManager.deactivate()

         # Deprecated
         if ATTR_WAIT_BOOT in body:
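The supervisor API hunks above replace the free functions from `..utils.blockbuster` with a `BlockBusterManager` namespace. The manager itself is not shown in this diff; the following is only a minimal sketch of the pattern being adopted (module-level state behind classmethods), not the real implementation in `supervisor/utils/blockbuster.py`:

```python
class BlockBusterManager:
    """Sketch: one shared on/off state, addressed through classmethods."""

    _enabled: bool = False

    @classmethod
    def is_enabled(cls) -> bool:
        return cls._enabled

    @classmethod
    def activate(cls) -> None:
        cls._enabled = True  # real code would start blocking-IO detection here

    @classmethod
    def deactivate(cls) -> None:
        cls._enabled = False  # real code would stop blocking-IO detection here
```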
@@ -40,7 +40,7 @@ class CpuArch(CoreSysAttributes):
     @property
     def supervisor(self) -> str:
         """Return supervisor arch."""
-        return self.sys_supervisor.arch
+        return self.sys_supervisor.arch or self._default_arch

     @property
     def supported(self) -> list[str]:
@@ -91,4 +91,14 @@ class CpuArch(CoreSysAttributes):
         for check, value in MAP_CPU.items():
             if cpu.startswith(check):
                 return value
-        return self.sys_supervisor.arch
+        if self.sys_supervisor.arch:
+            _LOGGER.warning(
+                "Unknown CPU architecture %s, falling back to Supervisor architecture.",
+                cpu,
+            )
+            return self.sys_supervisor.arch
+        _LOGGER.warning(
+            "Unknown CPU architecture %s, assuming CPU architecture equals Supervisor architecture.",
+            cpu,
+        )
+        return cpu
@@ -3,10 +3,10 @@
 import asyncio
 import hashlib
 import logging
-from typing import Any
+from typing import Any, TypedDict, cast

 from .addons.addon import Addon
-from .const import ATTR_ADDON, ATTR_PASSWORD, ATTR_TYPE, ATTR_USERNAME, FILE_HASSIO_AUTH
+from .const import ATTR_PASSWORD, ATTR_TYPE, ATTR_USERNAME, FILE_HASSIO_AUTH
 from .coresys import CoreSys, CoreSysAttributes
 from .exceptions import (
     AuthError,
@@ -21,6 +21,17 @@ from .validate import SCHEMA_AUTH_CONFIG
 _LOGGER: logging.Logger = logging.getLogger(__name__)


+class BackendAuthRequest(TypedDict):
+    """Model for a backend auth request.
+
+    https://github.com/home-assistant/core/blob/ed9503324d9d255e6fb077f1614fb6d55800f389/homeassistant/components/hassio/auth.py#L66-L73
+    """
+
+    username: str
+    password: str
+    addon: str
+
+
 class Auth(FileConfiguration, CoreSysAttributes):
     """Manage SSO for Add-ons with Home Assistant user."""

@@ -74,6 +85,9 @@ class Auth(FileConfiguration, CoreSysAttributes):
         """Check username login."""
         if password is None:
             raise AuthError("None as password is not supported!", _LOGGER.error)
+        if username is None:
+            raise AuthError("None as username is not supported!", _LOGGER.error)
+
         _LOGGER.info("Auth request from '%s' for '%s'", addon.slug, username)

         # Get from cache
@@ -103,11 +117,12 @@ class Auth(FileConfiguration, CoreSysAttributes):
         async with self.sys_homeassistant.api.make_request(
             "post",
             "api/hassio_auth",
-            json={
-                ATTR_USERNAME: username,
-                ATTR_PASSWORD: password,
-                ATTR_ADDON: addon.slug,
-            },
+            json=cast(
+                dict[str, Any],
+                BackendAuthRequest(
+                    username=username, password=password, addon=addon.slug
+                ),
+            ),
         ) as req:
             if req.status == 200:
                 _LOGGER.info("Successful login for '%s'", username)
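The `cast(dict[str, Any], BackendAuthRequest(...))` dance above exists because a `TypedDict` is a plain `dict` at runtime but is not assignable to `dict[str, Any]` for the type checker. A self-contained illustration:

```python
from typing import Any, TypedDict, cast

class BackendAuthRequest(TypedDict):
    username: str
    password: str
    addon: str

payload = BackendAuthRequest(username="u", password="p", addon="core_ssh")
# At runtime this is already a dict; the cast only satisfies the type
# checker, which treats TypedDict and dict[str, Any] as incompatible.
as_json: dict[str, Any] = cast(dict[str, Any], payload)
assert as_json == {"username": "u", "password": "p", "addon": "core_ssh"}
```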
@@ -18,8 +18,6 @@ import time
 from typing import Any, Self, cast

 from awesomeversion import AwesomeVersion, AwesomeVersionCompareException
-from cryptography.hazmat.backends import default_backend
-from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes
 from securetar import AddFileError, SecureTarFile, atomic_contents_add, secure_path
 import voluptuous as vol
 from voluptuous.humanize import humanize_error
@@ -62,9 +60,11 @@ from ..utils.dt import parse_datetime, utcnow
 from ..utils.json import json_bytes
 from ..utils.sentinel import DEFAULT
 from .const import BUF_SIZE, LOCATION_CLOUD_BACKUP, BackupType
-from .utils import key_to_iv, password_to_key
+from .utils import password_to_key
 from .validate import SCHEMA_BACKUP

+IGNORED_COMPARISON_FIELDS = {ATTR_PROTECTED, ATTR_CRYPTO, ATTR_DOCKER}
+
 _LOGGER: logging.Logger = logging.getLogger(__name__)

@@ -102,7 +102,6 @@ class Backup(JobGroup):
         self._tmp: TemporaryDirectory | None = None
         self._outer_secure_tarfile: SecureTarFile | None = None
         self._key: bytes | None = None
-        self._aes: Cipher | None = None
         self._locations: dict[str | None, BackupLocation] = {
             location: BackupLocation(
                 path=tar_file,
@@ -244,11 +243,6 @@ class Backup(JobGroup):
         """Return backup size in bytes."""
         return self._locations[self.location].size_bytes

-    @property
-    def is_new(self) -> bool:
-        """Return True if there is new."""
-        return not self.tarfile.exists()
-
     @property
     def tarfile(self) -> Path:
         """Return path to backup tarfile."""
@@ -273,7 +267,7 @@ class Backup(JobGroup):

         # Compare all fields except ones about protection. Current encryption status does not affect equality
         keys = self._data.keys() | other._data.keys()
-        for k in keys - {ATTR_PROTECTED, ATTR_CRYPTO, ATTR_DOCKER}:
+        for k in keys - IGNORED_COMPARISON_FIELDS:
             if (
                 k not in self._data
                 or k not in other._data
@@ -353,16 +347,10 @@ class Backup(JobGroup):
             self._init_password(password)
         else:
             self._key = None
-            self._aes = None

     def _init_password(self, password: str) -> None:
-        """Set password + init aes cipher."""
+        """Create key from password."""
         self._key = password_to_key(password)
-        self._aes = Cipher(
-            algorithms.AES(self._key),
-            modes.CBC(key_to_iv(self._key)),
-            backend=default_backend(),
-        )

     async def validate_backup(self, location: str | None) -> None:
         """Validate backup.
@@ -591,13 +579,21 @@ class Backup(JobGroup):
     @Job(name="backup_addon_save", cleanup=False)
     async def _addon_save(self, addon: Addon) -> asyncio.Task | None:
         """Store an add-on into backup."""
-        self.sys_jobs.current.reference = addon.slug
+        self.sys_jobs.current.reference = slug = addon.slug
         if not self._outer_secure_tarfile:
             raise RuntimeError(
                 "Cannot backup components without initializing backup tar"
             )

-        tar_name = f"{addon.slug}.tar{'.gz' if self.compressed else ''}"
+        # Ensure it is still installed and get current data before proceeding
+        if not (curr_addon := self.sys_addons.get_local_only(slug)):
+            _LOGGER.warning(
+                "Skipping backup of add-on %s because it has been uninstalled",
+                slug,
+            )
+            return None
+
+        tar_name = f"{slug}.tar{'.gz' if self.compressed else ''}"

         addon_file = self._outer_secure_tarfile.create_inner_tar(
             f"./{tar_name}",
@@ -606,18 +602,16 @@ class Backup(JobGroup):
         )
         # Take backup
         try:
-            start_task = await addon.backup(addon_file)
+            start_task = await curr_addon.backup(addon_file)
         except AddonsError as err:
-            raise BackupError(
-                f"Can't create backup for {addon.slug}", _LOGGER.error
-            ) from err
+            raise BackupError(str(err)) from err

         # Store to config
         self._data[ATTR_ADDONS].append(
             {
-                ATTR_SLUG: addon.slug,
-                ATTR_NAME: addon.name,
-                ATTR_VERSION: addon.version,
+                ATTR_SLUG: slug,
+                ATTR_NAME: curr_addon.name,
+                ATTR_VERSION: curr_addon.version,
                 # Bug - addon_file.size used to give us this information
                 # It always returns 0 in current securetar. Skipping until fixed
                 ATTR_SIZE: 0,
@@ -639,8 +633,11 @@ class Backup(JobGroup):
             try:
                 if start_task := await self._addon_save(addon):
                     start_tasks.append(start_task)
-            except Exception as err:  # pylint: disable=broad-except
-                _LOGGER.warning("Can't save Add-on %s: %s", addon.slug, err)
+            except BackupError as err:
+                err = BackupError(
+                    f"Can't backup add-on {addon.slug}: {str(err)}", _LOGGER.error
+                )
+                self.sys_jobs.current.capture_error(err)

         return start_tasks

@@ -769,16 +766,20 @@ class Backup(JobGroup):
             if await self.sys_run_in_executor(_save):
                 self._data[ATTR_FOLDERS].append(name)
         except (tarfile.TarError, OSError, AddFileError) as err:
-            raise BackupError(
-                f"Can't backup folder {name}: {str(err)}", _LOGGER.error
-            ) from err
+            raise BackupError(f"Can't write tarfile: {str(err)}") from err

     @Job(name="backup_store_folders", cleanup=False)
     async def store_folders(self, folder_list: list[str]):
         """Backup Supervisor data into backup."""
         # Save folder sequential avoid issue on slow IO
         for folder in folder_list:
-            await self._folder_save(folder)
+            try:
+                await self._folder_save(folder)
+            except BackupError as err:
+                err = BackupError(
+                    f"Can't backup folder {folder}: {str(err)}", _LOGGER.error
+                )
+                self.sys_jobs.current.capture_error(err)

     @Job(name="backup_folder_restore", cleanup=False)
     async def _folder_restore(self, name: str) -> None:
@@ -930,5 +931,5 @@ class Backup(JobGroup):
         Return a coroutine.
         """
         return self.sys_store.update_repositories(
-            self.repositories, add_with_errors=True, replace=replace
+            set(self.repositories), issue_on_error=True, replace=replace
         )
@@ -378,66 +378,69 @@ class BackupManager(FileConfiguration, JobGroup):
         if not backup.all_locations:
             del self._backups[backup.slug]

+    @Job(name="backup_copy_to_location", cleanup=False)
+    async def _copy_to_location(
+        self, backup: Backup, location: LOCATION_TYPE
+    ) -> tuple[str | None, Path]:
+        """Copy a backup file to the default location."""
+        location_name = location.name if isinstance(location, Mount) else location
+        self.sys_jobs.current.reference = location_name
+        try:
+            if location == LOCATION_CLOUD_BACKUP:
+                destination = self.sys_config.path_core_backup
+            elif location:
+                location_mount = cast(Mount, location)
+                if not location_mount.local_where.is_mount():
+                    raise BackupMountDownError(
+                        f"{location_mount.name} is down, cannot copy to it",
+                        _LOGGER.error,
+                    )
+                destination = location_mount.local_where
+            else:
+                destination = self.sys_config.path_backup
+
+            path = await self.sys_run_in_executor(copy, backup.tarfile, destination)
+            return (location_name, Path(path))
+        except OSError as err:
+            msg = f"Could not copy backup to {location_name} due to: {err!s}"
+
+            if err.errno == errno.EBADMSG and location in {
+                LOCATION_CLOUD_BACKUP,
+                None,
+            }:
+                raise BackupDataDiskBadMessageError(msg, _LOGGER.error) from err
+            raise BackupError(msg, _LOGGER.error) from err
+
+    @Job(name="backup_copy_to_additional_locations", cleanup=False)
     async def _copy_to_additional_locations(
         self,
         backup: Backup,
         locations: list[LOCATION_TYPE],
     ):
         """Copy a backup file to additional locations."""

         all_new_locations: dict[str | None, Path] = {}
+        for location in locations:
+            try:
+                location_name, path = await self._copy_to_location(backup, location)
+                all_new_locations[location_name] = path
+            except BackupDataDiskBadMessageError as err:
+                self.sys_resolution.add_unhealthy_reason(
+                    UnhealthyReason.OSERROR_BAD_MESSAGE
+                )
+                self.sys_jobs.current.capture_error(err)
+            except BackupError as err:
+                self.sys_jobs.current.capture_error(err)

-        def copy_to_additional_locations() -> None:
-            """Copy backup file to additional locations."""
-            nonlocal all_new_locations
-            for location in locations:
-                try:
-                    if location == LOCATION_CLOUD_BACKUP:
-                        all_new_locations[LOCATION_CLOUD_BACKUP] = Path(
-                            copy(backup.tarfile, self.sys_config.path_core_backup)
-                        )
-                    elif location:
-                        location_mount = cast(Mount, location)
-                        if not location_mount.local_where.is_mount():
-                            raise BackupMountDownError(
-                                f"{location_mount.name} is down, cannot copy to it",
-                                _LOGGER.error,
-                            )
-                        all_new_locations[location_mount.name] = Path(
-                            copy(backup.tarfile, location_mount.local_where)
-                        )
-                    else:
-                        all_new_locations[None] = Path(
-                            copy(backup.tarfile, self.sys_config.path_backup)
-                        )
-                except OSError as err:
-                    msg = f"Could not copy backup to {location.name if isinstance(location, Mount) else location} due to: {err!s}"
-
-                    if err.errno == errno.EBADMSG and location in {
-                        LOCATION_CLOUD_BACKUP,
-                        None,
-                    }:
-                        raise BackupDataDiskBadMessageError(msg, _LOGGER.error) from err
-                    raise BackupError(msg, _LOGGER.error) from err
-
-        try:
-            await self.sys_run_in_executor(copy_to_additional_locations)
-        except BackupDataDiskBadMessageError:
-            self.sys_resolution.add_unhealthy_reason(
-                UnhealthyReason.OSERROR_BAD_MESSAGE
-            )
-            raise
-        finally:
-            backup.all_locations.update(
-                {
-                    loc: BackupLocation(
-                        path=path,
-                        protected=backup.protected,
-                        size_bytes=backup.size_bytes,
-                    )
-                    for loc, path in all_new_locations.items()
-                }
-            )
+        backup.all_locations.update(
+            {
+                loc: BackupLocation(
+                    path=path,
+                    protected=backup.protected,
+                    size_bytes=backup.size_bytes,
+                )
+                for loc, path in all_new_locations.items()
+            }
+        )

     @Job(name="backup_manager_import_backup")
     async def import_backup(
@@ -518,7 +521,8 @@ class BackupManager(FileConfiguration, JobGroup):
     ) -> Backup | None:
         """Create a backup.

-        Must be called from an existing backup job.
+        Must be called from an existing backup job. If the backup failed, the
+        backup file is being deleted and None is returned.
         """
         addon_start_tasks: list[Awaitable[None]] | None = None

@@ -548,9 +552,12 @@ class BackupManager(FileConfiguration, JobGroup):
             self._change_stage(BackupJobStage.FINISHING_FILE, backup)

         except BackupError as err:
+            await self.sys_run_in_executor(backup.tarfile.unlink, missing_ok=True)
+            _LOGGER.error("Backup %s error: %s", backup.slug, err)
             self.sys_jobs.current.capture_error(err)
             return None
         except Exception as err:  # pylint: disable=broad-except
+            await self.sys_run_in_executor(backup.tarfile.unlink, missing_ok=True)
             _LOGGER.exception("Backup %s error", backup.slug)
             await async_capture_exception(err)
             self.sys_jobs.current.capture_error(
@@ -562,12 +569,7 @@ class BackupManager(FileConfiguration, JobGroup):

         if additional_locations:
             self._change_stage(BackupJobStage.COPY_ADDITONAL_LOCATIONS, backup)
-            try:
-                await self._copy_to_additional_locations(
-                    backup, additional_locations
-                )
-            except BackupError as err:
-                self.sys_jobs.current.capture_error(err)
+            await self._copy_to_additional_locations(backup, additional_locations)

         if addon_start_tasks:
             self._change_stage(BackupJobStage.AWAIT_ADDON_RESTARTS, backup)
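Splitting `_copy_to_location` into its own `@Job` lets each location fail independently: errors are captured on the current job instead of aborting the loop, and `all_locations` is updated with whatever succeeded. The control flow reduces to this capture-and-continue pattern (simplified, synchronous sketch with hypothetical names):

```python
results: dict[str, str] = {}
errors: list[Exception] = []

def copy_one(name: str) -> str:
    if name == "bad-mount":
        raise OSError("mount is down")
    return f"/backup/{name}/slug.tar"

for location in ["local", "bad-mount", "cloud"]:
    try:
        results[location] = copy_one(location)
    except OSError as err:
        errors.append(err)  # the real job calls capture_error(err) here

# One failure is recorded, the remaining locations still get their copies.
assert list(results) == ["local", "cloud"] and len(errors) == 1
```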
@@ -1,6 +1,7 @@
 """Bootstrap Supervisor."""

 # ruff: noqa: T100
+import asyncio
 from importlib import import_module
 import logging
 import os
@@ -54,6 +55,14 @@ async def initialize_coresys() -> CoreSys:
     """Initialize supervisor coresys/objects."""
     coresys = await CoreSys().load_config()

+    # Check if ENV is in development mode
+    if coresys.dev:
+        _LOGGER.warning("Environment variable 'SUPERVISOR_DEV' is set")
+        coresys.config.logging = LogLevel.DEBUG
+        coresys.config.debug = True
+    else:
+        coresys.config.modify_log_level()
+
     # Initialize core objects
     coresys.docker = await DockerAPI(coresys).post_init()
     coresys.resolution = await ResolutionManager(coresys).load_config()
@@ -93,15 +102,9 @@ async def initialize_coresys() -> CoreSys:
     # bootstrap config
     initialize_system(coresys)

-    # Check if ENV is in development mode
     if coresys.dev:
-        _LOGGER.warning("Environment variable 'SUPERVISOR_DEV' is set")
-        coresys.config.logging = LogLevel.DEBUG
-        coresys.config.debug = True
         coresys.updater.channel = UpdateChannel.DEV
         coresys.security.content_trust = False
-    else:
-        coresys.config.modify_log_level()

     # Convert datetime
     logging.Formatter.converter = lambda *args: coresys.now().timetuple()
@@ -282,8 +285,8 @@ def check_environment() -> None:
         _LOGGER.critical("Can't find Docker socket!")


-def reg_signal(loop, coresys: CoreSys) -> None:
-    """Register SIGTERM and SIGKILL to stop system."""
+def register_signal_handlers(loop: asyncio.AbstractEventLoop, coresys: CoreSys) -> None:
+    """Register SIGTERM, SIGHUP and SIGKILL to stop the Supervisor."""
     try:
         loop.add_signal_handler(
             signal.SIGTERM, lambda: loop.create_task(coresys.core.stop())
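The renamed `register_signal_handlers` now types the loop parameter, which is why `asyncio` joins the imports at the top of the file. A minimal sketch of the registration pattern (note that SIGKILL cannot actually be trapped on POSIX systems, whatever the docstring says):

```python
import asyncio
import signal

async def stop() -> None:
    print("stopping")

def register(loop: asyncio.AbstractEventLoop) -> None:
    # Each handler schedules the async shutdown on the running loop.
    loop.add_signal_handler(signal.SIGTERM, lambda: loop.create_task(stop()))
    loop.add_signal_handler(signal.SIGHUP, lambda: loop.create_task(stop()))
```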
@@ -2,7 +2,7 @@

 from __future__ import annotations

-from collections.abc import Awaitable, Callable
+from collections.abc import Callable, Coroutine
 import logging
 from typing import Any

@@ -19,7 +19,7 @@ class EventListener:
     """Event listener."""

     event_type: BusEvent = attr.ib()
-    callback: Callable[[Any], Awaitable[None]] = attr.ib()
+    callback: Callable[[Any], Coroutine[Any, Any, None]] = attr.ib()


 class Bus(CoreSysAttributes):
@@ -31,7 +31,7 @@ class Bus(CoreSysAttributes):
         self._listeners: dict[BusEvent, list[EventListener]] = {}

     def register_event(
-        self, event: BusEvent, callback: Callable[[Any], Awaitable[None]]
+        self, event: BusEvent, callback: Callable[[Any], Coroutine[Any, Any, None]]
     ) -> EventListener:
         """Register callback for an event."""
         listener = EventListener(event, callback)
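`Coroutine[Any, Any, None]` is the precise return type of an `async def` callback; the broader `Awaitable` also admits Futures and Tasks, which the bus never hands out. A short check that an async function satisfies the new annotation:

```python
from collections.abc import Callable, Coroutine
from typing import Any

async def on_event(payload: Any) -> None:
    print("event:", payload)

# An async def function is a Callable returning a Coroutine, so this
# assignment type-checks under the tightened annotation.
callback: Callable[[Any], Coroutine[Any, Any, None]] = on_event
```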
@@ -66,7 +66,7 @@ _UTC = "UTC"
 class CoreConfig(FileConfiguration):
     """Hold all core config data."""

-    def __init__(self):
+    def __init__(self) -> None:
         """Initialize config object."""
         super().__init__(FILE_HASSIO_CONFIG, SCHEMA_SUPERVISOR_CONFIG)
         self._timezone_tzinfo: tzinfo | None = None
@@ -2,16 +2,20 @@

 from dataclasses import dataclass
 from enum import StrEnum
-from ipaddress import IPv4Network
+from ipaddress import IPv4Network, IPv6Network
 from pathlib import Path
 from sys import version_info as systemversion
-from typing import Self
+from typing import NotRequired, Self, TypedDict

 from aiohttp import __version__ as aiohttpversion

 SUPERVISOR_VERSION = "9999.09.9.dev9999"
 SERVER_SOFTWARE = f"HomeAssistantSupervisor/{SUPERVISOR_VERSION} aiohttp/{aiohttpversion} Python/{systemversion[0]}.{systemversion[1]}"

+DOCKER_PREFIX: str = "hassio"
+OBSERVER_DOCKER_NAME: str = f"{DOCKER_PREFIX}_observer"
+SUPERVISOR_DOCKER_NAME: str = f"{DOCKER_PREFIX}_supervisor"
+
 URL_HASSIO_ADDONS = "https://github.com/home-assistant/addons"
 URL_HASSIO_APPARMOR = "https://version.home-assistant.io/apparmor_{channel}.txt"
 URL_HASSIO_VERSION = "https://version.home-assistant.io/{channel}.json"
@@ -41,8 +45,10 @@ SYSTEMD_JOURNAL_PERSISTENT = Path("/var/log/journal")
 SYSTEMD_JOURNAL_VOLATILE = Path("/run/log/journal")

 DOCKER_NETWORK = "hassio"
-DOCKER_NETWORK_MASK = IPv4Network("172.30.32.0/23")
-DOCKER_NETWORK_RANGE = IPv4Network("172.30.33.0/24")
+DOCKER_NETWORK_DRIVER = "bridge"
+DOCKER_IPV6_NETWORK_MASK = IPv6Network("fd0c:ac1e:2100::/48")
+DOCKER_IPV4_NETWORK_MASK = IPv4Network("172.30.32.0/23")
+DOCKER_IPV4_NETWORK_RANGE = IPv4Network("172.30.33.0/24")

 # This needs to match the dockerd --cpu-rt-runtime= argument.
 DOCKER_CPU_RUNTIME_TOTAL = 950_000
@@ -97,6 +103,7 @@ ATTR_ADDON = "addon"
 ATTR_ADDONS = "addons"
 ATTR_ADDONS_CUSTOM_LIST = "addons_custom_list"
 ATTR_ADDONS_REPOSITORIES = "addons_repositories"
+ATTR_ADDR_GEN_MODE = "addr_gen_mode"
 ATTR_ADDRESS = "address"
 ATTR_ADDRESS_DATA = "address-data"
 ATTR_ADMIN = "admin"
@@ -171,6 +178,7 @@ ATTR_DOCKER_API = "docker_api"
 ATTR_DOCUMENTATION = "documentation"
 ATTR_DOMAINS = "domains"
 ATTR_ENABLE = "enable"
+ATTR_ENABLE_IPV6 = "enable_ipv6"
 ATTR_ENABLED = "enabled"
 ATTR_ENVIRONMENT = "environment"
 ATTR_EVENT = "event"
@@ -180,6 +188,7 @@ ATTR_FEATURES = "features"
 ATTR_FILENAME = "filename"
 ATTR_FLAGS = "flags"
 ATTR_FOLDERS = "folders"
+ATTR_FORCE = "force"
 ATTR_FORCE_SECURITY = "force_security"
 ATTR_FREQUENCY = "frequency"
 ATTR_FULL_ACCESS = "full_access"
@@ -220,6 +229,7 @@ ATTR_INSTALLED = "installed"
 ATTR_INTERFACE = "interface"
 ATTR_INTERFACES = "interfaces"
 ATTR_IP_ADDRESS = "ip_address"
+ATTR_IP6_PRIVACY = "ip6_privacy"
 ATTR_IPV4 = "ipv4"
 ATTR_IPV6 = "ipv6"
 ATTR_ISSUES = "issues"
@@ -237,6 +247,7 @@ ATTR_LOGO = "logo"
 ATTR_LONG_DESCRIPTION = "long_description"
 ATTR_MAC = "mac"
 ATTR_MACHINE = "machine"
+ATTR_MACHINE_ID = "machine_id"
 ATTR_MAINTAINER = "maintainer"
 ATTR_MAP = "map"
 ATTR_MEMORY_LIMIT = "memory_limit"
@@ -405,10 +416,12 @@ class AddonBoot(StrEnum):
     MANUAL = "manual"

     @classmethod
-    def _missing_(cls, value: str) -> Self | None:
+    def _missing_(cls, value: object) -> Self | None:
         """Convert 'forced' config values to their counterpart."""
         if value == AddonBootConfig.MANUAL_ONLY:
-            return AddonBoot.MANUAL
+            for member in cls:
+                if member == AddonBoot.MANUAL:
+                    return member
         return None


@@ -505,6 +518,16 @@ class CpuArch(StrEnum):
     AMD64 = "amd64"


+class IngressSessionDataUserDict(TypedDict):
+    """Response object for ingress session user."""
+
+    id: str
+    username: NotRequired[str | None]
+    # Name is an alias for displayname, only one should be used
+    displayname: NotRequired[str | None]
+    name: NotRequired[str | None]
+
+
 @dataclass
 class IngressSessionDataUser:
     """Format of an IngressSessionDataUser object."""
@@ -513,38 +536,42 @@ class IngressSessionDataUser:
     display_name: str | None = None
     username: str | None = None

-    def to_dict(self) -> dict[str, str | None]:
+    def to_dict(self) -> IngressSessionDataUserDict:
         """Get dictionary representation."""
-        return {
-            ATTR_ID: self.id,
-            ATTR_DISPLAYNAME: self.display_name,
-            ATTR_USERNAME: self.username,
-        }
+        return IngressSessionDataUserDict(
+            id=self.id, displayname=self.display_name, username=self.username
+        )

     @classmethod
-    def from_dict(cls, data: dict[str, str | None]) -> Self:
+    def from_dict(cls, data: IngressSessionDataUserDict) -> Self:
         """Return object from dictionary representation."""
         return cls(
-            id=data[ATTR_ID],
-            display_name=data.get(ATTR_DISPLAYNAME),
-            username=data.get(ATTR_USERNAME),
+            id=data["id"],
+            display_name=data.get("displayname") or data.get("name"),
+            username=data.get("username"),
         )


+class IngressSessionDataDict(TypedDict):
+    """Response object for ingress session data."""
+
+    user: IngressSessionDataUserDict
+
+
 @dataclass
 class IngressSessionData:
     """Format of an IngressSessionData object."""

     user: IngressSessionDataUser

-    def to_dict(self) -> dict[str, dict[str, str | None]]:
+    def to_dict(self) -> IngressSessionDataDict:
         """Get dictionary representation."""
-        return {ATTR_USER: self.user.to_dict()}
+        return IngressSessionDataDict(user=self.user.to_dict())

     @classmethod
-    def from_dict(cls, data: dict[str, dict[str, str | None]]) -> Self:
+    def from_dict(cls, data: IngressSessionDataDict) -> Self:
         """Return object from dictionary representation."""
-        return cls(user=IngressSessionDataUser.from_dict(data[ATTR_USER]))
+        return cls(user=IngressSessionDataUser.from_dict(data["user"]))


 STARTING_STATES = [
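The new `IngressSessionDataUserDict` uses `NotRequired` so persisted sessions that lack the optional keys still type-check, and `from_dict` falls back from `displayname` to the legacy `name` key. A self-contained illustration:

```python
from typing import NotRequired, TypedDict

class IngressSessionDataUserDict(TypedDict):
    id: str
    username: NotRequired[str | None]
    displayname: NotRequired[str | None]
    name: NotRequired[str | None]

# "id" is mandatory; the NotRequired keys may be absent entirely, which is
# why the reader uses .get() and prefers displayname over the alias name.
minimal: IngressSessionDataUserDict = {"id": "abc123"}
legacy: IngressSessionDataUserDict = {"id": "abc123", "name": "Home Assistant"}
display = legacy.get("displayname") or legacy.get("name")
assert display == "Home Assistant"
```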
@@ -28,7 +28,7 @@ from .homeassistant.core import LANDINGPAGE
 from .resolution.const import ContextType, IssueType, SuggestionType, UnhealthyReason
 from .utils.dt import utcnow
 from .utils.sentry import async_capture_exception
-from .utils.whoami import WhoamiData, retrieve_whoami
+from .utils.whoami import retrieve_whoami

 _LOGGER: logging.Logger = logging.getLogger(__name__)

@@ -36,7 +36,7 @@ _LOGGER: logging.Logger = logging.getLogger(__name__)
 class Core(CoreSysAttributes):
     """Main object of Supervisor."""

-    def __init__(self, coresys: CoreSys):
+    def __init__(self, coresys: CoreSys) -> None:
         """Initialize Supervisor object."""
         self.coresys: CoreSys = coresys
         self._state: CoreState = CoreState.INITIALIZE
@@ -91,7 +91,7 @@ class Core(CoreSysAttributes):
             "info", {"state": self._state}
         )

-    async def connect(self):
+    async def connect(self) -> None:
         """Connect Supervisor container."""
         # Load information from container
         await self.sys_supervisor.load()
@@ -120,7 +120,7 @@ class Core(CoreSysAttributes):
             self.sys_config.version = self.sys_supervisor.version
             await self.sys_config.save_data()

-    async def setup(self):
+    async def setup(self) -> None:
         """Start setting up supervisor orchestration."""
         await self.set_state(CoreState.SETUP)

@@ -188,7 +188,10 @@ class Core(CoreSysAttributes):
                 await setup_task
             except Exception as err:  # pylint: disable=broad-except
                 _LOGGER.critical(
-                    "Fatal error happening on load Task %s: %s", setup_task, err
+                    "Fatal error happening on load Task %s: %s",
+                    setup_task,
+                    err,
+                    exc_info=True,
                 )
                 self.sys_resolution.add_unhealthy_reason(UnhealthyReason.SETUP)
                 await async_capture_exception(err)
@@ -213,7 +216,7 @@ class Core(CoreSysAttributes):
         # Evaluate the system
         await self.sys_resolution.evaluate.evaluate_system()

-    async def start(self):
+    async def start(self) -> None:
         """Start Supervisor orchestration."""
         await self.set_state(CoreState.STARTUP)

@@ -237,10 +240,10 @@ class Core(CoreSysAttributes):
                 await self.sys_supervisor.update()
                 return

-        # Start addon mark as initialize
-        await self.sys_addons.boot(AddonStartup.INITIALIZE)
-
         try:
+            # Start addon mark as initialize
+            await self.sys_addons.boot(AddonStartup.INITIALIZE)
+
             # HomeAssistant is already running, only Supervisor restarted
             if await self.sys_hardware.helper.last_boot() == self.sys_config.last_boot:
                 _LOGGER.info("Detected Supervisor restart")
@@ -307,7 +310,7 @@ class Core(CoreSysAttributes):
         )
         _LOGGER.info("Supervisor is up and running")

-    async def stop(self):
+    async def stop(self) -> None:
         """Stop a running orchestration."""
         # store new last boot / prevent time adjustments
         if self.state in (CoreState.RUNNING, CoreState.SHUTDOWN):
@@ -355,7 +358,7 @@ class Core(CoreSysAttributes):
         _LOGGER.info("Supervisor is down - %d", self.exit_code)
         self.sys_loop.stop()

-    async def shutdown(self, *, remove_homeassistant_container: bool = False):
+    async def shutdown(self, *, remove_homeassistant_container: bool = False) -> None:
         """Shutdown all running containers in correct order."""
         # don't process scheduler anymore
         if self.state == CoreState.RUNNING:
@@ -379,19 +382,15 @@ class Core(CoreSysAttributes):
         if self.state in (CoreState.STOPPING, CoreState.SHUTDOWN):
             await self.sys_plugins.shutdown()

-    async def _update_last_boot(self):
+    async def _update_last_boot(self) -> None:
         """Update last boot time."""
-        self.sys_config.last_boot = await self.sys_hardware.helper.last_boot()
+        if not (last_boot := await self.sys_hardware.helper.last_boot()):
+            _LOGGER.error("Could not update last boot information!")
+            return
+        self.sys_config.last_boot = last_boot
         await self.sys_config.save_data()

-    async def _retrieve_whoami(self, with_ssl: bool) -> WhoamiData | None:
-        try:
-            return await retrieve_whoami(self.sys_websession, with_ssl)
-        except WhoamiSSLError:
-            _LOGGER.info("Whoami service SSL error")
-            return None
-
-    async def _adjust_system_datetime(self):
+    async def _adjust_system_datetime(self) -> None:
         """Adjust system time/date on startup."""
         # If no timezone is detect or set
         # If we are not connected or time sync
@@ -403,11 +402,13 @@ class Core(CoreSysAttributes):

         # Get Timezone data
         try:
-            data = await self._retrieve_whoami(True)
-
-            # SSL Date Issue & possible time drift
-            if not data:
-                data = await self._retrieve_whoami(False)
+            try:
+                data = await retrieve_whoami(self.sys_websession, True)
+            except WhoamiSSLError:
+                # SSL Date Issue & possible time drift
+                _LOGGER.info("Whoami service SSL error")
+                data = await retrieve_whoami(self.sys_websession, False)
+
         except WhoamiError as err:
             _LOGGER.warning("Can't adjust Time/Date settings: %s", err)
             return
@@ -423,7 +424,7 @@ class Core(CoreSysAttributes):
             await self.sys_host.control.set_datetime(data.dt_utc)
             await self.sys_supervisor.check_connectivity()

-    async def repair(self):
+    async def repair(self) -> None:
         """Repair system integrity."""
         _LOGGER.info("Starting repair of Supervisor Environment")
         await self.sys_run_in_executor(self.sys_docker.repair)
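Note: the _adjust_system_datetime hunk replaces the None-returning helper with an explicit try/except fallback: if TLS validation fails (typically because the host clock is far off), the whoami lookup is retried without SSL. A runnable sketch of that control flow, with hypothetical stand-ins for the whoami helpers:

    import asyncio


    class FetchError(Exception):
        """Base error for the hypothetical whoami-style service."""


    class FetchSSLError(FetchError):
        """Raised when certificate validation fails (e.g. clock drift)."""


    async def fetch(with_ssl: bool) -> str:
        """Stand-in for retrieve_whoami(); fails only on the HTTPS path here."""
        if with_ssl:
            raise FetchSSLError("certificate not yet valid")
        return "2024-01-01T00:00:00+00:00"


    async def fetch_time_info() -> str | None:
        try:
            try:
                return await fetch(with_ssl=True)
            except FetchSSLError:
                # A far-off clock breaks TLS validation, so retry without
                # SSL to learn the correct time before fixing the clock.
                return await fetch(with_ssl=False)
        except FetchError as err:
            print(f"Can't adjust time/date settings: {err}")
            return None


    print(asyncio.run(fetch_time_info()))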
@@ -13,6 +13,7 @@ from types import MappingProxyType
 from typing import TYPE_CHECKING, Any, Self, TypeVar

 import aiohttp
+from pycares import AresError

 from .config import CoreConfig
 from .const import (
@@ -61,17 +62,17 @@ _LOGGER: logging.Logger = logging.getLogger(__name__)
 class CoreSys:
     """Class that handle all shared data."""

-    def __init__(self):
+    def __init__(self) -> None:
         """Initialize coresys."""
         # Static attributes protected
         self._machine_id: str | None = None
         self._machine: str | None = None

         # External objects
-        self._loop: asyncio.BaseEventLoop = asyncio.get_running_loop()
+        self._loop = asyncio.get_running_loop()

         # Global objects
-        self._config: CoreConfig = CoreConfig()
+        self._config = CoreConfig()

         # Internal objects pointers
         self._docker: DockerAPI | None = None
@@ -121,13 +122,23 @@ class CoreSys:
         if self._websession:
             await self._websession.close()

-        resolver = aiohttp.AsyncResolver()
-
-        # pylint: disable=protected-access
-        _LOGGER.debug(
-            "Initializing ClientSession with AsyncResolver. Using nameservers %s",
-            resolver._resolver.nameservers,
-        )
+        resolver: aiohttp.abc.AbstractResolver
+        try:
+            # Use "unused" kwargs to force dedicated resolver instance. Otherwise
+            # aiodns won't reload /etc/resolv.conf which we need to make our connection
+            # check work in all cases.
+            resolver = aiohttp.AsyncResolver(loop=self.loop, timeout=None)
+            # pylint: disable=protected-access
+            _LOGGER.debug(
+                "Initializing ClientSession with AsyncResolver. Using nameservers %s",
+                resolver._resolver.nameservers,
+            )
+        except AresError as err:
+            _LOGGER.critical(
+                "Unable to initialize async DNS resolver: %s", err, exc_info=True
+            )
+            resolver = aiohttp.ThreadedResolver(loop=self.loop)
+
         connector = aiohttp.TCPConnector(loop=self.loop, resolver=resolver)

         session = aiohttp.ClientSession(
@@ -137,7 +148,7 @@ class CoreSys:

         self._websession = session

-    async def init_machine(self):
+    async def init_machine(self) -> None:
         """Initialize machine information."""

         def _load_machine_id() -> str | None:
@@ -160,7 +171,7 @@
     @property
     def dev(self) -> bool:
         """Return True if we run dev mode."""
-        return bool(os.environ.get(ENV_SUPERVISOR_DEV, 0))
+        return bool(os.environ.get(ENV_SUPERVISOR_DEV) == "1")

     @property
     def timezone(self) -> str:
@@ -181,7 +192,7 @@
         return UTC

     @property
-    def loop(self) -> asyncio.BaseEventLoop:
+    def loop(self) -> asyncio.AbstractEventLoop:
         """Return loop object."""
         return self._loop

@@ -579,7 +590,7 @@
         return self._machine_id

     @machine_id.setter
-    def machine_id(self, value: str) -> None:
+    def machine_id(self, value: str | None) -> None:
         """Set a machine-id type string."""
         if self._machine_id:
             raise RuntimeError("Machine-ID type already set!")
@@ -601,8 +612,8 @@
         self._set_task_context.append(callback)

     def run_in_executor(
-        self, funct: Callable[..., T], *args: tuple[Any], **kwargs: dict[str, Any]
-    ) -> Coroutine[Any, Any, T]:
+        self, funct: Callable[..., T], *args, **kwargs
+    ) -> asyncio.Future[T]:
         """Add an job to the executor pool."""
         if kwargs:
             funct = partial(funct, **kwargs)
@@ -623,9 +634,9 @@
     def call_later(
         self,
         delay: float,
-        funct: Callable[..., Coroutine[Any, Any, T]],
-        *args: tuple[Any],
-        **kwargs: dict[str, Any],
+        funct: Callable[..., Any],
+        *args,
+        **kwargs,
     ) -> asyncio.TimerHandle:
         """Start a task after a delay."""
         if kwargs:
@@ -636,9 +647,9 @@
     def call_at(
         self,
         when: datetime,
-        funct: Callable[..., Coroutine[Any, Any, T]],
-        *args: tuple[Any],
-        **kwargs: dict[str, Any],
+        funct: Callable[..., Any],
+        *args,
+        **kwargs,
     ) -> asyncio.TimerHandle:
         """Start a task at the specified datetime."""
         if kwargs:
@@ -666,7 +677,7 @@ class CoreSysAttributes:

     @property
     def sys_machine_id(self) -> str | None:
-        """Return machine id."""
+        """Return machine ID."""
         return self.coresys.machine_id

     @property
@@ -675,7 +686,7 @@ class CoreSysAttributes:
         return self.coresys.dev

     @property
-    def sys_loop(self) -> asyncio.BaseEventLoop:
+    def sys_loop(self) -> asyncio.AbstractEventLoop:
         """Return loop object."""
         return self.coresys.loop

@@ -825,7 +836,7 @@ class CoreSysAttributes:

     def sys_run_in_executor(
         self, funct: Callable[..., T], *args, **kwargs
-    ) -> Coroutine[Any, Any, T]:
+    ) -> asyncio.Future[T]:
         """Add a job to the executor pool."""
         return self.coresys.run_in_executor(funct, *args, **kwargs)

@@ -836,7 +847,7 @@ class CoreSysAttributes:
     def sys_call_later(
         self,
         delay: float,
-        funct: Callable[..., Coroutine[Any, Any, T]],
+        funct: Callable[..., Any],
         *args,
         **kwargs,
     ) -> asyncio.TimerHandle:
@@ -846,7 +857,7 @@ class CoreSysAttributes:
     def sys_call_at(
         self,
         when: datetime,
-        funct: Callable[..., Coroutine[Any, Any, T]],
+        funct: Callable[..., Any],
         *args,
         **kwargs,
     ) -> asyncio.TimerHandle:
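Note: the ClientSession hunk hardens DNS setup: prefer aiohttp's c-ares based AsyncResolver, and drop to the thread-pool resolver if it cannot be initialized. A sketch of that fallback, under the assumption that construction may fail when the aiodns backend is unavailable (the Supervisor catches the narrower pycares.AresError):

    import aiohttp
    from aiohttp.abc import AbstractResolver


    def make_resolver() -> AbstractResolver:
        """Prefer the async (c-ares) resolver, fall back to threads."""
        try:
            # Needs the aiodns package; constructing it can fail on hosts
            # where the c-ares backend cannot be set up.
            return aiohttp.AsyncResolver()
        except Exception as err:
            print(f"async DNS resolver unavailable, using threads: {err}")
            return aiohttp.ThreadedResolver()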
@@ -135,6 +135,7 @@ DBUS_ATTR_LAST_ERROR = "LastError"
 DBUS_ATTR_LLMNR = "LLMNR"
 DBUS_ATTR_LLMNR_HOSTNAME = "LLMNRHostname"
 DBUS_ATTR_LOADER_TIMESTAMP_MONOTONIC = "LoaderTimestampMonotonic"
+DBUS_ATTR_LOCAL_RTC = "LocalRTC"
 DBUS_ATTR_MANAGED = "Managed"
 DBUS_ATTR_MODE = "Mode"
 DBUS_ATTR_MODEL = "Model"
@@ -210,6 +211,24 @@ class InterfaceMethod(StrEnum):
     LINK_LOCAL = "link-local"


+class InterfaceAddrGenMode(IntEnum):
+    """Interface addr_gen_mode."""
+
+    EUI64 = 0
+    STABLE_PRIVACY = 1
+    DEFAULT_OR_EUI64 = 2
+    DEFAULT = 3
+
+
+class InterfaceIp6Privacy(IntEnum):
+    """Interface ip6_privacy."""
+
+    DEFAULT = -1
+    DISABLED = 0
+    ENABLED_PREFER_PUBLIC = 1
+    ENABLED = 2
+
+
 class ConnectionType(StrEnum):
     """Connection type."""

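Note: NetworkManager exposes addr-gen-mode and ip6-privacy as plain integers on D-Bus; IntEnum keeps symbolic names while staying interchangeable with the raw values. For example:

    from enum import IntEnum


    class AddrGenMode(IntEnum):
        """Local copy of the values above, for illustration."""

        EUI64 = 0
        STABLE_PRIVACY = 1
        DEFAULT_OR_EUI64 = 2
        DEFAULT = 3


    raw = 1  # as read from the D-Bus property
    mode = AddrGenMode(raw)
    assert mode is AddrGenMode.STABLE_PRIVACY
    assert mode == 1  # still usable anywhere an int is expected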
@@ -117,7 +117,7 @@ class DBusInterfaceProxy(DBusInterface, ABC):
         """Initialize object with already connected dbus object."""
         await super().initialize(connected_dbus)

-        if not self.connected_dbus.properties:
+        if not self.connected_dbus.supports_properties:
             self.disconnect()
             raise DBusInterfaceError(
                 f"D-Bus object {self.object_path} is not usable, introspection is missing required properties interface"
@@ -8,7 +8,7 @@ from dbus_fast.aio.message_bus import MessageBus

 from ..const import SOCKET_DBUS
 from ..coresys import CoreSys, CoreSysAttributes
-from ..exceptions import DBusFatalError
+from ..exceptions import DBusFatalError, DBusNotConnectedError
 from .agent import OSAgent
 from .hostname import Hostname
 from .interface import DBusInterface
@@ -91,6 +91,13 @@ class DBusManager(CoreSysAttributes):
         """Return the message bus."""
         return self._bus

+    @property
+    def connected_bus(self) -> MessageBus:
+        """Return the message bus. Raise if not connected."""
+        if not self._bus:
+            raise DBusNotConnectedError()
+        return self._bus
+
     @property
     def all(self) -> list[DBusInterface]:
         """Return all managed dbus interfaces."""
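Note: connected_bus is a small narrowing helper. Callers that require a live bus get a non-Optional MessageBus or an immediate DBusNotConnectedError, instead of a None that fails later. The shape of the pattern, with stand-in types:

    class NotConnectedError(RuntimeError):
        """Stand-in for DBusNotConnectedError."""


    class Manager:
        def __init__(self) -> None:
            self._bus: object | None = None  # set once connected

        @property
        def connected_bus(self) -> object:
            """Return the bus, raising instead of returning None."""
            if not self._bus:
                raise NotConnectedError("D-Bus is not connected")
            return self._bus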
@@ -185,10 +185,14 @@ class NetworkManager(DBusInterfaceProxy):
         if not changed and self.dns.is_connected:
             await self.dns.update()

-        if changed and (
-            DBUS_ATTR_DEVICES not in changed
-            or {intr.object_path for intr in self.interfaces if intr.managed}.issubset(
-                set(changed[DBUS_ATTR_DEVICES])
+        if (
+            changed
+            and DBUS_ATTR_PRIMARY_CONNECTION not in changed
+            and (
+                DBUS_ATTR_DEVICES not in changed
+                or {
+                    intr.object_path for intr in self.interfaces if intr.managed
+                }.issubset(set(changed[DBUS_ATTR_DEVICES]))
             )
         ):
             # If none of our managed devices were removed then most likely this is just veths changing.
@@ -255,7 +259,7 @@ class NetworkManager(DBusInterfaceProxy):
             else:
                 interface.primary = False

-            interfaces[interface.name] = interface
+            interfaces[interface.interface_name] = interface
             interfaces[interface.hw_address] = interface

         # Disconnect removed devices
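Note: the properties-changed filter boils down to a set check: if every managed device path is still present in the updated Devices list (and the primary connection did not change), the update is most likely just veth churn and the full interface reload can be skipped. Distilled, with example object paths:

    managed = {"/org/freedesktop/NetworkManager/Devices/1"}
    changed_devices = [
        "/org/freedesktop/NetworkManager/Devices/1",
        "/org/freedesktop/NetworkManager/Devices/7",  # e.g. a container veth
    ]

    if managed.issubset(set(changed_devices)):
        print("managed interfaces intact - skip the expensive reload")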
@@ -1,5 +1,6 @@
 """NetworkConnection objects for Network Manager."""

+from abc import ABC
 from dataclasses import dataclass
 from ipaddress import IPv4Address, IPv6Address

@@ -29,7 +30,7 @@ class ConnectionProperties:
 class WirelessProperties:
     """Wireless Properties object for Network Manager."""

-    ssid: str | None
+    ssid: str
     assigned_mac: str | None
     mode: str | None
     powersave: int | None
@@ -55,7 +56,7 @@ class EthernetProperties:
 class VlanProperties:
     """Ethernet properties object for Network Manager."""

-    id: int | None
+    id: int
     parent: str | None


@@ -67,14 +68,29 @@ class IpAddress:
     prefix: int


-@dataclass(slots=True)
-class IpProperties:
+@dataclass
+class IpProperties(ABC):
     """IP properties object for Network Manager."""

     method: str | None
     address_data: list[IpAddress] | None
     gateway: str | None
-    dns: list[bytes | int] | None
+
+
+@dataclass(slots=True)
+class Ip4Properties(IpProperties):
+    """IPv4 properties object."""
+
+    dns: list[int] | None
+
+
+@dataclass(slots=True)
+class Ip6Properties(IpProperties):
+    """IPv6 properties object for Network Manager."""
+
+    addr_gen_mode: int
+    ip6_privacy: int
+    dns: list[bytes] | None


 @dataclass(slots=True)
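Note: splitting IpProperties into Ip4Properties/Ip6Properties moves the protocol-specific fields (addr_gen_mode, ip6_privacy, and the differing dns element types) into subclasses. The base stays a plain @dataclass while only the leaves use slots=True, since slotted dataclass inheritance is simplest when the base adds no slots of its own. A compact sketch with stand-in names:

    from abc import ABC
    from dataclasses import dataclass


    @dataclass
    class IpBase(ABC):
        method: str | None
        gateway: str | None


    @dataclass(slots=True)
    class Ip4(IpBase):
        dns: list[int] | None


    @dataclass(slots=True)
    class Ip6(IpBase):
        addr_gen_mode: int
        ip6_privacy: int
        dns: list[bytes] | None


    v6 = Ip6(method="auto", gateway=None, addr_gen_mode=1, ip6_privacy=-1, dns=None)
    assert v6.ip6_privacy == -1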
@@ -96,7 +96,7 @@ class NetworkConnection(DBusInterfaceProxy):

     @ipv4.setter
     def ipv4(self, ipv4: IpConfiguration | None) -> None:
-        """Set ipv4 configuration."""
+        """Set IPv4 configuration."""
         if self._ipv4 and self._ipv4 is not ipv4:
             self._ipv4.shutdown()

@@ -109,7 +109,7 @@ class NetworkConnection(DBusInterfaceProxy):

     @ipv6.setter
     def ipv6(self, ipv6: IpConfiguration | None) -> None:
-        """Set ipv6 configuration."""
+        """Set IPv6 configuration."""
         if self._ipv6 and self._ipv6 is not ipv6:
             self._ipv6.shutdown()

@@ -49,7 +49,7 @@ class NetworkInterface(DBusInterfaceProxy):

     @property
     @dbus_property
-    def name(self) -> str:
+    def interface_name(self) -> str:
         """Return interface name."""
         return self.properties[DBUS_ATTR_DEVICE_INTERFACE]

@@ -12,8 +12,9 @@ from ...utils import dbus_connected
 from ..configuration import (
     ConnectionProperties,
     EthernetProperties,
+    Ip4Properties,
+    Ip6Properties,
     IpAddress,
-    IpProperties,
     MatchProperties,
     VlanProperties,
     WirelessProperties,
@@ -58,6 +59,8 @@ CONF_ATTR_IPV4_GATEWAY = "gateway"
 CONF_ATTR_IPV4_DNS = "dns"

 CONF_ATTR_IPV6_METHOD = "method"
+CONF_ATTR_IPV6_ADDR_GEN_MODE = "addr-gen-mode"
+CONF_ATTR_IPV6_PRIVACY = "ip6-privacy"
 CONF_ATTR_IPV6_ADDRESS_DATA = "address-data"
 CONF_ATTR_IPV6_GATEWAY = "gateway"
 CONF_ATTR_IPV6_DNS = "dns"
@@ -69,6 +72,8 @@ IPV4_6_IGNORE_FIELDS = [
     "dns-data",
     "gateway",
     "method",
+    "addr-gen-mode",
+    "ip6-privacy",
 ]

 _LOGGER: logging.Logger = logging.getLogger(__name__)
@@ -110,8 +115,8 @@ class NetworkSetting(DBusInterface):
         self._wireless_security: WirelessSecurityProperties | None = None
         self._ethernet: EthernetProperties | None = None
         self._vlan: VlanProperties | None = None
-        self._ipv4: IpProperties | None = None
-        self._ipv6: IpProperties | None = None
+        self._ipv4: Ip4Properties | None = None
+        self._ipv6: Ip6Properties | None = None
         self._match: MatchProperties | None = None
         super().__init__()

@@ -146,13 +151,13 @@ class NetworkSetting(DBusInterface):
         return self._vlan

     @property
-    def ipv4(self) -> IpProperties | None:
-        """Return ipv4 properties if any."""
+    def ipv4(self) -> Ip4Properties | None:
+        """Return IPv4 properties if any."""
         return self._ipv4

     @property
-    def ipv6(self) -> IpProperties | None:
-        """Return ipv6 properties if any."""
+    def ipv6(self) -> Ip6Properties | None:
+        """Return IPv6 properties if any."""
         return self._ipv6

     @property
@@ -223,66 +228,83 @@ class NetworkSetting(DBusInterface):
         # See: https://developer-old.gnome.org/NetworkManager/stable/ch01.html
         if CONF_ATTR_CONNECTION in data:
             self._connection = ConnectionProperties(
-                data[CONF_ATTR_CONNECTION].get(CONF_ATTR_CONNECTION_ID),
-                data[CONF_ATTR_CONNECTION].get(CONF_ATTR_CONNECTION_UUID),
-                data[CONF_ATTR_CONNECTION].get(CONF_ATTR_CONNECTION_TYPE),
-                data[CONF_ATTR_CONNECTION].get(CONF_ATTR_CONNECTION_INTERFACE_NAME),
+                id=data[CONF_ATTR_CONNECTION].get(CONF_ATTR_CONNECTION_ID),
+                uuid=data[CONF_ATTR_CONNECTION].get(CONF_ATTR_CONNECTION_UUID),
+                type=data[CONF_ATTR_CONNECTION].get(CONF_ATTR_CONNECTION_TYPE),
+                interface_name=data[CONF_ATTR_CONNECTION].get(
+                    CONF_ATTR_CONNECTION_INTERFACE_NAME
+                ),
             )

         if CONF_ATTR_802_ETHERNET in data:
             self._ethernet = EthernetProperties(
-                data[CONF_ATTR_802_ETHERNET].get(CONF_ATTR_802_ETHERNET_ASSIGNED_MAC),
+                assigned_mac=data[CONF_ATTR_802_ETHERNET].get(
+                    CONF_ATTR_802_ETHERNET_ASSIGNED_MAC
+                ),
             )

         if CONF_ATTR_802_WIRELESS in data:
             self._wireless = WirelessProperties(
-                bytes(
+                ssid=bytes(
                     data[CONF_ATTR_802_WIRELESS].get(CONF_ATTR_802_WIRELESS_SSID, [])
                 ).decode(),
-                data[CONF_ATTR_802_WIRELESS].get(CONF_ATTR_802_WIRELESS_ASSIGNED_MAC),
-                data[CONF_ATTR_802_WIRELESS].get(CONF_ATTR_802_WIRELESS_MODE),
-                data[CONF_ATTR_802_WIRELESS].get(CONF_ATTR_802_WIRELESS_POWERSAVE),
+                assigned_mac=data[CONF_ATTR_802_WIRELESS].get(
+                    CONF_ATTR_802_WIRELESS_ASSIGNED_MAC
+                ),
+                mode=data[CONF_ATTR_802_WIRELESS].get(CONF_ATTR_802_WIRELESS_MODE),
+                powersave=data[CONF_ATTR_802_WIRELESS].get(
+                    CONF_ATTR_802_WIRELESS_POWERSAVE
+                ),
             )

         if CONF_ATTR_802_WIRELESS_SECURITY in data:
             self._wireless_security = WirelessSecurityProperties(
-                data[CONF_ATTR_802_WIRELESS_SECURITY].get(
+                auth_alg=data[CONF_ATTR_802_WIRELESS_SECURITY].get(
                     CONF_ATTR_802_WIRELESS_SECURITY_AUTH_ALG
                 ),
-                data[CONF_ATTR_802_WIRELESS_SECURITY].get(
+                key_mgmt=data[CONF_ATTR_802_WIRELESS_SECURITY].get(
                     CONF_ATTR_802_WIRELESS_SECURITY_KEY_MGMT
                 ),
-                data[CONF_ATTR_802_WIRELESS_SECURITY].get(
+                psk=data[CONF_ATTR_802_WIRELESS_SECURITY].get(
                     CONF_ATTR_802_WIRELESS_SECURITY_PSK
                 ),
             )

         if CONF_ATTR_VLAN in data:
-            self._vlan = VlanProperties(
-                data[CONF_ATTR_VLAN].get(CONF_ATTR_VLAN_ID),
-                data[CONF_ATTR_VLAN].get(CONF_ATTR_VLAN_PARENT),
-            )
+            if CONF_ATTR_VLAN_ID in data[CONF_ATTR_VLAN]:
+                self._vlan = VlanProperties(
+                    data[CONF_ATTR_VLAN][CONF_ATTR_VLAN_ID],
+                    data[CONF_ATTR_VLAN].get(CONF_ATTR_VLAN_PARENT),
+                )
+            else:
+                self._vlan = None
+                _LOGGER.warning(
+                    "Network settings for vlan connection %s missing required vlan id, cannot process it",
+                    self.connection.interface_name,
+                )

         if CONF_ATTR_IPV4 in data:
             address_data = None
             if ips := data[CONF_ATTR_IPV4].get(CONF_ATTR_IPV4_ADDRESS_DATA):
                 address_data = [IpAddress(ip["address"], ip["prefix"]) for ip in ips]
-            self._ipv4 = IpProperties(
-                data[CONF_ATTR_IPV4].get(CONF_ATTR_IPV4_METHOD),
-                address_data,
-                data[CONF_ATTR_IPV4].get(CONF_ATTR_IPV4_GATEWAY),
-                data[CONF_ATTR_IPV4].get(CONF_ATTR_IPV4_DNS),
+            self._ipv4 = Ip4Properties(
+                method=data[CONF_ATTR_IPV4].get(CONF_ATTR_IPV4_METHOD),
+                address_data=address_data,
+                gateway=data[CONF_ATTR_IPV4].get(CONF_ATTR_IPV4_GATEWAY),
+                dns=data[CONF_ATTR_IPV4].get(CONF_ATTR_IPV4_DNS),
             )

         if CONF_ATTR_IPV6 in data:
             address_data = None
             if ips := data[CONF_ATTR_IPV6].get(CONF_ATTR_IPV6_ADDRESS_DATA):
                 address_data = [IpAddress(ip["address"], ip["prefix"]) for ip in ips]
-            self._ipv6 = IpProperties(
-                data[CONF_ATTR_IPV6].get(CONF_ATTR_IPV6_METHOD),
-                address_data,
-                data[CONF_ATTR_IPV6].get(CONF_ATTR_IPV6_GATEWAY),
-                data[CONF_ATTR_IPV6].get(CONF_ATTR_IPV6_DNS),
+            self._ipv6 = Ip6Properties(
+                method=data[CONF_ATTR_IPV6].get(CONF_ATTR_IPV6_METHOD),
+                addr_gen_mode=data[CONF_ATTR_IPV6].get(CONF_ATTR_IPV6_ADDR_GEN_MODE),
+                ip6_privacy=data[CONF_ATTR_IPV6].get(CONF_ATTR_IPV6_PRIVACY),
+                address_data=address_data,
+                gateway=data[CONF_ATTR_IPV6].get(CONF_ATTR_IPV6_GATEWAY),
+                dns=data[CONF_ATTR_IPV6].get(CONF_ATTR_IPV6_DNS),
            )

         if CONF_ATTR_MATCH in data:
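Note: two things happen throughout this hunk. The property constructors switch to keyword arguments, so a reordered dataclass field cannot silently shuffle values, and the vlan block now treats a missing id as unprocessable instead of constructing VlanProperties(None, ...). The defensive shape, distilled into a standalone snippet with illustrative data:

    settings = {"vlan": {"parent": "eth0"}}  # illustrative: no "id" key

    vlan: tuple[int, str | None] | None = None
    if "vlan" in settings:
        if "id" in settings["vlan"]:
            vlan = (settings["vlan"]["id"], settings["vlan"].get("parent"))
        else:
            print("vlan settings missing required id, cannot process them")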
@@ -8,8 +8,13 @@ from uuid import uuid4

 from dbus_fast import Variant

-from ....host.configuration import VlanConfig
-from ....host.const import InterfaceMethod, InterfaceType
+from ....host.configuration import Ip6Setting, IpSetting, VlanConfig
+from ....host.const import (
+    InterfaceAddrGenMode,
+    InterfaceIp6Privacy,
+    InterfaceMethod,
+    InterfaceType,
+)
 from .. import NetworkManager
 from . import (
     CONF_ATTR_802_ETHERNET,
@@ -36,10 +41,12 @@ from . import (
     CONF_ATTR_IPV4_GATEWAY,
     CONF_ATTR_IPV4_METHOD,
     CONF_ATTR_IPV6,
+    CONF_ATTR_IPV6_ADDR_GEN_MODE,
     CONF_ATTR_IPV6_ADDRESS_DATA,
     CONF_ATTR_IPV6_DNS,
     CONF_ATTR_IPV6_GATEWAY,
     CONF_ATTR_IPV6_METHOD,
+    CONF_ATTR_IPV6_PRIVACY,
     CONF_ATTR_MATCH,
     CONF_ATTR_MATCH_PATH,
     CONF_ATTR_VLAN,
@@ -51,7 +58,7 @@ if TYPE_CHECKING:
     from ....host.configuration import Interface


-def _get_ipv4_connection_settings(ipv4setting) -> dict:
+def _get_ipv4_connection_settings(ipv4setting: IpSetting | None) -> dict:
     ipv4 = {}
     if not ipv4setting or ipv4setting.method == InterfaceMethod.AUTO:
         ipv4[CONF_ATTR_IPV4_METHOD] = Variant("s", "auto")
@@ -93,10 +100,32 @@ def _get_ipv4_connection_settings(ipv4setting) -> dict:
     return ipv4


-def _get_ipv6_connection_settings(ipv6setting) -> dict:
+def _get_ipv6_connection_settings(
+    ipv6setting: Ip6Setting | None, support_addr_gen_mode_defaults: bool = False
+) -> dict:
     ipv6 = {}
     if not ipv6setting or ipv6setting.method == InterfaceMethod.AUTO:
         ipv6[CONF_ATTR_IPV6_METHOD] = Variant("s", "auto")
+        if ipv6setting:
+            if ipv6setting.addr_gen_mode == InterfaceAddrGenMode.EUI64:
+                ipv6[CONF_ATTR_IPV6_ADDR_GEN_MODE] = Variant("i", 0)
+            elif (
+                not support_addr_gen_mode_defaults
+                or ipv6setting.addr_gen_mode == InterfaceAddrGenMode.STABLE_PRIVACY
+            ):
+                ipv6[CONF_ATTR_IPV6_ADDR_GEN_MODE] = Variant("i", 1)
+            elif ipv6setting.addr_gen_mode == InterfaceAddrGenMode.DEFAULT_OR_EUI64:
+                ipv6[CONF_ATTR_IPV6_ADDR_GEN_MODE] = Variant("i", 2)
+            else:
+                ipv6[CONF_ATTR_IPV6_ADDR_GEN_MODE] = Variant("i", 3)
+            if ipv6setting.ip6_privacy == InterfaceIp6Privacy.DISABLED:
+                ipv6[CONF_ATTR_IPV6_PRIVACY] = Variant("i", 0)
+            elif ipv6setting.ip6_privacy == InterfaceIp6Privacy.ENABLED_PREFER_PUBLIC:
+                ipv6[CONF_ATTR_IPV6_PRIVACY] = Variant("i", 1)
+            elif ipv6setting.ip6_privacy == InterfaceIp6Privacy.ENABLED:
+                ipv6[CONF_ATTR_IPV6_PRIVACY] = Variant("i", 2)
+            else:
+                ipv6[CONF_ATTR_IPV6_PRIVACY] = Variant("i", -1)
     elif ipv6setting.method == InterfaceMethod.DISABLED:
         ipv6[CONF_ATTR_IPV6_METHOD] = Variant("s", "link-local")
     elif ipv6setting.method == InterfaceMethod.STATIC:
@@ -183,7 +212,9 @@ def get_connection_from_interface(

     conn[CONF_ATTR_IPV4] = _get_ipv4_connection_settings(interface.ipv4setting)

-    conn[CONF_ATTR_IPV6] = _get_ipv6_connection_settings(interface.ipv6setting)
+    conn[CONF_ATTR_IPV6] = _get_ipv6_connection_settings(
+        interface.ipv6setting, network_manager.version >= "1.40.0"
+    )

     if interface.type == InterfaceType.ETHERNET:
         conn[CONF_ATTR_802_ETHERNET] = {
@@ -191,8 +222,10 @@ def get_connection_from_interface(
         }
     elif interface.type == "vlan":
         parent = cast(VlanConfig, interface.vlan).interface
-        if parent in network_manager and (
-            parent_connection := network_manager.get(parent).connection
+        if (
+            parent
+            and parent in network_manager
+            and (parent_connection := network_manager.get(parent).connection)
         ):
             parent = parent_connection.uuid

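Note: the settings writer maps the enums to NetworkManager's integers, but gates the DEFAULT/DEFAULT_OR_EUI64 values on the daemon version (the support_addr_gen_mode_defaults flag is passed as network_manager.version >= "1.40.0" below); older daemons get STABLE_PRIVACY instead. The decision table, as a runnable sketch:

    from enum import IntEnum


    class AddrGenMode(IntEnum):
        EUI64 = 0
        STABLE_PRIVACY = 1
        DEFAULT_OR_EUI64 = 2
        DEFAULT = 3


    def addr_gen_mode_value(mode: AddrGenMode, supports_defaults: bool) -> int:
        """Clamp the DEFAULT* modes to STABLE_PRIVACY when the daemon is too old."""
        if mode is AddrGenMode.EUI64:
            return 0
        if not supports_defaults or mode is AddrGenMode.STABLE_PRIVACY:
            return 1
        return 2 if mode is AddrGenMode.DEFAULT_OR_EUI64 else 3


    assert addr_gen_mode_value(AddrGenMode.DEFAULT, supports_defaults=False) == 1
    assert addr_gen_mode_value(AddrGenMode.DEFAULT, supports_defaults=True) == 3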
@@ -10,6 +10,7 @@ from dbus_fast.aio.message_bus import MessageBus
 from ..exceptions import DBusError, DBusInterfaceError, DBusServiceUnkownError
 from ..utils.dt import get_time_zone, utc_from_timestamp
 from .const import (
+    DBUS_ATTR_LOCAL_RTC,
     DBUS_ATTR_NTP,
     DBUS_ATTR_NTPSYNCHRONIZED,
     DBUS_ATTR_TIMEUSEC,
@@ -46,6 +47,12 @@ class TimeDate(DBusInterfaceProxy):
         """Return host timezone."""
         return self.properties[DBUS_ATTR_TIMEZONE]

+    @property
+    @dbus_property
+    def local_rtc(self) -> bool:
+        """Return whether rtc is local time or utc."""
+        return self.properties[DBUS_ATTR_LOCAL_RTC]
+
     @property
     @dbus_property
     def ntp(self) -> bool:
@@ -28,6 +28,8 @@ class DeviceSpecificationDataType(TypedDict, total=False):
     path: str
     label: str
     uuid: str
+    partuuid: str
+    partlabel: str


 @dataclass(slots=True)
@@ -40,6 +42,8 @@ class DeviceSpecification:
     path: Path | None = None
     label: str | None = None
     uuid: str | None = None
+    partuuid: str | None = None
+    partlabel: str | None = None

     @staticmethod
     def from_dict(data: DeviceSpecificationDataType) -> "DeviceSpecification":
@@ -48,6 +52,8 @@ class DeviceSpecification:
             path=Path(data["path"]) if "path" in data else None,
             label=data.get("label"),
             uuid=data.get("uuid"),
+            partuuid=data.get("partuuid"),
+            partlabel=data.get("partlabel"),
         )

     def to_dict(self) -> dict[str, Variant]:
@@ -56,6 +62,8 @@ class DeviceSpecification:
             "path": Variant("s", self.path.as_posix()) if self.path else None,
             "label": _optional_variant("s", self.label),
             "uuid": _optional_variant("s", self.uuid),
+            "partuuid": _optional_variant("s", self.partuuid),
+            "partlabel": _optional_variant("s", self.partlabel),
         }
         return {k: v for k, v in data.items() if v}

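Note: to_dict() builds every candidate key, then strips the unset ones, which keeps the D-Bus call free of null Variants. The same idiom with a plain tuple standing in for dbus_fast.Variant so it runs standalone:

    def optional_variant(signature: str, value: str | None) -> tuple[str, str] | None:
        # Stand-in for _optional_variant(); returns None for unset values.
        return (signature, value) if value is not None else None


    data = {
        "uuid": optional_variant("s", "1234-ABCD"),
        "partuuid": optional_variant("s", None),
    }
    # Only the populated entries survive the filter:
    assert {k: v for k, v in data.items() if v} == {"uuid": ("s", "1234-ABCD")}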
@@ -12,6 +12,7 @@ from typing import TYPE_CHECKING, cast
 from attr import evolve
 from awesomeversion import AwesomeVersion
 import docker
+import docker.errors
 from docker.types import Mount
 import requests

@@ -43,6 +44,7 @@ from ..jobs.decorator import Job
 from ..resolution.const import CGROUP_V2_VERSION, ContextType, IssueType, SuggestionType
 from ..utils.sentry import async_capture_exception
 from .const import (
+    ADDON_BUILDER_IMAGE,
     ENV_TIME,
     ENV_TOKEN,
     ENV_TOKEN_OLD,
@@ -344,7 +346,7 @@ class DockerAddon(DockerInterface):
         mounts = [
             MOUNT_DEV,
             Mount(
-                type=MountType.BIND,
+                type=MountType.BIND.value,
                 source=self.addon.path_extern_data.as_posix(),
                 target=target_data_path or PATH_PRIVATE_DATA.as_posix(),
                 read_only=False,
@@ -355,7 +357,7 @@ class DockerAddon(DockerInterface):
         if MappingType.CONFIG in addon_mapping:
             mounts.append(
                 Mount(
-                    type=MountType.BIND,
+                    type=MountType.BIND.value,
                     source=self.sys_config.path_extern_homeassistant.as_posix(),
                     target=addon_mapping[MappingType.CONFIG].path
                     or PATH_HOMEASSISTANT_CONFIG_LEGACY.as_posix(),
@@ -368,7 +370,7 @@ class DockerAddon(DockerInterface):
             if self.addon.addon_config_used:
                 mounts.append(
                     Mount(
-                        type=MountType.BIND,
+                        type=MountType.BIND.value,
                         source=self.addon.path_extern_config.as_posix(),
                         target=addon_mapping[MappingType.ADDON_CONFIG].path
                         or PATH_PUBLIC_CONFIG.as_posix(),
@@ -380,7 +382,7 @@ class DockerAddon(DockerInterface):
         if MappingType.HOMEASSISTANT_CONFIG in addon_mapping:
             mounts.append(
                 Mount(
-                    type=MountType.BIND,
+                    type=MountType.BIND.value,
                     source=self.sys_config.path_extern_homeassistant.as_posix(),
                     target=addon_mapping[MappingType.HOMEASSISTANT_CONFIG].path
                     or PATH_HOMEASSISTANT_CONFIG.as_posix(),
@@ -393,7 +395,7 @@ class DockerAddon(DockerInterface):
         if MappingType.ALL_ADDON_CONFIGS in addon_mapping:
             mounts.append(
                 Mount(
-                    type=MountType.BIND,
+                    type=MountType.BIND.value,
                     source=self.sys_config.path_extern_addon_configs.as_posix(),
                     target=addon_mapping[MappingType.ALL_ADDON_CONFIGS].path
                     or PATH_ALL_ADDON_CONFIGS.as_posix(),
@@ -404,7 +406,7 @@ class DockerAddon(DockerInterface):
         if MappingType.SSL in addon_mapping:
             mounts.append(
                 Mount(
-                    type=MountType.BIND,
+                    type=MountType.BIND.value,
                     source=self.sys_config.path_extern_ssl.as_posix(),
                     target=addon_mapping[MappingType.SSL].path or PATH_SSL.as_posix(),
                     read_only=addon_mapping[MappingType.SSL].read_only,
@@ -414,7 +416,7 @@ class DockerAddon(DockerInterface):
         if MappingType.ADDONS in addon_mapping:
             mounts.append(
                 Mount(
-                    type=MountType.BIND,
+                    type=MountType.BIND.value,
                     source=self.sys_config.path_extern_addons_local.as_posix(),
                     target=addon_mapping[MappingType.ADDONS].path
                     or PATH_LOCAL_ADDONS.as_posix(),
@@ -425,7 +427,7 @@ class DockerAddon(DockerInterface):
         if MappingType.BACKUP in addon_mapping:
             mounts.append(
                 Mount(
-                    type=MountType.BIND,
+                    type=MountType.BIND.value,
                     source=self.sys_config.path_extern_backup.as_posix(),
                     target=addon_mapping[MappingType.BACKUP].path
                     or PATH_BACKUP.as_posix(),
@@ -436,7 +438,7 @@ class DockerAddon(DockerInterface):
         if MappingType.SHARE in addon_mapping:
             mounts.append(
                 Mount(
-                    type=MountType.BIND,
+                    type=MountType.BIND.value,
                     source=self.sys_config.path_extern_share.as_posix(),
                     target=addon_mapping[MappingType.SHARE].path
                     or PATH_SHARE.as_posix(),
@@ -448,7 +450,7 @@ class DockerAddon(DockerInterface):
         if MappingType.MEDIA in addon_mapping:
             mounts.append(
                 Mount(
-                    type=MountType.BIND,
+                    type=MountType.BIND.value,
                     source=self.sys_config.path_extern_media.as_posix(),
                     target=addon_mapping[MappingType.MEDIA].path
                     or PATH_MEDIA.as_posix(),
@@ -466,7 +468,7 @@ class DockerAddon(DockerInterface):
                 continue
             mounts.append(
                 Mount(
-                    type=MountType.BIND,
+                    type=MountType.BIND.value,
                     source=gpio_path,
                     target=gpio_path,
                     read_only=False,
@@ -477,7 +479,7 @@ class DockerAddon(DockerInterface):
         if self.addon.with_devicetree:
             mounts.append(
                 Mount(
-                    type=MountType.BIND,
+                    type=MountType.BIND.value,
                     source="/sys/firmware/devicetree/base",
                     target="/device-tree",
                     read_only=True,
@@ -492,7 +494,7 @@ class DockerAddon(DockerInterface):
         if self.addon.with_kernel_modules:
             mounts.append(
                 Mount(
-                    type=MountType.BIND,
+                    type=MountType.BIND.value,
                     source="/lib/modules",
                     target="/lib/modules",
                     read_only=True,
@@ -511,19 +513,19 @@ class DockerAddon(DockerInterface):
         if self.addon.with_audio:
             mounts += [
                 Mount(
-                    type=MountType.BIND,
+                    type=MountType.BIND.value,
                     source=self.addon.path_extern_pulse.as_posix(),
                     target="/etc/pulse/client.conf",
                     read_only=True,
                 ),
                 Mount(
-                    type=MountType.BIND,
+                    type=MountType.BIND.value,
                     source=self.sys_plugins.audio.path_extern_pulse.as_posix(),
                     target="/run/audio",
                     read_only=True,
                 ),
                 Mount(
-                    type=MountType.BIND,
+                    type=MountType.BIND.value,
                     source=self.sys_plugins.audio.path_extern_asound.as_posix(),
                     target="/etc/asound.conf",
                     read_only=True,
@@ -534,13 +536,13 @@ class DockerAddon(DockerInterface):
         if self.addon.with_journald:
             mounts += [
                 Mount(
-                    type=MountType.BIND,
+                    type=MountType.BIND.value,
                     source=SYSTEMD_JOURNAL_PERSISTENT.as_posix(),
                     target=SYSTEMD_JOURNAL_PERSISTENT.as_posix(),
                     read_only=True,
                 ),
                 Mount(
-                    type=MountType.BIND,
+                    type=MountType.BIND.value,
                     source=SYSTEMD_JOURNAL_VOLATILE.as_posix(),
                     target=SYSTEMD_JOURNAL_VOLATILE.as_posix(),
                     read_only=True,
@@ -673,10 +675,41 @@ class DockerAddon(DockerInterface):
         _LOGGER.info("Starting build for %s:%s", self.image, version)

         def build_image():
-            return self.sys_docker.images.build(
-                use_config_proxy=False, **build_env.get_docker_args(version, image)
-            )
+            if build_env.squash:
+                _LOGGER.warning(
+                    "Ignoring squash build option for %s as Docker BuildKit does not support it.",
+                    self.addon.slug,
+                )
+
+            addon_image_tag = f"{image or self.addon.image}:{version!s}"
+
+            docker_version = self.sys_docker.info.version
+            builder_version_tag = f"{docker_version.major}.{docker_version.minor}.{docker_version.micro}-cli"
+
+            builder_name = f"addon_builder_{self.addon.slug}"
+
+            # Remove dangling builder container if it exists by any chance
+            # E.g. because of an abrupt host shutdown/reboot during a build
+            with suppress(docker.errors.NotFound):
+                self.sys_docker.containers.get(builder_name).remove(force=True, v=True)
+
+            result = self.sys_docker.run_command(
+                ADDON_BUILDER_IMAGE,
+                version=builder_version_tag,
+                name=builder_name,
+                **build_env.get_docker_args(version, addon_image_tag),
+            )
+
+            logs = result.output.decode("utf-8")
+
+            if result.exit_code != 0:
+                error_message = f"Docker build failed for {addon_image_tag} (exit code {result.exit_code}). Build output:\n{logs}"
+                raise docker.errors.DockerException(error_message)
+
+            addon_image = self.sys_docker.images.get(addon_image_tag)
+
+            return addon_image, logs

         try:
             docker_image, log = await self.sys_run_in_executor(build_image)

@@ -687,15 +720,6 @@ class DockerAddon(DockerInterface):

         except (docker.errors.DockerException, requests.RequestException) as err:
             _LOGGER.error("Can't build %s:%s: %s", self.image, version, err)
-            if hasattr(err, "build_log"):
-                log = "\n".join(
-                    [
-                        x["stream"]
-                        for x in err.build_log  # pylint: disable=no-member
-                        if isinstance(x, dict) and "stream" in x
-                    ]
-                )
-                _LOGGER.error("Build log: \n%s", log)
             raise DockerError() from err

         _LOGGER.info("Build %s:%s done", self.image, version)
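Note: the rewritten build_image no longer calls the SDK's images.build (the classic builder); it launches the official docker CLI image against the host daemon so builds go through BuildKit, then checks the exit code and fetches the resulting image. A loose sketch with docker-py — image tag, context path, and pinned CLI version here are illustrative, and the real code routes through sys_docker.run_command:

    import docker

    client = docker.from_env()

    # Run `docker build` inside the official CLI image, talking to the host
    # daemon via the mounted socket (assumed paths below).
    logs = client.containers.run(
        "docker.io/library/docker:27.0.3-cli",
        command=["build", "-t", "local/my-addon:1.0.0", "/data/addon_src"],
        volumes={
            "/run/docker.sock": {"bind": "/run/docker.sock", "mode": "rw"},
            "/path/to/addon_src": {"bind": "/data/addon_src", "mode": "ro"},
        },
        remove=True,
    )
    print(logs.decode("utf-8"))

    # The build ran out-of-process, so fetch the image it produced.
    image = client.images.get("local/my-addon:1.0.0")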
@@ -47,7 +47,7 @@ class DockerAudio(DockerInterface, CoreSysAttributes):
         mounts = [
             MOUNT_DEV,
             Mount(
-                type=MountType.BIND,
+                type=MountType.BIND.value,
                 source=self.sys_config.path_extern_audio.as_posix(),
                 target=PATH_PRIVATE_DATA.as_posix(),
                 read_only=False,
@@ -74,24 +74,26 @@ ENV_TOKEN_OLD = "HASSIO_TOKEN"
 LABEL_MANAGED = "supervisor_managed"

 MOUNT_DBUS = Mount(
-    type=MountType.BIND, source="/run/dbus", target="/run/dbus", read_only=True
+    type=MountType.BIND.value, source="/run/dbus", target="/run/dbus", read_only=True
+)
+MOUNT_DEV = Mount(
+    type=MountType.BIND.value, source="/dev", target="/dev", read_only=True
 )
-MOUNT_DEV = Mount(type=MountType.BIND, source="/dev", target="/dev", read_only=True)
 MOUNT_DEV.setdefault("BindOptions", {})["ReadOnlyNonRecursive"] = True
 MOUNT_DOCKER = Mount(
-    type=MountType.BIND,
+    type=MountType.BIND.value,
     source="/run/docker.sock",
     target="/run/docker.sock",
     read_only=True,
 )
 MOUNT_MACHINE_ID = Mount(
-    type=MountType.BIND,
+    type=MountType.BIND.value,
     source=MACHINE_ID.as_posix(),
     target=MACHINE_ID.as_posix(),
     read_only=True,
 )
 MOUNT_UDEV = Mount(
-    type=MountType.BIND, source="/run/udev", target="/run/udev", read_only=True
+    type=MountType.BIND.value, source="/run/udev", target="/run/udev", read_only=True
 )

 PATH_PRIVATE_DATA = PurePath("/data")
@@ -105,3 +107,6 @@ PATH_BACKUP = PurePath("/backup")
 PATH_SHARE = PurePath("/share")
 PATH_MEDIA = PurePath("/media")
 PATH_CLOUD_BACKUP = PurePath("/cloud_backup")
+
+# https://hub.docker.com/_/docker
+ADDON_BUILDER_IMAGE = "docker.io/library/docker"
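Note on the repeated MountType.BIND -> MountType.BIND.value change across these files: docker-py ultimately serializes the mount's type field to the Engine API, and only the .value spelling is guaranteed to be the literal string for any Enum flavor (the exact motivation inside docker-py is an assumption here). The snippet shows the hazard:

    from enum import Enum


    class MountType(Enum):
        BIND = "bind"


    assert str(MountType.BIND) == "MountType.BIND"  # not what a JSON API wants
    assert MountType.BIND.value == "bind"  # always the raw string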
@@ -48,7 +48,7 @@ class DockerDNS(DockerInterface, CoreSysAttributes):
             environment={ENV_TIME: self.sys_timezone},
             mounts=[
                 Mount(
-                    type=MountType.BIND,
+                    type=MountType.BIND.value,
                     source=self.sys_config.path_extern_dns.as_posix(),
                     target="/config",
                     read_only=False,
@@ -99,7 +99,7 @@ class DockerHomeAssistant(DockerInterface):
             MOUNT_UDEV,
             # HA config folder
             Mount(
-                type=MountType.BIND,
+                type=MountType.BIND.value,
                 source=self.sys_config.path_extern_homeassistant.as_posix(),
                 target=PATH_PUBLIC_CONFIG.as_posix(),
                 read_only=False,
@@ -112,20 +112,20 @@ class DockerHomeAssistant(DockerInterface):
             [
                 # All other folders
                 Mount(
-                    type=MountType.BIND,
+                    type=MountType.BIND.value,
                     source=self.sys_config.path_extern_ssl.as_posix(),
                     target=PATH_SSL.as_posix(),
                     read_only=True,
                 ),
                 Mount(
-                    type=MountType.BIND,
+                    type=MountType.BIND.value,
                     source=self.sys_config.path_extern_share.as_posix(),
                     target=PATH_SHARE.as_posix(),
                     read_only=False,
                     propagation=PropagationMode.RSLAVE.value,
                 ),
                 Mount(
-                    type=MountType.BIND,
+                    type=MountType.BIND.value,
                     source=self.sys_config.path_extern_media.as_posix(),
                     target=PATH_MEDIA.as_posix(),
                     read_only=False,
@@ -133,19 +133,19 @@ class DockerHomeAssistant(DockerInterface):
                 ),
                 # Configuration audio
                 Mount(
-                    type=MountType.BIND,
+                    type=MountType.BIND.value,
                     source=self.sys_homeassistant.path_extern_pulse.as_posix(),
                     target="/etc/pulse/client.conf",
                     read_only=True,
                 ),
                 Mount(
-                    type=MountType.BIND,
+                    type=MountType.BIND.value,
                     source=self.sys_plugins.audio.path_extern_pulse.as_posix(),
                     target="/run/audio",
                     read_only=True,
                 ),
                 Mount(
-                    type=MountType.BIND,
+                    type=MountType.BIND.value,
                     source=self.sys_plugins.audio.path_extern_asound.as_posix(),
                     target="/etc/asound.conf",
                     read_only=True,
@@ -213,24 +213,21 @@ class DockerHomeAssistant(DockerInterface):
             privileged=True,
             init=True,
             entrypoint=[],
-            detach=True,
-            stdout=True,
-            stderr=True,
             mounts=[
                 Mount(
-                    type=MountType.BIND,
+                    type=MountType.BIND.value,
                     source=self.sys_config.path_extern_homeassistant.as_posix(),
                     target="/config",
                     read_only=False,
                 ),
                 Mount(
-                    type=MountType.BIND,
+                    type=MountType.BIND.value,
                     source=self.sys_config.path_extern_ssl.as_posix(),
                     target="/ssl",
                     read_only=True,
                 ),
                 Mount(
-                    type=MountType.BIND,
+                    type=MountType.BIND.value,
                     source=self.sys_config.path_extern_share.as_posix(),
                     target="/share",
                     read_only=False,
|
@ -455,11 +455,11 @@ class DockerInterface(JobGroup, ABC):
|
|||||||
self,
|
self,
|
||||||
version: AwesomeVersion,
|
version: AwesomeVersion,
|
||||||
expected_image: str,
|
expected_image: str,
|
||||||
expected_arch: CpuArch | None = None,
|
expected_cpu_arch: CpuArch | None = None,
|
||||||
) -> None:
|
) -> None:
|
||||||
"""Check we have expected image with correct arch."""
|
"""Check we have expected image with correct arch."""
|
||||||
expected_image_arch = (
|
expected_image_cpu_arch = (
|
||||||
str(expected_arch) if expected_arch else self.sys_arch.supervisor
|
str(expected_cpu_arch) if expected_cpu_arch else self.sys_arch.supervisor
|
||||||
)
|
)
|
||||||
image_name = f"{expected_image}:{version!s}"
|
image_name = f"{expected_image}:{version!s}"
|
||||||
if self.image == expected_image:
|
if self.image == expected_image:
|
||||||
@ -478,13 +478,22 @@ class DockerInterface(JobGroup, ABC):
|
|||||||
image_arch = f"{image_arch}/{image.attrs['Variant']}"
|
image_arch = f"{image_arch}/{image.attrs['Variant']}"
|
||||||
|
|
||||||
# If we have an image and its the right arch, all set
|
# If we have an image and its the right arch, all set
|
||||||
if MAP_ARCH[expected_image_arch] == image_arch:
|
# It seems that newer Docker version return a variant for arm64 images.
|
||||||
|
# Make sure we match linux/arm64 and linux/arm64/v8.
|
||||||
|
expected_image_arch = MAP_ARCH[expected_image_cpu_arch]
|
||||||
|
if image_arch.startswith(expected_image_arch):
|
||||||
return
|
return
|
||||||
|
_LOGGER.info(
|
||||||
|
"Image %s has arch %s, expected %s. Reinstalling.",
|
||||||
|
image_name,
|
||||||
|
image_arch,
|
||||||
|
expected_image_arch,
|
||||||
|
)
|
||||||
|
|
||||||
# We're missing the image we need. Stop and clean up what we have then pull the right one
|
# We're missing the image we need. Stop and clean up what we have then pull the right one
|
||||||
with suppress(DockerError):
|
with suppress(DockerError):
|
||||||
await self.remove()
|
await self.remove()
|
||||||
await self.install(version, expected_image, arch=expected_image_arch)
|
await self.install(version, expected_image, arch=expected_image_cpu_arch)
|
||||||
|
|
||||||
@Job(
|
@Job(
|
||||||
name="docker_interface_update",
|
name="docker_interface_update",
|
||||||
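The `check_image` hunk above replaces an exact platform comparison with a prefix match, because newer Docker engines report a variant for arm64 images (`linux/arm64/v8`) where older engines reported just `linux/arm64`. A toy illustration of that comparison; the `MAP_ARCH` entries here are an assumed subset of the real table:

# Prefix matching lets "linux/arm64" accept both the bare and the /v8 variant.
MAP_ARCH = {"aarch64": "linux/arm64", "amd64": "linux/amd64"}  # assumed subset


def arch_matches(expected_cpu_arch: str, image_arch: str) -> bool:
    """Return True if the image platform satisfies the expected CPU arch."""
    return image_arch.startswith(MAP_ARCH[expected_cpu_arch])


assert arch_matches("aarch64", "linux/arm64")
assert arch_matches("aarch64", "linux/arm64/v8")
assert not arch_matches("amd64", "linux/arm64")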
@@ -22,6 +22,7 @@ from docker.types.daemon import CancellableStream
 import requests
 
 from ..const import (
+    ATTR_ENABLE_IPV6,
     ATTR_REGISTRIES,
     DNS_SUFFIX,
     DOCKER_NETWORK,

@@ -83,7 +84,7 @@ class DockerInfo:
         """Return true, if CONFIG_RT_GROUP_SCHED is loaded."""
         if not Path("/sys/fs/cgroup/cpu/cpu.rt_runtime_us").exists():
             return False
-        return bool(os.environ.get(ENV_SUPERVISOR_CPU_RT, 0))
+        return bool(os.environ.get(ENV_SUPERVISOR_CPU_RT) == "1")
 
 
 class DockerConfig(FileConfiguration):
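The `ENV_SUPERVISOR_CPU_RT` fix above is a string-truthiness bug: environment values are strings, and `bool("0")` is `True`, so the old expression treated a variable set to `"0"` as enabled. A quick demonstration; the literal variable name is assumed from the constant's name:

import os

# Old logic: any non-empty string, including "0", is truthy.
os.environ["SUPERVISOR_CPU_RT"] = "0"  # assumed literal behind ENV_SUPERVISOR_CPU_RT
assert bool(os.environ.get("SUPERVISOR_CPU_RT", 0)) is True  # wrongly enabled

# New logic: only an explicit "1" enables the feature.
assert (os.environ.get("SUPERVISOR_CPU_RT") == "1") is False
os.environ["SUPERVISOR_CPU_RT"] = "1"
assert os.environ.get("SUPERVISOR_CPU_RT") == "1"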
@@ -93,6 +94,16 @@ class DockerConfig(FileConfiguration):
         """Initialize the JSON configuration."""
         super().__init__(FILE_HASSIO_DOCKER, SCHEMA_DOCKER_CONFIG)
 
+    @property
+    def enable_ipv6(self) -> bool | None:
+        """Return IPv6 configuration for docker network."""
+        return self._data.get(ATTR_ENABLE_IPV6, None)
+
+    @enable_ipv6.setter
+    def enable_ipv6(self, value: bool | None) -> None:
+        """Set IPv6 configuration for docker network."""
+        self._data[ATTR_ENABLE_IPV6] = value
+
     @property
     def registries(self) -> dict[str, Any]:
         """Return credentials for docker registries."""

@@ -124,9 +135,11 @@ class DockerAPI:
                 timeout=900,
             ),
         )
-        self._network = DockerNetwork(self._docker)
         self._info = DockerInfo.new(self.docker.info())
         await self.config.read_data()
+        self._network = await DockerNetwork(self.docker).post_init(
+            self.config.enable_ipv6
+        )
         return self
 
     @property
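The `post_init()` hunk above moves blocking Docker calls out of `__init__` and into an awaitable initializer that returns the instance, so callers can write `obj = await Cls(...).post_init(...)` inside the event loop. A self-contained sketch of the pattern, independent of the Supervisor classes:

import asyncio
from typing import Self


class Example:
    """Two-step construction: cheap __init__, awaitable post_init."""

    def __init__(self, value: int) -> None:
        self.value = value
        self.ready = False

    async def post_init(self) -> Self:
        # Run the blocking part in the default executor, off the event loop.
        await asyncio.get_running_loop().run_in_executor(None, self._blocking_setup)
        return self

    def _blocking_setup(self) -> None:
        self.ready = True


async def main() -> None:
    obj = await Example(1).post_init()
    assert obj.ready


asyncio.run(main())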
@@ -202,7 +215,7 @@ class DockerAPI:
         if "labels" not in kwargs:
             kwargs["labels"] = {}
         elif isinstance(kwargs["labels"], list):
-            kwargs["labels"] = {label: "" for label in kwargs["labels"]}
+            kwargs["labels"] = dict.fromkeys(kwargs["labels"], "")
 
         kwargs["labels"][LABEL_MANAGED] = ""
 

@@ -281,8 +294,8 @@ class DockerAPI:
     def run_command(
         self,
         image: str,
-        tag: str = "latest",
-        command: str | None = None,
+        version: str = "latest",
+        command: str | list[str] | None = None,
         **kwargs: Any,
     ) -> CommandReturn:
         """Create a temporary container and run command.

@@ -292,12 +305,15 @@ class DockerAPI:
         stdout = kwargs.get("stdout", True)
         stderr = kwargs.get("stderr", True)
 
-        _LOGGER.info("Runing command '%s' on %s", command, image)
+        image_with_tag = f"{image}:{version}"
+
+        _LOGGER.info("Runing command '%s' on %s", command, image_with_tag)
         container = None
         try:
             container = self.docker.containers.run(
-                f"{image}:{tag}",
+                image_with_tag,
                 command=command,
+                detach=True,
                 network=self.network.name,
                 use_config_proxy=False,
                 **kwargs,

@@ -314,9 +330,9 @@ class DockerAPI:
         # cleanup container
         if container:
             with suppress(docker_errors.DockerException, requests.RequestException):
-                container.remove(force=True)
+                container.remove(force=True, v=True)
 
-        return CommandReturn(result.get("StatusCode"), output)
+        return CommandReturn(result["StatusCode"], output)
 
     def repair(self) -> None:
         """Repair local docker overlayfs2 issues."""
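The `run_command` hunks above settle on the standard docker-py flow for one-shot containers: run detached, wait for the exit status, collect the logs, then force-remove the container together with its anonymous volumes (`v=True`, also applied in the cleanup path below). A condensed sketch of that flow outside the Supervisor; the image and command are placeholders:

import docker

client = docker.from_env()

container = client.containers.run(
    "alpine:latest",            # placeholder image
    command=["echo", "hello"],  # str or list both accepted
    detach=True,                # return a Container instead of blocking
)
try:
    result = container.wait()   # e.g. {"StatusCode": 0}
    output = container.logs(stdout=True, stderr=True)
finally:
    # v=True also removes anonymous volumes created for the container
    container.remove(force=True, v=True)

print(result["StatusCode"], output.decode())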
@@ -429,7 +445,7 @@ class DockerAPI:
         if remove_container:
             with suppress(DockerException, requests.RequestException):
                 _LOGGER.info("Cleaning %s application", name)
-                docker_container.remove(force=True)
+                docker_container.remove(force=True, v=True)
 
     def start_container(self, name: str) -> None:
         """Start Docker container."""

@@ -1,17 +1,54 @@
 """Internal network manager for Supervisor."""
 
+import asyncio
 from contextlib import suppress
 from ipaddress import IPv4Address
 import logging
+from typing import Self
 
 import docker
 import requests
 
-from ..const import DOCKER_NETWORK, DOCKER_NETWORK_MASK, DOCKER_NETWORK_RANGE
+from ..const import (
+    ATTR_AUDIO,
+    ATTR_CLI,
+    ATTR_DNS,
+    ATTR_ENABLE_IPV6,
+    ATTR_OBSERVER,
+    ATTR_SUPERVISOR,
+    DOCKER_IPV4_NETWORK_MASK,
+    DOCKER_IPV4_NETWORK_RANGE,
+    DOCKER_IPV6_NETWORK_MASK,
+    DOCKER_NETWORK,
+    DOCKER_NETWORK_DRIVER,
+    DOCKER_PREFIX,
+    OBSERVER_DOCKER_NAME,
+    SUPERVISOR_DOCKER_NAME,
+)
 from ..exceptions import DockerError
 
 _LOGGER: logging.Logger = logging.getLogger(__name__)
 
+DOCKER_ENABLEIPV6 = "EnableIPv6"
+DOCKER_NETWORK_PARAMS = {
+    "name": DOCKER_NETWORK,
+    "driver": DOCKER_NETWORK_DRIVER,
+    "ipam": docker.types.IPAMConfig(
+        pool_configs=[
+            docker.types.IPAMPool(subnet=str(DOCKER_IPV6_NETWORK_MASK)),
+            docker.types.IPAMPool(
+                subnet=str(DOCKER_IPV4_NETWORK_MASK),
+                gateway=str(DOCKER_IPV4_NETWORK_MASK[1]),
+                iprange=str(DOCKER_IPV4_NETWORK_RANGE),
+            ),
+        ]
+    ),
+    ATTR_ENABLE_IPV6: True,
+    "options": {"com.docker.network.bridge.name": DOCKER_NETWORK},
+}
+
+DOCKER_ENABLE_IPV6_DEFAULT = True
+
 
 class DockerNetwork:
     """Internal Supervisor Network.
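`DOCKER_NETWORK_PARAMS` above packs everything `networks.create()` needs for a dual-stack bridge: one IPAM pool per address family plus the `EnableIPv6` flag. A standalone sketch with the same shape; the subnets and network name below are illustrative placeholders, not the Supervisor's constants:

import docker

client = docker.from_env()

ipam = docker.types.IPAMConfig(
    pool_configs=[
        docker.types.IPAMPool(subnet="fd00:dead:beef::/48"),  # placeholder IPv6 pool
        docker.types.IPAMPool(
            subnet="172.30.32.0/23",   # placeholder IPv4 pool
            gateway="172.30.32.1",
            iprange="172.30.33.0/24",
        ),
    ]
)

network = client.networks.create(
    name="example_net",
    driver="bridge",
    ipam=ipam,
    enable_ipv6=True,
    options={"com.docker.network.bridge.name": "example_net"},
)
# The created network reports the flag back via inspect data:
print(network.attrs.get("EnableIPv6"))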
@@ -22,7 +59,14 @@ class DockerNetwork:
     def __init__(self, docker_client: docker.DockerClient):
         """Initialize internal Supervisor network."""
         self.docker: docker.DockerClient = docker_client
-        self._network: docker.models.networks.Network = self._get_network()
+        self._network: docker.models.networks.Network
+
+    async def post_init(self, enable_ipv6: bool | None = None) -> Self:
+        """Post init actions that must be done in event loop."""
+        self._network = await asyncio.get_running_loop().run_in_executor(
+            None, self._get_network, enable_ipv6
+        )
+        return self
 
     @property
     def name(self) -> str:

@@ -42,55 +86,112 @@ class DockerNetwork:
     @property
     def gateway(self) -> IPv4Address:
         """Return gateway of the network."""
-        return DOCKER_NETWORK_MASK[1]
+        return DOCKER_IPV4_NETWORK_MASK[1]
 
     @property
     def supervisor(self) -> IPv4Address:
         """Return supervisor of the network."""
-        return DOCKER_NETWORK_MASK[2]
+        return DOCKER_IPV4_NETWORK_MASK[2]
 
     @property
     def dns(self) -> IPv4Address:
         """Return dns of the network."""
-        return DOCKER_NETWORK_MASK[3]
+        return DOCKER_IPV4_NETWORK_MASK[3]
 
     @property
     def audio(self) -> IPv4Address:
         """Return audio of the network."""
-        return DOCKER_NETWORK_MASK[4]
+        return DOCKER_IPV4_NETWORK_MASK[4]
 
     @property
     def cli(self) -> IPv4Address:
         """Return cli of the network."""
-        return DOCKER_NETWORK_MASK[5]
+        return DOCKER_IPV4_NETWORK_MASK[5]
 
     @property
     def observer(self) -> IPv4Address:
         """Return observer of the network."""
-        return DOCKER_NETWORK_MASK[6]
+        return DOCKER_IPV4_NETWORK_MASK[6]
 
-    def _get_network(self) -> docker.models.networks.Network:
+    def _get_network(
+        self, enable_ipv6: bool | None = None
+    ) -> docker.models.networks.Network:
         """Get supervisor network."""
         try:
-            return self.docker.networks.get(DOCKER_NETWORK)
+            if network := self.docker.networks.get(DOCKER_NETWORK):
+                current_ipv6 = network.attrs.get(DOCKER_ENABLEIPV6, False)
+                # If the network exists and we don't have an explicit setting,
+                # simply stick with what we have.
+                if enable_ipv6 is None or current_ipv6 == enable_ipv6:
+                    return network
+
+                # We have an explicit setting which differs from the current state.
+                _LOGGER.info(
+                    "Migrating Supervisor network to %s",
+                    "IPv4/IPv6 Dual-Stack" if enable_ipv6 else "IPv4-Only",
+                )
+
+                if (containers := network.containers) and (
+                    containers_all := all(
+                        container.name in (OBSERVER_DOCKER_NAME, SUPERVISOR_DOCKER_NAME)
+                        for container in containers
+                    )
+                ):
+                    for container in containers:
+                        with suppress(
+                            docker.errors.APIError,
+                            docker.errors.DockerException,
+                            requests.RequestException,
+                        ):
+                            network.disconnect(container, force=True)
+
+                if not containers or containers_all:
+                    try:
+                        network.remove()
+                    except docker.errors.APIError:
+                        _LOGGER.warning("Failed to remove existing Supervisor network")
+                        return network
+                else:
+                    _LOGGER.warning(
+                        "System appears to be running, "
+                        "not applying Supervisor network change. "
+                        "Reboot your system to apply the change."
+                    )
+                    return network
         except docker.errors.NotFound:
             _LOGGER.info("Can't find Supervisor network, creating a new network")
 
-        ipam_pool = docker.types.IPAMPool(
-            subnet=str(DOCKER_NETWORK_MASK),
-            gateway=str(self.gateway),
-            iprange=str(DOCKER_NETWORK_RANGE),
+        network_params = DOCKER_NETWORK_PARAMS.copy()
+        network_params[ATTR_ENABLE_IPV6] = (
+            DOCKER_ENABLE_IPV6_DEFAULT if enable_ipv6 is None else enable_ipv6
         )
 
-        ipam_config = docker.types.IPAMConfig(pool_configs=[ipam_pool])
-
-        return self.docker.networks.create(
-            DOCKER_NETWORK,
-            driver="bridge",
-            ipam=ipam_config,
-            enable_ipv6=False,
-            options={"com.docker.network.bridge.name": DOCKER_NETWORK},
-        )
+        try:
+            self._network = self.docker.networks.create(**network_params)  # type: ignore
+        except docker.errors.APIError as err:
+            raise DockerError(
+                f"Can't create Supervisor network: {err}", _LOGGER.error
+            ) from err
+
+        with suppress(DockerError):
+            self.attach_container_by_name(
+                SUPERVISOR_DOCKER_NAME, [ATTR_SUPERVISOR], self.supervisor
+            )
+
+        with suppress(DockerError):
+            self.attach_container_by_name(
+                OBSERVER_DOCKER_NAME, [ATTR_OBSERVER], self.observer
+            )
+
+        for name, ip in (
+            (ATTR_CLI, self.cli),
+            (ATTR_DNS, self.dns),
+            (ATTR_AUDIO, self.audio),
+        ):
+            with suppress(DockerError):
+                self.attach_container_by_name(f"{DOCKER_PREFIX}_{name}", [name], ip)
+
+        return self._network
 
     def attach_container(
         self,
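The `_get_network` migration above only tears the network down when nothing but the Supervisor and observer containers is attached; any other attached container means user workloads are running, so it keeps the old network and asks for a reboot. The decision reduces to a small predicate, sketched here with assumed container names (`hassio_observer` appears in the observer hunk further down; `hassio_supervisor` is assumed):

# Sketch of the "safe to recreate the network now?" decision.
SAFE_NAMES = {"hassio_supervisor", "hassio_observer"}  # assumed literals


def can_recreate(attached_names: list[str]) -> bool:
    """True if only Supervisor-managed core containers are attached."""
    return all(name in SAFE_NAMES for name in attached_names)


assert can_recreate([])                                    # nothing attached
assert can_recreate(["hassio_supervisor"])
assert not can_recreate(["hassio_supervisor", "addon_x"])  # user workload running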
@@ -102,8 +203,6 @@ class DockerNetwork:
 
         Need run inside executor.
         """
-        ipv4_address = str(ipv4) if ipv4 else None
-
         # Reload Network information
         with suppress(docker.errors.DockerException, requests.RequestException):
             self.network.reload()

@@ -116,12 +215,43 @@ class DockerNetwork:
 
         # Attach Network
         try:
-            self.network.connect(container, aliases=alias, ipv4_address=ipv4_address)
-        except docker.errors.APIError as err:
+            self.network.connect(
+                container, aliases=alias, ipv4_address=str(ipv4) if ipv4 else None
+            )
+        except (
+            docker.errors.NotFound,
+            docker.errors.APIError,
+            docker.errors.DockerException,
+            requests.RequestException,
+        ) as err:
             raise DockerError(
-                f"Can't link container to hassio-net: {err}", _LOGGER.error
+                f"Can't connect {container.name} to Supervisor network: {err}",
+                _LOGGER.error,
             ) from err
 
+    def attach_container_by_name(
+        self,
+        name: str,
+        alias: list[str] | None = None,
+        ipv4: IPv4Address | None = None,
+    ) -> None:
+        """Attach container to Supervisor network.
+
+        Need run inside executor.
+        """
+        try:
+            container = self.docker.containers.get(name)
+        except (
+            docker.errors.NotFound,
+            docker.errors.APIError,
+            docker.errors.DockerException,
+            requests.RequestException,
+        ) as err:
+            raise DockerError(f"Can't find {name}: {err}", _LOGGER.error) from err
+
+        if container.id not in self.containers:
+            self.attach_container(container, alias, ipv4)
+
     def detach_default_bridge(
         self, container: docker.models.containers.Container
     ) -> None:

@@ -130,25 +260,33 @@ class DockerNetwork:
         Need run inside executor.
         """
         try:
-            default_network = self.docker.networks.get("bridge")
+            default_network = self.docker.networks.get(DOCKER_NETWORK_DRIVER)
             default_network.disconnect(container)
 
         except docker.errors.NotFound:
-            return
-
-        except docker.errors.APIError as err:
+            pass
+        except (
+            docker.errors.APIError,
+            docker.errors.DockerException,
+            requests.RequestException,
+        ) as err:
             raise DockerError(
-                f"Can't disconnect container from default: {err}", _LOGGER.warning
+                f"Can't disconnect {container.name} from default network: {err}",
+                _LOGGER.warning,
             ) from err
 
-    def stale_cleanup(self, container_name: str):
-        """Remove force a container from Network.
+    def stale_cleanup(self, name: str) -> None:
+        """Force remove a container from Network.
 
         Fix: https://github.com/moby/moby/issues/23302
         """
         try:
-            self.network.disconnect(container_name, force=True)
-        except docker.errors.NotFound:
-            pass
-        except (docker.errors.DockerException, requests.RequestException) as err:
-            raise DockerError() from err
+            self.network.disconnect(name, force=True)
+        except (
+            docker.errors.APIError,
+            docker.errors.DockerException,
+            requests.RequestException,
+        ) as err:
+            raise DockerError(
+                f"Can't disconnect {name} from Supervisor network: {err}",
+                _LOGGER.warning,
+            ) from err

@@ -2,7 +2,7 @@
 
 import logging
 
-from ..const import DOCKER_NETWORK_MASK
+from ..const import DOCKER_IPV4_NETWORK_MASK, OBSERVER_DOCKER_NAME
 from ..coresys import CoreSysAttributes
 from ..exceptions import DockerJobError
 from ..jobs.const import JobExecutionLimit

@@ -12,7 +12,6 @@ from .interface import DockerInterface
 
 _LOGGER: logging.Logger = logging.getLogger(__name__)
 
-OBSERVER_DOCKER_NAME: str = "hassio_observer"
 ENV_NETWORK_MASK: str = "NETWORK_MASK"
 
 

@@ -49,7 +48,7 @@ class DockerObserver(DockerInterface, CoreSysAttributes):
             environment={
                 ENV_TIME: self.sys_timezone,
                 ENV_TOKEN: self.sys_plugins.observer.supervisor_token,
-                ENV_NETWORK_MASK: DOCKER_NETWORK_MASK,
+                ENV_NETWORK_MASK: DOCKER_IPV4_NETWORK_MASK,
             },
             mounts=[MOUNT_DOCKER],
             ports={"80/tcp": 4357},

@@ -32,6 +32,7 @@ class WSType(StrEnum):
     SUPERVISOR_EVENT = "supervisor/event"
     BACKUP_START = "backup/start"
     BACKUP_END = "backup/end"
+    HASSIO_UPDATE_ADDON = "hassio/update/addon"
 
 
 class WSEvent(StrEnum):

@@ -87,19 +87,19 @@ class HomeAssistantCore(JobGroup):
 
         try:
             # Evaluate Version if we lost this information
-            if not self.sys_homeassistant.version:
+            if self.sys_homeassistant.version:
+                version = self.sys_homeassistant.version
+            else:
                 self.sys_homeassistant.version = (
-                    await self.instance.get_latest_version()
-                )
+                    version
+                ) = await self.instance.get_latest_version()
 
-            await self.instance.attach(
-                version=self.sys_homeassistant.version, skip_state_event_if_down=True
-            )
+            await self.instance.attach(version=version, skip_state_event_if_down=True)
 
             # Ensure we are using correct image for this system (unless user has overridden it)
             if not self.sys_homeassistant.override_image:
                 await self.instance.check_image(
-                    self.sys_homeassistant.version, self.sys_homeassistant.default_image
+                    version, self.sys_homeassistant.default_image
                 )
                 self.sys_homeassistant.set_image(self.sys_homeassistant.default_image)
         except DockerError:

@@ -108,7 +108,7 @@ class HomeAssistantCore(JobGroup):
             )
             await self.install_landingpage()
         else:
-            self.sys_homeassistant.version = self.instance.version
+            self.sys_homeassistant.version = self.instance.version or version
             self.sys_homeassistant.set_image(self.instance.image)
             await self.sys_homeassistant.save_data()
 

@@ -182,12 +182,13 @@ class HomeAssistantCore(JobGroup):
             if not self.sys_homeassistant.latest_version:
                 await self.sys_updater.reload()
 
-            if self.sys_homeassistant.latest_version:
+            if to_version := self.sys_homeassistant.latest_version:
                 try:
                     await self.instance.update(
-                        self.sys_homeassistant.latest_version,
+                        to_version,
                         image=self.sys_updater.image_homeassistant,
                     )
+                    self.sys_homeassistant.version = self.instance.version or to_version
                     break
                 except (DockerError, JobException):
                     pass

@@ -198,7 +199,6 @@ class HomeAssistantCore(JobGroup):
             await asyncio.sleep(30)
 
         _LOGGER.info("Home Assistant docker now installed")
-        self.sys_homeassistant.version = self.instance.version
        self.sys_homeassistant.set_image(self.sys_updater.image_homeassistant)
         await self.sys_homeassistant.save_data()
 

@@ -231,8 +231,8 @@ class HomeAssistantCore(JobGroup):
         backup: bool | None = False,
     ) -> None:
         """Update HomeAssistant version."""
-        version = version or self.sys_homeassistant.latest_version
-        if not version:
+        to_version = version or self.sys_homeassistant.latest_version
+        if not to_version:
             raise HomeAssistantUpdateError(
                 "Cannot determine latest version of Home Assistant for update",
                 _LOGGER.error,

@@ -243,9 +243,9 @@ class HomeAssistantCore(JobGroup):
         running = await self.instance.is_running()
         exists = await self.instance.exists()
 
-        if exists and version == self.instance.version:
+        if exists and to_version == self.instance.version:
             raise HomeAssistantUpdateError(
-                f"Version {version!s} is already installed", _LOGGER.warning
+                f"Version {to_version!s} is already installed", _LOGGER.warning
             )
 
         if backup:

@@ -268,7 +268,7 @@ class HomeAssistantCore(JobGroup):
                     "Updating Home Assistant image failed", _LOGGER.warning
                 ) from err
 
-            self.sys_homeassistant.version = self.instance.version
+            self.sys_homeassistant.version = self.instance.version or to_version
             self.sys_homeassistant.set_image(self.sys_updater.image_homeassistant)
 
             if running:

@@ -282,7 +282,7 @@ class HomeAssistantCore(JobGroup):
 
         # Update Home Assistant
         with suppress(HomeAssistantError):
-            await _update(version)
+            await _update(to_version)
 
         if not self.error_state and rollback:
             try:

@@ -35,6 +35,7 @@ from ..const import (
     FILE_HASSIO_HOMEASSISTANT,
     BusEvent,
     IngressSessionDataUser,
+    IngressSessionDataUserDict,
 )
 from ..coresys import CoreSys, CoreSysAttributes
 from ..exceptions import (

@@ -557,18 +558,11 @@ class HomeAssistant(FileConfiguration, CoreSysAttributes):
     async def get_users(self) -> list[IngressSessionDataUser]:
         """Get list of all configured users."""
         list_of_users: (
-            list[dict[str, Any]] | None
+            list[IngressSessionDataUserDict] | None
         ) = await self.sys_homeassistant.websocket.async_send_command(
             {ATTR_TYPE: "config/auth/list"}
         )
 
         if list_of_users:
-            return [
-                IngressSessionDataUser(
-                    id=data["id"],
-                    username=data.get("username"),
-                    display_name=data.get("name"),
-                )
-                for data in list_of_users
-            ]
+            return [IngressSessionDataUser.from_dict(data) for data in list_of_users]
         return []
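The `get_users` hunk above replaces inline construction with a `from_dict` classmethod typed against a `TypedDict`. A hedged sketch of what that pairing presumably looks like, with the field names taken from the removed inline code (`id`, `username`, `name`):

from dataclasses import dataclass
from typing import TypedDict


class IngressSessionDataUserDict(TypedDict, total=False):
    """Assumed shape of one entry from config/auth/list."""

    id: str
    username: str | None
    name: str | None


@dataclass(slots=True)
class IngressSessionDataUser:
    id: str
    username: str | None = None
    display_name: str | None = None

    @classmethod
    def from_dict(cls, data: IngressSessionDataUserDict) -> "IngressSessionDataUser":
        # Mirrors the removed inline construction above.
        return cls(
            id=data["id"],
            username=data.get("username"),
            display_name=data.get("name"),
        )


user = IngressSessionDataUser.from_dict({"id": "abc", "name": "Jane"})
assert user.display_name == "Jane"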
@@ -2,17 +2,29 @@
 
 from dataclasses import dataclass
 from ipaddress import IPv4Address, IPv4Interface, IPv6Address, IPv6Interface
+import logging
 import socket
 
 from ..dbus.const import (
     ConnectionStateFlags,
     ConnectionStateType,
     DeviceType,
+    InterfaceAddrGenMode as NMInterfaceAddrGenMode,
+    InterfaceIp6Privacy as NMInterfaceIp6Privacy,
     InterfaceMethod as NMInterfaceMethod,
 )
 from ..dbus.network.connection import NetworkConnection
 from ..dbus.network.interface import NetworkInterface
-from .const import AuthMethod, InterfaceMethod, InterfaceType, WifiMode
+from .const import (
+    AuthMethod,
+    InterfaceAddrGenMode,
+    InterfaceIp6Privacy,
+    InterfaceMethod,
+    InterfaceType,
+    WifiMode,
+)
+
+_LOGGER: logging.Logger = logging.getLogger(__name__)
 
 
 @dataclass(slots=True)

@@ -46,6 +58,14 @@ class IpSetting:
     nameservers: list[IPv4Address | IPv6Address]
 
 
+@dataclass(slots=True)
+class Ip6Setting(IpSetting):
+    """Represent a user IPv6 setting."""
+
+    addr_gen_mode: InterfaceAddrGenMode = InterfaceAddrGenMode.DEFAULT
+    ip6_privacy: InterfaceIp6Privacy = InterfaceIp6Privacy.DEFAULT
+
+
 @dataclass(slots=True)
 class WifiConfig:
     """Represent a wifi configuration."""

@@ -62,7 +82,7 @@ class VlanConfig:
     """Represent a vlan configuration."""
 
     id: int
-    interface: str
+    interface: str | None
 
 
 @dataclass(slots=True)

@@ -79,7 +99,7 @@ class Interface:
     ipv4: IpConfig | None
     ipv4setting: IpSetting | None
     ipv6: IpConfig | None
-    ipv6setting: IpSetting | None
+    ipv6setting: Ip6Setting | None
     wifi: WifiConfig | None
     vlan: VlanConfig | None
 

@@ -91,7 +111,10 @@ class Interface:
         if inet.settings.match and inet.settings.match.path:
             return inet.settings.match.path == [self.path]
 
-        return inet.settings.connection.interface_name == self.name
+        return (
+            inet.settings.connection is not None
+            and inet.settings.connection.interface_name == self.name
+        )
 
     @staticmethod
     def from_dbus_interface(inet: NetworkInterface) -> "Interface":

@@ -118,8 +141,14 @@ class Interface:
             ipv4_setting = IpSetting(InterfaceMethod.DISABLED, [], None, [])
 
         if inet.settings and inet.settings.ipv6:
-            ipv6_setting = IpSetting(
+            ipv6_setting = Ip6Setting(
                 method=Interface._map_nm_method(inet.settings.ipv6.method),
+                addr_gen_mode=Interface._map_nm_addr_gen_mode(
+                    inet.settings.ipv6.addr_gen_mode
+                ),
+                ip6_privacy=Interface._map_nm_ip6_privacy(
+                    inet.settings.ipv6.ip6_privacy
+                ),
                 address=[
                     IPv6Interface(f"{ip.address}/{ip.prefix}")
                     for ip in inet.settings.ipv6.address_data

@@ -134,26 +163,26 @@ class Interface:
                 else [],
             )
         else:
-            ipv6_setting = IpSetting(InterfaceMethod.DISABLED, [], None, [])
+            ipv6_setting = Ip6Setting(InterfaceMethod.DISABLED, [], None, [])
 
         ipv4_ready = (
-            bool(inet.connection)
+            inet.connection is not None
             and ConnectionStateFlags.IP4_READY in inet.connection.state_flags
         )
         ipv6_ready = (
-            bool(inet.connection)
+            inet.connection is not None
             and ConnectionStateFlags.IP6_READY in inet.connection.state_flags
         )
 
         return Interface(
-            inet.name,
-            inet.hw_address,
-            inet.path,
-            inet.settings is not None,
-            Interface._map_nm_connected(inet.connection),
-            inet.primary,
-            Interface._map_nm_type(inet.type),
-            IpConfig(
+            name=inet.interface_name,
+            mac=inet.hw_address,
+            path=inet.path,
+            enabled=inet.settings is not None,
+            connected=Interface._map_nm_connected(inet.connection),
+            primary=inet.primary,
+            type=Interface._map_nm_type(inet.type),
+            ipv4=IpConfig(
                 address=inet.connection.ipv4.address
                 if inet.connection.ipv4.address
                 else [],

@@ -165,8 +194,8 @@ class Interface:
             )
             if inet.connection and inet.connection.ipv4
             else IpConfig([], None, [], ipv4_ready),
-            ipv4_setting,
-            IpConfig(
+            ipv4setting=ipv4_setting,
+            ipv6=IpConfig(
                 address=inet.connection.ipv6.address
                 if inet.connection.ipv6.address
                 else [],

@@ -178,22 +207,42 @@ class Interface:
             )
             if inet.connection and inet.connection.ipv6
             else IpConfig([], None, [], ipv6_ready),
-            ipv6_setting,
-            Interface._map_nm_wifi(inet),
-            Interface._map_nm_vlan(inet),
+            ipv6setting=ipv6_setting,
+            wifi=Interface._map_nm_wifi(inet),
+            vlan=Interface._map_nm_vlan(inet),
         )
 
     @staticmethod
-    def _map_nm_method(method: str) -> InterfaceMethod:
+    def _map_nm_method(method: str | None) -> InterfaceMethod:
         """Map IP interface method."""
-        mapping = {
-            NMInterfaceMethod.AUTO: InterfaceMethod.AUTO,
-            NMInterfaceMethod.DISABLED: InterfaceMethod.DISABLED,
-            NMInterfaceMethod.MANUAL: InterfaceMethod.STATIC,
-            NMInterfaceMethod.LINK_LOCAL: InterfaceMethod.DISABLED,
-        }
-
-        return mapping.get(method, InterfaceMethod.DISABLED)
+        match method:
+            case NMInterfaceMethod.AUTO.value:
+                return InterfaceMethod.AUTO
+            case NMInterfaceMethod.MANUAL:
+                return InterfaceMethod.STATIC
+        return InterfaceMethod.DISABLED
+
+    @staticmethod
+    def _map_nm_addr_gen_mode(addr_gen_mode: int) -> InterfaceAddrGenMode:
+        """Map IPv6 interface addr_gen_mode."""
+        mapping = {
+            NMInterfaceAddrGenMode.EUI64.value: InterfaceAddrGenMode.EUI64,
+            NMInterfaceAddrGenMode.STABLE_PRIVACY.value: InterfaceAddrGenMode.STABLE_PRIVACY,
+            NMInterfaceAddrGenMode.DEFAULT_OR_EUI64.value: InterfaceAddrGenMode.DEFAULT_OR_EUI64,
+        }
+
+        return mapping.get(addr_gen_mode, InterfaceAddrGenMode.DEFAULT)
+
+    @staticmethod
+    def _map_nm_ip6_privacy(ip6_privacy: int) -> InterfaceIp6Privacy:
+        """Map IPv6 interface ip6_privacy."""
+        mapping = {
+            NMInterfaceIp6Privacy.DISABLED.value: InterfaceIp6Privacy.DISABLED,
+            NMInterfaceIp6Privacy.ENABLED_PREFER_PUBLIC.value: InterfaceIp6Privacy.ENABLED_PREFER_PUBLIC,
+            NMInterfaceIp6Privacy.ENABLED.value: InterfaceIp6Privacy.ENABLED,
+        }
+
+        return mapping.get(ip6_privacy, InterfaceIp6Privacy.DEFAULT)
 
     @staticmethod
     def _map_nm_connected(connection: NetworkConnection | None) -> bool:

@@ -208,12 +257,14 @@ class Interface:
 
     @staticmethod
     def _map_nm_type(device_type: int) -> InterfaceType:
-        mapping = {
-            DeviceType.ETHERNET: InterfaceType.ETHERNET,
-            DeviceType.WIRELESS: InterfaceType.WIRELESS,
-            DeviceType.VLAN: InterfaceType.VLAN,
-        }
-        return mapping[device_type]
+        match device_type:
+            case DeviceType.ETHERNET.value:
+                return InterfaceType.ETHERNET
+            case DeviceType.WIRELESS.value:
+                return InterfaceType.WIRELESS
+            case DeviceType.VLAN.value:
+                return InterfaceType.VLAN
+        raise ValueError(f"Invalid device type: {device_type}")
 
     @staticmethod
     def _map_nm_wifi(inet: NetworkInterface) -> WifiConfig | None:
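The match statements above lean on `StrEnum` equality: a plain string compares equal to the member, so both `NMInterfaceMethod.AUTO.value` and the bare member `NMInterfaceMethod.MANUAL` work as value patterns. A runnable sketch of that semantics (Python 3.11+; the enum here is an assumed stand-in for the dbus constant):

from enum import StrEnum


class NMInterfaceMethod(StrEnum):  # assumed stand-in
    AUTO = "auto"
    MANUAL = "manual"


def map_method(method: str | None) -> str:
    match method:
        case NMInterfaceMethod.AUTO.value:  # matches the plain string "auto"
            return "auto"
        case NMInterfaceMethod.MANUAL:      # value pattern: compared with ==
            return "static"
    return "disabled"


assert map_method("auto") == "auto"
assert map_method("manual") == "static"
assert map_method(None) == "disabled"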
@@ -222,15 +273,22 @@ class Interface:
             return None
 
         # Authentication and PSK
-        auth = None
+        auth = AuthMethod.OPEN
         psk = None
-        if not inet.settings.wireless_security:
-            auth = AuthMethod.OPEN
-        elif inet.settings.wireless_security.key_mgmt == "none":
-            auth = AuthMethod.WEP
-        elif inet.settings.wireless_security.key_mgmt == "wpa-psk":
-            auth = AuthMethod.WPA_PSK
-            psk = inet.settings.wireless_security.psk
+        if inet.settings.wireless_security:
+            match inet.settings.wireless_security.key_mgmt:
+                case "none":
+                    auth = AuthMethod.WEP
+                case "wpa-psk":
+                    auth = AuthMethod.WPA_PSK
+                    psk = inet.settings.wireless_security.psk
+                case _:
+                    _LOGGER.warning(
+                        "Auth method %s for network interface %s unsupported, skipping",
+                        inet.settings.wireless_security.key_mgmt,
+                        inet.interface_name,
+                    )
+                    return None
 
         # WifiMode
         mode = WifiMode.INFRASTRUCTURE

@@ -244,17 +302,17 @@ class Interface:
             signal = None
 
         return WifiConfig(
-            mode,
-            inet.settings.wireless.ssid,
-            auth,
-            psk,
-            signal,
+            mode=mode,
+            ssid=inet.settings.wireless.ssid if inet.settings.wireless else "",
+            auth=auth,
+            psk=psk,
+            signal=signal,
         )
 
     @staticmethod
-    def _map_nm_vlan(inet: NetworkInterface) -> WifiConfig | None:
+    def _map_nm_vlan(inet: NetworkInterface) -> VlanConfig | None:
         """Create mapping to nm vlan property."""
-        if inet.type != DeviceType.VLAN or not inet.settings:
+        if inet.type != DeviceType.VLAN or not inet.settings or not inet.settings.vlan:
             return None
 
         return VlanConfig(inet.settings.vlan.id, inet.settings.vlan.parent)

@@ -15,6 +15,24 @@ class InterfaceMethod(StrEnum):
     AUTO = "auto"
 
 
+class InterfaceAddrGenMode(StrEnum):
+    """Configuration of an interface."""
+
+    EUI64 = "eui64"
+    STABLE_PRIVACY = "stable-privacy"
+    DEFAULT_OR_EUI64 = "default-or-eui64"
+    DEFAULT = "default"
+
+
+class InterfaceIp6Privacy(StrEnum):
+    """Configuration of an interface."""
+
+    DEFAULT = "default"
+    DISABLED = "disabled"
+    ENABLED_PREFER_PUBLIC = "enabled-prefer-public"
+    ENABLED = "enabled"
+
+
 class InterfaceType(StrEnum):
     """Configuration of an interface."""
 

@@ -62,6 +80,7 @@ class LogFormat(StrEnum):
 
     JOURNAL = "application/vnd.fdo.journal"
     JSON = "application/json"
+    JSON_SEQ = "application/json-seq"
     TEXT = "text/plain"
 
 

@@ -2,7 +2,7 @@
 
 from __future__ import annotations
 
-from collections.abc import AsyncGenerator
+from collections.abc import AsyncGenerator, Mapping
 from contextlib import asynccontextmanager
 import json
 import logging

@@ -25,6 +25,7 @@ from ..exceptions import (
     HostServiceError,
 )
 from ..utils.json import read_json_file
+from ..utils.systemd_journal import journal_boots_reader
 from .const import PARAM_BOOT_ID, PARAM_SYSLOG_IDENTIFIER, LogFormat
 
 _LOGGER: logging.Logger = logging.getLogger(__name__)

@@ -108,12 +109,8 @@ class LogsControl(CoreSysAttributes):
 
         return boot_ids[offset]
 
-    async def get_boot_ids(self) -> list[str]:
-        """Get boot IDs from oldest to newest."""
-        if self._boot_ids:
-            # Doesn't change without a reboot, no reason to query again once cached
-            return self._boot_ids
-
+    async def _get_boot_ids_legacy(self) -> list[str]:
+        """Get boots IDs using suboptimal method where /boots is not available."""
         try:
             async with self.journald_logs(
                 params=BOOT_IDS_QUERY,

@@ -142,13 +139,51 @@ class LogsControl(CoreSysAttributes):
                 _LOGGER.error,
             ) from err
 
-        self._boot_ids = []
+        _boot_ids = []
         for entry in text.split("\n"):
-            if (
-                entry
-                and (boot_id := json.loads(entry)[PARAM_BOOT_ID]) not in self._boot_ids
-            ):
-                self._boot_ids.append(boot_id)
+            if entry and (boot_id := json.loads(entry)[PARAM_BOOT_ID]) not in _boot_ids:
+                _boot_ids.append(boot_id)
+
+        return _boot_ids
+
+    async def _get_boot_ids_native(self):
+        """Get boot IDs using /boots endpoint."""
+        try:
+            async with self.journald_logs(
+                path="/boots",
+                accept=LogFormat.JSON_SEQ,
+                timeout=ClientTimeout(total=20),
+            ) as resp:
+                if resp.status != 200:
+                    raise HostLogError(
+                        f"Got HTTP {resp.status} from /boots.",
+                        _LOGGER.debug,
+                    )
+                # Don't rely solely on the order of boots in the response,
+                # sort the boots by index returned in the response.
+                boot_id_tuples = [boot async for boot in journal_boots_reader(resp)]
+                return [
+                    boot_id for _, boot_id in sorted(boot_id_tuples, key=lambda x: x[0])
+                ]
+        except (ClientError, TimeoutError) as err:
+            raise HostLogError(
+                "Could not get a list of boot IDs from systemd-journal-gatewayd",
+                _LOGGER.error,
+            ) from err
+
+    async def get_boot_ids(self) -> list[str]:
+        """Get boot IDs from oldest to newest."""
+        if self._boot_ids:
+            # Doesn't change without a reboot, no reason to query again once cached
+            return self._boot_ids
+
+        try:
+            self._boot_ids = await self._get_boot_ids_native()
+        except HostLogError:
+            _LOGGER.info(
+                "Could not get /boots from systemd-journal-gatewayd, using fallback."
+            )
+            self._boot_ids = await self._get_boot_ids_legacy()
 
         return self._boot_ids
 
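The new `/boots` path above consumes `application/json-seq` (RFC 7464), where records are separated by the RS control character (0x1E). `journal_boots_reader` is assumed to yield `(index, boot_id)` tuples parsed from such a stream; a minimal synchronous sketch of the same parsing, with the JSON field names assumed:

import json

RS = "\x1e"  # RFC 7464 record separator


def parse_json_seq(payload: str):
    """Yield decoded JSON objects from an application/json-seq payload."""
    for record in payload.split(RS):
        if record.strip():
            yield json.loads(record)


sample = '\x1e{"index": -1, "boot_id": "abc"}\n\x1e{"index": 0, "boot_id": "def"}\n'
boots = [(rec["index"], rec["boot_id"]) for rec in parse_json_seq(sample)]
# Sort by index instead of trusting response order, as the hunk above does.
assert [boot_id for _, boot_id in sorted(boots)] == ["abc", "def"]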
@ -170,7 +205,7 @@ class LogsControl(CoreSysAttributes):
|
|||||||
async def journald_logs(
|
async def journald_logs(
|
||||||
self,
|
self,
|
||||||
path: str = "/entries",
|
path: str = "/entries",
|
||||||
params: dict[str, str | list[str]] | None = None,
|
params: Mapping[str, str | list[str]] | None = None,
|
||||||
range_header: str | None = None,
|
range_header: str | None = None,
|
||||||
accept: LogFormat = LogFormat.TEXT,
|
accept: LogFormat = LogFormat.TEXT,
|
||||||
timeout: ClientTimeout | None = None,
|
timeout: ClientTimeout | None = None,
|
||||||
@ -191,7 +226,7 @@ class LogsControl(CoreSysAttributes):
|
|||||||
base_url = "http://localhost/"
|
base_url = "http://localhost/"
|
||||||
connector = UnixConnector(path=str(SYSTEMD_JOURNAL_GATEWAYD_SOCKET))
|
connector = UnixConnector(path=str(SYSTEMD_JOURNAL_GATEWAYD_SOCKET))
|
||||||
async with ClientSession(base_url=base_url, connector=connector) as session:
|
async with ClientSession(base_url=base_url, connector=connector) as session:
|
||||||
headers = {ACCEPT: accept}
|
headers = {ACCEPT: accept.value}
|
||||||
if range_header:
|
if range_header:
|
||||||
if range_header.endswith(":"):
|
if range_header.endswith(":"):
|
||||||
# Make sure that num_entries is always set - before Systemd v256 it was
|
# Make sure that num_entries is always set - before Systemd v256 it was
|
||||||
|
@ -8,11 +8,11 @@ from typing import Any
|
|||||||
from ..const import ATTR_HOST_INTERNET
|
from ..const import ATTR_HOST_INTERNET
|
||||||
from ..coresys import CoreSys, CoreSysAttributes
|
from ..coresys import CoreSys, CoreSysAttributes
|
||||||
from ..dbus.const import (
|
from ..dbus.const import (
|
||||||
|
DBUS_ATTR_CONFIGURATION,
|
||||||
DBUS_ATTR_CONNECTION_ENABLED,
|
DBUS_ATTR_CONNECTION_ENABLED,
|
||||||
DBUS_ATTR_CONNECTIVITY,
|
DBUS_ATTR_CONNECTIVITY,
|
||||||
DBUS_ATTR_PRIMARY_CONNECTION,
|
DBUS_IFACE_DNS,
|
||||||
DBUS_IFACE_NM,
|
DBUS_IFACE_NM,
|
||||||
DBUS_OBJECT_BASE,
|
|
||||||
DBUS_SIGNAL_NM_CONNECTION_ACTIVE_CHANGED,
|
DBUS_SIGNAL_NM_CONNECTION_ACTIVE_CHANGED,
|
||||||
ConnectionStateType,
|
ConnectionStateType,
|
||||||
ConnectivityState,
|
ConnectivityState,
|
||||||
@ -46,6 +46,8 @@ class NetworkManager(CoreSysAttributes):
|
|||||||
"""Initialize system center handling."""
|
"""Initialize system center handling."""
|
||||||
self.coresys: CoreSys = coresys
|
self.coresys: CoreSys = coresys
|
||||||
self._connectivity: bool | None = None
|
self._connectivity: bool | None = None
|
||||||
|
# No event need on initial change (NetworkManager initializes with empty list)
|
||||||
|
self._dns_configuration: list = []
|
||||||
|
|
||||||
@property
|
@property
|
||||||
def connectivity(self) -> bool | None:
|
def connectivity(self) -> bool | None:
|
||||||
@ -87,7 +89,7 @@ class NetworkManager(CoreSysAttributes):
|
|||||||
for config in self.sys_dbus.network.dns.configuration:
|
for config in self.sys_dbus.network.dns.configuration:
|
||||||
if config.vpn or not config.nameservers:
|
if config.vpn or not config.nameservers:
|
||||||
continue
|
continue
|
||||||
servers.extend(config.nameservers)
|
servers.extend([str(ns) for ns in config.nameservers])
|
||||||
|
|
||||||
return list(dict.fromkeys(servers))
|
return list(dict.fromkeys(servers))
|
||||||
|
|
||||||
@ -138,8 +140,12 @@ class NetworkManager(CoreSysAttributes):
|
|||||||
]
|
]
|
||||||
)
|
)
|
||||||
|
|
||||||
self.sys_dbus.network.dbus.properties.on_properties_changed(
|
self.sys_dbus.network.dbus.properties.on(
|
||||||
self._check_connectivity_changed
|
"properties_changed", self._check_connectivity_changed
|
||||||
|
)
|
||||||
|
|
||||||
|
self.sys_dbus.network.dns.dbus.properties.on(
|
||||||
|
"properties_changed", self._check_dns_changed
|
||||||
)
|
)
|
||||||
|
|
||||||
async def _check_connectivity_changed(
|
async def _check_connectivity_changed(
|
||||||
@ -152,15 +158,6 @@ class NetworkManager(CoreSysAttributes):
|
|||||||
connectivity_check: bool | None = changed.get(DBUS_ATTR_CONNECTION_ENABLED)
|
connectivity_check: bool | None = changed.get(DBUS_ATTR_CONNECTION_ENABLED)
|
||||||
connectivity: int | None = changed.get(DBUS_ATTR_CONNECTIVITY)
|
connectivity: int | None = changed.get(DBUS_ATTR_CONNECTIVITY)
|
||||||
|
|
||||||
# This potentially updated the DNS configuration. Make sure the DNS plug-in
|
|
||||||
# picks up the latest settings.
|
|
||||||
if (
|
|
||||||
DBUS_ATTR_PRIMARY_CONNECTION in changed
|
|
||||||
and changed[DBUS_ATTR_PRIMARY_CONNECTION]
|
|
||||||
and changed[DBUS_ATTR_PRIMARY_CONNECTION] != DBUS_OBJECT_BASE
|
|
||||||
):
|
|
||||||
await self.sys_plugins.dns.restart()
|
|
||||||
|
|
||||||
if (
|
if (
|
||||||
connectivity_check is True
|
connectivity_check is True
|
||||||
or DBUS_ATTR_CONNECTION_ENABLED in invalidated
|
or DBUS_ATTR_CONNECTION_ENABLED in invalidated
|
||||||
@ -174,6 +171,20 @@ class NetworkManager(CoreSysAttributes):
|
|||||||
elif connectivity is not None:
|
elif connectivity is not None:
|
||||||
self.connectivity = connectivity == ConnectivityState.CONNECTIVITY_FULL
|
self.connectivity = connectivity == ConnectivityState.CONNECTIVITY_FULL
|
||||||
|
|
||||||
|
async def _check_dns_changed(
|
||||||
|
self, interface: str, changed: dict[str, Any], invalidated: list[str]
|
||||||
|
):
|
||||||
|
"""Check if DNS properties have changed."""
|
||||||
|
if interface != DBUS_IFACE_DNS:
|
||||||
|
return
|
||||||
|
|
||||||
|
if (
|
||||||
|
DBUS_ATTR_CONFIGURATION in changed
|
||||||
|
and self._dns_configuration != changed[DBUS_ATTR_CONFIGURATION]
|
||||||
|
):
|
||||||
|
self._dns_configuration = changed[DBUS_ATTR_CONFIGURATION]
|
||||||
|
self.sys_plugins.dns.notify_locals_changed()
|
||||||
|
|
||||||
async def update(self, *, force_connectivity_check: bool = False):
|
async def update(self, *, force_connectivity_check: bool = False):
|
||||||
"""Update properties over dbus."""
|
"""Update properties over dbus."""
|
||||||
_LOGGER.info("Updating local network information")
|
_LOGGER.info("Updating local network information")
|
||||||
@@ -196,10 +207,16 @@ class NetworkManager(CoreSysAttributes):
             with suppress(NetworkInterfaceNotFound):
                 inet = self.sys_dbus.network.get(interface.name)
 
-        con: NetworkConnection = None
+        con: NetworkConnection | None = None
 
         # Update exist configuration
-        if inet and interface.equals_dbus_interface(inet) and interface.enabled:
+        if (
+            inet
+            and inet.settings
+            and inet.settings.connection
+            and interface.equals_dbus_interface(inet)
+            and interface.enabled
+        ):
             _LOGGER.debug("Updating existing configuration for %s", interface.name)
             settings = get_connection_from_interface(
                 interface,
@@ -210,12 +227,12 @@ class NetworkManager(CoreSysAttributes):
 
             try:
                 await inet.settings.update(settings)
-                con = await self.sys_dbus.network.activate_connection(
+                con = activated = await self.sys_dbus.network.activate_connection(
                     inet.settings.object_path, inet.object_path
                 )
                 _LOGGER.debug(
                     "activate_connection returns %s",
-                    con.object_path,
+                    activated.object_path,
                 )
             except DBusError as err:
                 raise HostNetworkError(
@@ -235,12 +252,16 @@ class NetworkManager(CoreSysAttributes):
             settings = get_connection_from_interface(interface, self.sys_dbus.network)
 
             try:
-                settings, con = await self.sys_dbus.network.add_and_activate_connection(
+                (
+                    settings,
+                    activated,
+                ) = await self.sys_dbus.network.add_and_activate_connection(
                     settings, inet.object_path
                 )
+                con = activated
                 _LOGGER.debug(
                     "add_and_activate_connection returns %s",
-                    con.object_path,
+                    activated.object_path,
                 )
             except DBusError as err:
                 raise HostNetworkError(
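The `con = activated = ...` double assignment looks redundant, but it gives the type checker a second name that is never `None`: `con` keeps the broader `NetworkConnection | None` type for the code further down, while `activated` can be dereferenced without a guard. A minimal sketch of the idea, with hypothetical names:

```python
import asyncio
from dataclasses import dataclass


@dataclass
class Connection:
    object_path: str


async def activate() -> Connection:  # stand-in for the D-Bus call
    return Connection("/org/freedesktop/NetworkManager/ActiveConnection/1")


async def demo() -> None:
    con: Connection | None = None  # the broad, Optional variable
    con = activated = await activate()
    # `activated` is typed as plain Connection, so no None-check is needed here
    print(activated.object_path)


asyncio.run(demo())
```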
@@ -276,7 +297,7 @@ class NetworkManager(CoreSysAttributes):
             )
 
         if con:
-            async with con.dbus.signal(
+            async with con.connected_dbus.signal(
                 DBUS_SIGNAL_NM_CONNECTION_ACTIVE_CHANGED
             ) as signal:
                 # From this point we monitor signals. However, it might be that
@@ -302,7 +323,7 @@ class NetworkManager(CoreSysAttributes):
         """Scan on Interface for AccessPoint."""
         inet = self.sys_dbus.network.get(interface.name)
 
-        if inet.type != DeviceType.WIRELESS:
+        if inet.type != DeviceType.WIRELESS or not inet.wireless:
             raise HostNotSupportedError(
                 f"Can only scan with wireless card - {interface.name}", _LOGGER.error
             )

@@ -12,6 +12,7 @@ from .const import (
     ATTR_SESSION_DATA,
     FILE_HASSIO_INGRESS,
     IngressSessionData,
+    IngressSessionDataDict,
 )
 from .coresys import CoreSys, CoreSysAttributes
 from .utils import check_port
@@ -35,7 +36,7 @@ class Ingress(FileConfiguration, CoreSysAttributes):
         """Return addon they have this ingress token."""
         if token not in self.tokens:
             return None
-        return self.sys_addons.get(self.tokens[token], local_only=True)
+        return self.sys_addons.get_local_only(self.tokens[token])
 
     def get_session_data(self, session_id: str) -> IngressSessionData | None:
         """Return complementary data of current session or None."""
@@ -49,7 +50,7 @@ class Ingress(FileConfiguration, CoreSysAttributes):
         return self._data[ATTR_SESSION]
 
     @property
-    def sessions_data(self) -> dict[str, dict[str, str | None]]:
+    def sessions_data(self) -> dict[str, IngressSessionDataDict]:
         """Return sessions_data."""
         return self._data[ATTR_SESSION_DATA]
 
@@ -89,7 +90,7 @@ class Ingress(FileConfiguration, CoreSysAttributes):
         now = utcnow()
 
         sessions = {}
-        sessions_data: dict[str, dict[str, str | None]] = {}
+        sessions_data: dict[str, IngressSessionDataDict] = {}
         for session, valid in self.sessions.items():
             # check if timestamp valid, to avoid crash on malformed timestamp
             try:
@@ -118,7 +119,8 @@ class Ingress(FileConfiguration, CoreSysAttributes):
 
         # Read all ingress token and build a map
         for addon in self.addons:
-            self.tokens[addon.ingress_token] = addon.slug
+            if addon.ingress_token:
+                self.tokens[addon.ingress_token] = addon.slug
 
     def create_session(self, data: IngressSessionData | None = None) -> str:
         """Create new session."""
@@ -141,7 +143,7 @@ class Ingress(FileConfiguration, CoreSysAttributes):
         try:
             valid_until = utc_from_timestamp(self.sessions[session])
         except OverflowError:
-            self.sessions[session] = utcnow() + timedelta(minutes=15)
+            self.sessions[session] = (utcnow() + timedelta(minutes=15)).timestamp()
             return True
 
         # Is still valid?
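The one-line session fix matters because `self.sessions` stores POSIX timestamps (floats); the old code wrote a `datetime` object into it, which would break the next `utc_from_timestamp()` read. A small sketch of the corrected pattern, assuming float timestamps as the storage format:

```python
from datetime import datetime, timedelta, timezone

sessions: dict[str, float] = {}


def extend(session_id: str) -> None:
    # Store a POSIX timestamp (float), matching what utc_from_timestamp()
    # expects, instead of a datetime object that would crash the next read.
    sessions[session_id] = (
        datetime.now(timezone.utc) + timedelta(minutes=15)
    ).timestamp()


extend("abc")
assert isinstance(sessions["abc"], float)
```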

@@ -1,13 +1,13 @@
 """Supervisor job manager."""
 
 import asyncio
-from collections.abc import Awaitable, Callable
-from contextlib import contextmanager
+from collections.abc import Callable, Coroutine, Generator
+from contextlib import contextmanager, suppress
 from contextvars import Context, ContextVar, Token
 from dataclasses import dataclass
 from datetime import datetime
 import logging
-from typing import Any
+from typing import Any, Self
 from uuid import uuid4
 
 from attrs import Attribute, define, field
@@ -27,7 +27,7 @@ from .validate import SCHEMA_JOBS_CONFIG
 # When a new asyncio task is started the current context is copied over.
 # Modifications to it in one task are not visible to others though.
 # This allows us to track what job is currently in progress in each task.
-_CURRENT_JOB: ContextVar[str] = ContextVar("current_job")
+_CURRENT_JOB: ContextVar[str | None] = ContextVar("current_job", default=None)
 
 _LOGGER: logging.Logger = logging.getLogger(__name__)
 
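Declaring the default on the `ContextVar` itself removes the `.get(None)` fallbacks and `LookupError` handling at every call site, and makes the `str | None` type explicit. A self-contained illustration of the difference:

```python
from contextvars import ContextVar

# Without a default, .get() raises LookupError until someone calls .set()
_JOB_NO_DEFAULT: ContextVar[str] = ContextVar("job_plain")
# With default=None, .get() is always safe and the Optional type is explicit
_JOB: ContextVar[str | None] = ContextVar("job", default=None)

try:
    _JOB_NO_DEFAULT.get()
except LookupError:
    pass  # this is what every call site previously had to guard against

assert _JOB.get() is None
token = _JOB.set("uuid-1234")
assert _JOB.get() == "uuid-1234"
_JOB.reset(token)  # restore the previous value when the job scope ends
assert _JOB.get() is None
```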
@@ -75,7 +75,7 @@ class SupervisorJobError:
     message: str = "Unknown error, see supervisor logs"
     stage: str | None = None
 
-    def as_dict(self) -> dict[str, str]:
+    def as_dict(self) -> dict[str, str | None]:
         """Return dictionary representation."""
         return {
             "type": self.type_.__name__,
@@ -101,9 +101,7 @@ class SupervisorJob:
     stage: str | None = field(
         default=None, validator=[_invalid_if_done], on_setattr=_on_change
     )
-    parent_id: str | None = field(
-        factory=lambda: _CURRENT_JOB.get(None), on_setattr=frozen
-    )
+    parent_id: str | None = field(factory=_CURRENT_JOB.get, on_setattr=frozen)
     done: bool | None = field(init=False, default=None, on_setattr=_on_change)
     on_change: Callable[["SupervisorJob", Attribute, Any], None] | None = field(
         default=None, on_setattr=frozen
@@ -137,7 +135,7 @@ class SupervisorJob:
         self.errors += [new_error]
 
     @contextmanager
-    def start(self):
+    def start(self) -> Generator[Self]:
         """Start the job in the current task.
 
         This can only be called if the parent ID matches the job running in the current task.
@@ -146,11 +144,11 @@ class SupervisorJob:
         """
         if self.done is not None:
             raise JobStartException("Job has already been started")
-        if _CURRENT_JOB.get(None) != self.parent_id:
+        if _CURRENT_JOB.get() != self.parent_id:
             raise JobStartException("Job has a different parent from current job")
 
         self.done = False
-        token: Token[str] | None = None
+        token: Token[str | None] | None = None
         try:
             token = _CURRENT_JOB.set(self.uuid)
             yield self
@@ -193,17 +191,15 @@ class JobManager(FileConfiguration, CoreSysAttributes):
 
         Must be called from within a job. Raises RuntimeError if there is no current job.
         """
-        try:
-            return self.get_job(_CURRENT_JOB.get())
-        except (LookupError, JobNotFound):
-            raise RuntimeError(
-                "No job for the current asyncio task!", _LOGGER.critical
-            ) from None
+        if job_id := _CURRENT_JOB.get():
+            with suppress(JobNotFound):
+                return self.get_job(job_id)
+        raise RuntimeError("No job for the current asyncio task!", _LOGGER.critical)
 
     @property
     def is_job(self) -> bool:
         """Return true if there is an active job for the current asyncio task."""
-        return bool(_CURRENT_JOB.get(None))
+        return _CURRENT_JOB.get() is not None
 
     def _notify_on_job_change(
         self, job: SupervisorJob, attribute: Attribute, value: Any
@@ -265,7 +261,7 @@ class JobManager(FileConfiguration, CoreSysAttributes):
 
     def schedule_job(
         self,
-        job_method: Callable[..., Awaitable[Any]],
+        job_method: Callable[..., Coroutine],
         options: JobSchedulerOptions,
         *args,
         **kwargs,

@@ -34,8 +34,60 @@ class JobCondition(StrEnum):
     SUPERVISOR_UPDATED = "supervisor_updated"
 
 
+class JobConcurrency(StrEnum):
+    """Job concurrency control.
+
+    Controls how many instances of a job can run simultaneously.
+
+    Individual Concurrency (applies to each method separately):
+    - REJECT: Fail immediately if another instance is already running
+    - QUEUE: Wait for the current instance to finish, then run
+
+    Group Concurrency (applies across all methods on a JobGroup):
+    - GROUP_REJECT: Fail if ANY job is running on the JobGroup
+    - GROUP_QUEUE: Wait for ANY running job on the JobGroup to finish
+
+    JobGroup Behavior:
+    - All methods on the same JobGroup instance share a single lock
+    - Methods can call other methods on the same group without deadlock
+    - Uses the JobGroup.group_name for coordination
+    - Requires the class to inherit from JobGroup
+    """
+
+    REJECT = "reject"  # Fail if already running (was ONCE)
+    QUEUE = "queue"  # Wait if already running (was SINGLE_WAIT)
+    GROUP_REJECT = "group_reject"  # Was GROUP_ONCE
+    GROUP_QUEUE = "group_queue"  # Was GROUP_WAIT
+
+
+class JobThrottle(StrEnum):
+    """Job throttling control.
+
+    Controls how frequently jobs can be executed.
+
+    Individual Throttling (each method has its own throttle state):
+    - THROTTLE: Skip execution if called within throttle_period
+    - RATE_LIMIT: Allow up to throttle_max_calls within throttle_period, then fail
+
+    Group Throttling (all methods on a JobGroup share throttle state):
+    - GROUP_THROTTLE: Skip if ANY method was called within throttle_period
+    - GROUP_RATE_LIMIT: Allow up to throttle_max_calls total across ALL methods
+
+    JobGroup Behavior:
+    - All methods on the same JobGroup instance share throttle counters/timers
+    - Uses the JobGroup.group_name as the key for tracking state
+    - If one method is throttled, other methods may also be throttled
+    - Requires the class to inherit from JobGroup
+    """
+
+    THROTTLE = "throttle"  # Skip if called too frequently
+    RATE_LIMIT = "rate_limit"  # Rate limiting with max calls per period
+    GROUP_THROTTLE = "group_throttle"  # Group version of THROTTLE
+    GROUP_RATE_LIMIT = "group_rate_limit"  # Group version of RATE_LIMIT
+
+
 class JobExecutionLimit(StrEnum):
-    """Job Execution limits."""
+    """Job Execution limits - DEPRECATED: Use JobConcurrency and JobThrottle instead."""
 
     ONCE = "once"
     SINGLE_WAIT = "single_wait"
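As a standalone sketch (not the Supervisor implementation), the `THROTTLE` semantics described in the docstring above amount to a decorator that records the last call time and silently skips calls arriving inside the period:

```python
import asyncio
from collections.abc import Awaitable, Callable
from datetime import datetime, timedelta
from functools import wraps


def throttle(period: timedelta) -> Callable:
    """Skip calls arriving within `period` of the previous run (THROTTLE semantics)."""

    def decorator(
        func: Callable[..., Awaitable[None]],
    ) -> Callable[..., Awaitable[None]]:
        last_call = datetime.min

        @wraps(func)
        async def wrapper(*args, **kwargs) -> None:
            nonlocal last_call
            if datetime.now() - last_call < period:
                return  # throttled: skip silently, like JobThrottle.THROTTLE
            last_call = datetime.now()
            await func(*args, **kwargs)

        return wrapper

    return decorator


@throttle(timedelta(seconds=300))
async def check_connectivity() -> None:
    print("checking...")


asyncio.run(check_connectivity())  # runs
asyncio.run(check_connectivity())  # skipped: within the 300s window
```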

@@ -1,12 +1,12 @@
 """Job decorator."""
 
 import asyncio
-from collections.abc import Callable
+from collections.abc import Awaitable, Callable
 from contextlib import suppress
 from datetime import datetime, timedelta
 from functools import wraps
 import logging
-from typing import Any
+from typing import Any, cast
 
 from ..const import CoreState
 from ..coresys import CoreSys, CoreSysAttributes
@@ -20,7 +20,7 @@ from ..host.const import HostFeature
 from ..resolution.const import MINIMUM_FREE_SPACE_THRESHOLD, ContextType, IssueType
 from ..utils.sentry import async_capture_exception
 from . import SupervisorJob
-from .const import JobCondition, JobExecutionLimit
+from .const import JobConcurrency, JobCondition, JobExecutionLimit, JobThrottle
 from .job_group import JobGroup
 
 _LOGGER: logging.Logger = logging.getLogger(__package__)
@@ -36,14 +36,34 @@ class Job(CoreSysAttributes):
         conditions: list[JobCondition] | None = None,
         cleanup: bool = True,
         on_condition: type[JobException] | None = None,
-        limit: JobExecutionLimit | None = None,
+        concurrency: JobConcurrency | None = None,
+        throttle: JobThrottle | None = None,
         throttle_period: timedelta
         | Callable[[CoreSys, datetime, list[datetime] | None], timedelta]
        | None = None,
         throttle_max_calls: int | None = None,
         internal: bool = False,
-    ):
-        """Initialize the Job class."""
+        # Backward compatibility - DEPRECATED
+        limit: JobExecutionLimit | None = None,
+    ):  # pylint: disable=too-many-positional-arguments
+        """Initialize the Job decorator.
+
+        Args:
+            name (str): Unique name for the job. Must not be duplicated.
+            conditions (list[JobCondition] | None): List of conditions that must be met before the job runs.
+            cleanup (bool): Whether to clean up the job after execution. Defaults to True. If set to False, the job will remain accessible through the Supervisor API until the next restart.
+            on_condition (type[JobException] | None): Exception type to raise if a job condition fails. If None, logs the failure.
+            concurrency (JobConcurrency | None): Concurrency control policy (e.g., reject, queue, group-based).
+            throttle (JobThrottle | None): Throttling policy (e.g., throttle, rate_limit, group-based).
+            throttle_period (timedelta | Callable | None): Throttle period as a timedelta or a callable returning a timedelta (for throttled jobs).
+            throttle_max_calls (int | None): Maximum number of calls allowed within the throttle period (for rate-limited jobs).
+            internal (bool): Whether the job is internal (not exposed through the Supervisor API). Defaults to False.
+            limit (JobExecutionLimit | None): DEPRECATED - Use concurrency and throttle instead.
+
+        Raises:
+            RuntimeError: If job name is not unique, or required throttle parameters are missing for the selected throttle policy.
+
+        """
         if name in _JOB_NAMES:
             raise RuntimeError(f"A job already exists with name {name}!")
 
@@ -52,43 +72,114 @@ class Job(CoreSysAttributes):
         self.conditions = conditions
         self.cleanup = cleanup
         self.on_condition = on_condition
-        self.limit = limit
         self._throttle_period = throttle_period
-        self.throttle_max_calls = throttle_max_calls
+        self._throttle_max_calls = throttle_max_calls
         self._lock: asyncio.Semaphore | None = None
-        self._method = None
         self._last_call: dict[str | None, datetime] = {}
-        self._rate_limited_calls: dict[str, list[datetime]] | None = None
+        self._rate_limited_calls: dict[str | None, list[datetime]] | None = None
         self._internal = internal
 
+        # Handle backward compatibility with limit parameter
+        if limit is not None:
+            if concurrency is not None or throttle is not None:
+                raise RuntimeError(
+                    f"Job {name} cannot specify both 'limit' (deprecated) and 'concurrency'/'throttle' parameters!"
+                )
+            # Map old limit values to new parameters
+            concurrency, throttle = self._map_limit_to_new_params(limit)
+
+        self.concurrency = concurrency
+        self.throttle = throttle
+
         # Validate Options
+        self._validate_parameters()
+
+    def _map_limit_to_new_params(
+        self, limit: JobExecutionLimit
+    ) -> tuple[JobConcurrency | None, JobThrottle | None]:
+        """Map old limit parameter to new concurrency and throttle parameters."""
+        mapping = {
+            JobExecutionLimit.ONCE: (JobConcurrency.REJECT, None),
+            JobExecutionLimit.SINGLE_WAIT: (JobConcurrency.QUEUE, None),
+            JobExecutionLimit.THROTTLE: (None, JobThrottle.THROTTLE),
+            JobExecutionLimit.THROTTLE_WAIT: (
+                JobConcurrency.QUEUE,
+                JobThrottle.THROTTLE,
+            ),
+            JobExecutionLimit.THROTTLE_RATE_LIMIT: (None, JobThrottle.RATE_LIMIT),
+            JobExecutionLimit.GROUP_ONCE: (JobConcurrency.GROUP_REJECT, None),
+            JobExecutionLimit.GROUP_WAIT: (JobConcurrency.GROUP_QUEUE, None),
+            JobExecutionLimit.GROUP_THROTTLE: (None, JobThrottle.GROUP_THROTTLE),
+            JobExecutionLimit.GROUP_THROTTLE_WAIT: (
+                # Seems a bit counter intuitive, but GROUP_QUEUE deadlocks
+                # tests/jobs/test_job_decorator.py::test_execution_limit_group_throttle_wait
+                # The reason this deadlocks is because when using GROUP_QUEUE and the
+                # throttle limit is hit, the group lock is trying to be unlocked outside
+                # of the job context. The current implementation doesn't allow to unlock
+                # the group lock when the job is not running.
+                JobConcurrency.QUEUE,
+                JobThrottle.GROUP_THROTTLE,
+            ),
+            JobExecutionLimit.GROUP_THROTTLE_RATE_LIMIT: (
+                None,
+                JobThrottle.GROUP_RATE_LIMIT,
+            ),
+        }
+        return mapping.get(limit, (None, None))
+
+    def _validate_parameters(self) -> None:
+        """Validate job parameters."""
+        # Validate throttle parameters
         if (
-            self.limit
+            self.throttle
             in (
-                JobExecutionLimit.THROTTLE,
-                JobExecutionLimit.THROTTLE_WAIT,
-                JobExecutionLimit.THROTTLE_RATE_LIMIT,
-                JobExecutionLimit.GROUP_THROTTLE,
-                JobExecutionLimit.GROUP_THROTTLE_WAIT,
-                JobExecutionLimit.GROUP_THROTTLE_RATE_LIMIT,
+                JobThrottle.THROTTLE,
+                JobThrottle.GROUP_THROTTLE,
+                JobThrottle.RATE_LIMIT,
+                JobThrottle.GROUP_RATE_LIMIT,
             )
             and self._throttle_period is None
         ):
             raise RuntimeError(
-                f"Job {name} is using execution limit {limit} without a throttle period!"
+                f"Job {self.name} is using throttle {self.throttle} without a throttle period!"
             )
 
-        if self.limit in (
-            JobExecutionLimit.THROTTLE_RATE_LIMIT,
-            JobExecutionLimit.GROUP_THROTTLE_RATE_LIMIT,
+        if self.throttle in (
+            JobThrottle.RATE_LIMIT,
+            JobThrottle.GROUP_RATE_LIMIT,
         ):
-            if self.throttle_max_calls is None:
+            if self._throttle_max_calls is None:
                 raise RuntimeError(
-                    f"Job {name} is using execution limit {limit} without throttle max calls!"
+                    f"Job {self.name} is using throttle {self.throttle} without throttle max calls!"
                 )
 
             self._rate_limited_calls = {}
 
+        if self.throttle is not None and self.concurrency in (
+            JobConcurrency.GROUP_REJECT,
+            JobConcurrency.GROUP_QUEUE,
+        ):
+            # We cannot release group locks when Job is not running (e.g. throttled)
+            # which makes these combinations impossible to use currently.
+            raise RuntimeError(
+                f"Job {self.name} is using throttling ({self.throttle}) with group concurrency ({self.concurrency}), which is not allowed!"
+            )
+
+    @property
+    def throttle_max_calls(self) -> int:
+        """Return max calls for throttle."""
+        if self._throttle_max_calls is None:
+            raise RuntimeError("No throttle max calls set for job!")
+        return self._throttle_max_calls
+
+    @property
+    def lock(self) -> asyncio.Semaphore:
+        """Return lock for limits."""
+        # asyncio.Semaphore objects must be created in event loop
+        # Since this is sync code it is not safe to create if missing here
+        if not self._lock:
+            raise RuntimeError("Lock has not been created yet!")
+        return self._lock
+
     def last_call(self, group_name: str | None = None) -> datetime:
         """Return last call datetime."""
         return self._last_call.get(group_name, datetime.min)
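The new `lock` property raises instead of lazily creating the semaphore because, per the comment in the diff, the `asyncio.Semaphore` is meant to be created from within the event loop (it is created in `_post_init`). A small standalone sketch of that create-in-loop, fail-loudly-elsewhere pattern, with hypothetical names:

```python
import asyncio


class LazyLock:
    """Create the Semaphore inside the event loop; fail loudly elsewhere."""

    def __init__(self) -> None:
        self._lock: asyncio.Semaphore | None = None

    def ensure(self) -> None:
        # Called from async context (the analogue of _post_init)
        if self._lock is None:
            self._lock = asyncio.Semaphore()

    @property
    def lock(self) -> asyncio.Semaphore:
        if self._lock is None:
            raise RuntimeError("Lock has not been created yet!")
        return self._lock


async def main() -> None:
    holder = LazyLock()
    holder.ensure()
    async with holder.lock:
        print("held")


asyncio.run(main())
```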
@@ -97,12 +188,12 @@ class Job(CoreSysAttributes):
         """Set last call datetime."""
         self._last_call[group_name] = value
 
-    def rate_limited_calls(
-        self, group_name: str | None = None
-    ) -> list[datetime] | None:
+    def rate_limited_calls(self, group_name: str | None = None) -> list[datetime]:
         """Return rate limited calls if used."""
         if self._rate_limited_calls is None:
-            return None
+            raise RuntimeError(
+                "Rate limited calls not available for this throttle type"
+            )
+
         return self._rate_limited_calls.get(group_name, [])
 
@@ -112,7 +203,7 @@ class Job(CoreSysAttributes):
         """Add a rate limited call to list if used."""
         if self._rate_limited_calls is None:
             raise RuntimeError(
-                f"Rate limited calls not available for limit type {self.limit}"
+                "Rate limited calls not available for this throttle type"
             )
 
         if group_name in self._rate_limited_calls:
@@ -126,15 +217,15 @@ class Job(CoreSysAttributes):
         """Set rate limited calls if used."""
         if self._rate_limited_calls is None:
             raise RuntimeError(
-                f"Rate limited calls not available for limit type {self.limit}"
+                "Rate limited calls not available for this throttle type"
             )
 
         self._rate_limited_calls[group_name] = value
 
-    def throttle_period(self, group_name: str | None = None) -> timedelta | None:
+    def throttle_period(self, group_name: str | None = None) -> timedelta:
         """Return throttle period."""
         if self._throttle_period is None:
-            return None
+            raise RuntimeError("No throttle period set for Job!")
 
         if isinstance(self._throttle_period, timedelta):
             return self._throttle_period
@@ -142,7 +233,7 @@ class Job(CoreSysAttributes):
         return self._throttle_period(
             self.coresys,
             self.last_call(group_name),
-            self.rate_limited_calls(group_name),
+            self.rate_limited_calls(group_name) if self._rate_limited_calls else None,
         )
 
     def _post_init(self, obj: JobGroup | CoreSysAttributes) -> JobGroup | None:
@@ -158,23 +249,31 @@ class Job(CoreSysAttributes):
             self._lock = asyncio.Semaphore()
 
         # Job groups
-        try:
-            is_job_group = obj.acquire and obj.release
-        except AttributeError:
-            is_job_group = False
+        job_group: JobGroup | None = None
+        with suppress(AttributeError):
+            if obj.acquire and obj.release:  # type: ignore
+                job_group = cast(JobGroup, obj)
 
-        if not is_job_group and self.limit in (
-            JobExecutionLimit.GROUP_ONCE,
-            JobExecutionLimit.GROUP_WAIT,
-            JobExecutionLimit.GROUP_THROTTLE,
-            JobExecutionLimit.GROUP_THROTTLE_WAIT,
-            JobExecutionLimit.GROUP_THROTTLE_RATE_LIMIT,
-        ):
-            raise RuntimeError(
-                f"Job on {self.name} need to be a JobGroup to use group based limits!"
-            ) from None
+        # Check for group-based parameters
+        if not job_group:
+            if self.concurrency in (
+                JobConcurrency.GROUP_REJECT,
+                JobConcurrency.GROUP_QUEUE,
+            ):
+                raise RuntimeError(
+                    f"Job {self.name} uses group concurrency ({self.concurrency}) but is not on a JobGroup! "
+                    f"The class must inherit from JobGroup to use GROUP_REJECT or GROUP_QUEUE."
+                ) from None
+            if self.throttle in (
+                JobThrottle.GROUP_THROTTLE,
+                JobThrottle.GROUP_RATE_LIMIT,
+            ):
+                raise RuntimeError(
+                    f"Job {self.name} uses group throttling ({self.throttle}) but is not on a JobGroup! "
+                    f"The class must inherit from JobGroup to use GROUP_THROTTLE or GROUP_RATE_LIMIT."
+                ) from None
 
-        return obj if is_job_group else None
+        return job_group
 
     def _handle_job_condition_exception(self, err: JobConditionException) -> None:
         """Handle a job condition failure."""
@@ -184,9 +283,8 @@ class Job(CoreSysAttributes):
                 return
         raise self.on_condition(error_msg, _LOGGER.warning) from None
 
-    def __call__(self, method):
+    def __call__(self, method: Callable[..., Awaitable]):
         """Call the wrapper logic."""
-        self._method = method
 
         @wraps(method)
         async def wrapper(
@@ -221,87 +319,31 @@ class Job(CoreSysAttributes):
             if self.conditions:
                 try:
                     await Job.check_conditions(
-                        self, set(self.conditions), self._method.__qualname__
+                        self, set(self.conditions), method.__qualname__
                     )
                 except JobConditionException as err:
                     return self._handle_job_condition_exception(err)
 
-            # Handle exection limits
-            if self.limit in (
-                JobExecutionLimit.SINGLE_WAIT,
-                JobExecutionLimit.ONCE,
-            ):
-                await self._acquire_exection_limit()
-            elif self.limit in (
-                JobExecutionLimit.GROUP_ONCE,
-                JobExecutionLimit.GROUP_WAIT,
-            ):
-                try:
-                    await obj.acquire(
-                        job, self.limit == JobExecutionLimit.GROUP_WAIT
-                    )
-                except JobGroupExecutionLimitExceeded as err:
-                    if self.on_condition:
-                        raise self.on_condition(str(err)) from err
-                    raise err
-            elif self.limit in (
-                JobExecutionLimit.THROTTLE,
-                JobExecutionLimit.GROUP_THROTTLE,
-            ):
-                time_since_last_call = datetime.now() - self.last_call(group_name)
-                if time_since_last_call < self.throttle_period(group_name):
-                    return
-            elif self.limit in (
-                JobExecutionLimit.THROTTLE_WAIT,
-                JobExecutionLimit.GROUP_THROTTLE_WAIT,
-            ):
-                await self._acquire_exection_limit()
-                time_since_last_call = datetime.now() - self.last_call(group_name)
-                if time_since_last_call < self.throttle_period(group_name):
-                    self._release_exception_limits()
-                    return
-            elif self.limit in (
-                JobExecutionLimit.THROTTLE_RATE_LIMIT,
-                JobExecutionLimit.GROUP_THROTTLE_RATE_LIMIT,
-            ):
-                # Only reprocess array when necessary (at limit)
-                if (
-                    len(self.rate_limited_calls(group_name))
-                    >= self.throttle_max_calls
-                ):
-                    self.set_rate_limited_calls(
-                        [
-                            call
-                            for call in self.rate_limited_calls(group_name)
-                            if call
-                            > datetime.now() - self.throttle_period(group_name)
-                        ],
-                        group_name,
-                    )
-
-                if (
-                    len(self.rate_limited_calls(group_name))
-                    >= self.throttle_max_calls
-                ):
-                    on_condition = (
-                        JobException
-                        if self.on_condition is None
-                        else self.on_condition
-                    )
-                    raise on_condition(
-                        f"Rate limit exceeded, more than {self.throttle_max_calls} calls in {self.throttle_period(group_name)}",
-                    )
+            # Handle execution limits
+            await self._handle_concurrency_control(job_group, job)
+            try:
+                if not await self._handle_throttling(group_name):
+                    self._release_concurrency_control(job_group)
+                    return  # Job was throttled, exit early
+            except Exception:
+                self._release_concurrency_control(job_group)
+                raise
 
             # Execute Job
             with job.start():
                 try:
                     self.set_last_call(datetime.now(), group_name)
-                    if self.rate_limited_calls(group_name) is not None:
+                    if self._rate_limited_calls is not None:
                         self.add_rate_limited_call(
                             self.last_call(group_name), group_name
                         )
 
-                    return await self._method(obj, *args, **kwargs)
+                    return await method(obj, *args, **kwargs)
 
                 # If a method has a conditional JobCondition, they must check it in the method
                 # These should be handled like normal JobConditions as much as possible
@@ -316,12 +358,7 @@ class Job(CoreSysAttributes):
                         await async_capture_exception(err)
                         raise JobException() from err
                     finally:
-                        self._release_exception_limits()
-                        if self.limit in (
-                            JobExecutionLimit.GROUP_ONCE,
-                            JobExecutionLimit.GROUP_WAIT,
-                        ):
-                            obj.release()
+                        self._release_concurrency_control(job_group)
 
             # Jobs that weren't started are always cleaned up. Also clean up done jobs if required
             finally:
@@ -463,31 +500,75 @@ class Job(CoreSysAttributes):
                 f"'{method_name}' blocked from execution, mounting not supported on system"
             )
 
-    async def _acquire_exection_limit(self) -> None:
-        """Process exection limits."""
-        if self.limit not in (
-            JobExecutionLimit.SINGLE_WAIT,
-            JobExecutionLimit.ONCE,
-            JobExecutionLimit.THROTTLE_WAIT,
-            JobExecutionLimit.GROUP_THROTTLE_WAIT,
-        ):
-            return
-
-        if self.limit == JobExecutionLimit.ONCE and self._lock.locked():
-            on_condition = (
-                JobException if self.on_condition is None else self.on_condition
-            )
-            raise on_condition("Another job is running")
-
-        await self._lock.acquire()
-
-    def _release_exception_limits(self) -> None:
-        """Release possible exception limits."""
-        if self.limit not in (
-            JobExecutionLimit.SINGLE_WAIT,
-            JobExecutionLimit.ONCE,
-            JobExecutionLimit.THROTTLE_WAIT,
-            JobExecutionLimit.GROUP_THROTTLE_WAIT,
-        ):
-            return
-        self._lock.release()
+    def _release_concurrency_control(self, job_group: JobGroup | None) -> None:
+        """Release concurrency control locks."""
+        if self.concurrency == JobConcurrency.REJECT:
+            if self.lock.locked():
+                self.lock.release()
+        elif self.concurrency == JobConcurrency.QUEUE:
+            if self.lock.locked():
+                self.lock.release()
+        elif self.concurrency in (
+            JobConcurrency.GROUP_REJECT,
+            JobConcurrency.GROUP_QUEUE,
+        ):
+            if job_group and job_group.has_lock:
+                job_group.release()
+
+    async def _handle_concurrency_control(
+        self, job_group: JobGroup | None, job: SupervisorJob
+    ) -> None:
+        """Handle concurrency control limits."""
+        if self.concurrency == JobConcurrency.REJECT:
+            if self.lock.locked():
+                on_condition = (
+                    JobException if self.on_condition is None else self.on_condition
+                )
+                raise on_condition("Another job is running")
+            await self.lock.acquire()
+        elif self.concurrency == JobConcurrency.QUEUE:
+            await self.lock.acquire()
+        elif self.concurrency == JobConcurrency.GROUP_REJECT:
+            try:
+                await cast(JobGroup, job_group).acquire(job, wait=False)
+            except JobGroupExecutionLimitExceeded as err:
+                if self.on_condition:
+                    raise self.on_condition(str(err)) from err
+                raise err
+        elif self.concurrency == JobConcurrency.GROUP_QUEUE:
+            try:
+                await cast(JobGroup, job_group).acquire(job, wait=True)
+            except JobGroupExecutionLimitExceeded as err:
+                if self.on_condition:
+                    raise self.on_condition(str(err)) from err
+                raise err
+
+    async def _handle_throttling(self, group_name: str | None) -> bool:
+        """Handle throttling limits. Returns True if job should continue, False if throttled."""
+        if self.throttle in (JobThrottle.THROTTLE, JobThrottle.GROUP_THROTTLE):
+            time_since_last_call = datetime.now() - self.last_call(group_name)
+            throttle_period = self.throttle_period(group_name)
+            if time_since_last_call < throttle_period:
+                # Always return False when throttled (skip execution)
+                return False
+        elif self.throttle in (JobThrottle.RATE_LIMIT, JobThrottle.GROUP_RATE_LIMIT):
+            # Only reprocess array when necessary (at limit)
+            if len(self.rate_limited_calls(group_name)) >= self.throttle_max_calls:
+                self.set_rate_limited_calls(
+                    [
+                        call
+                        for call in self.rate_limited_calls(group_name)
+                        if call > datetime.now() - self.throttle_period(group_name)
+                    ],
+                    group_name,
+                )
+
+            if len(self.rate_limited_calls(group_name)) >= self.throttle_max_calls:
+                on_condition = (
+                    JobException if self.on_condition is None else self.on_condition
+                )
+                raise on_condition(
+                    f"Rate limit exceeded, more than {self.throttle_max_calls} calls in {self.throttle_period(group_name)}",
+                )
+
+        return True
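`_handle_throttling` implements a sliding-window rate limit: the call list is pruned only once it reaches `throttle_max_calls`, and if it is still full afterwards the job is rejected. A self-contained sketch of the same logic (returning `False` where the real code raises `on_condition`):

```python
from datetime import datetime, timedelta


class RateLimiter:
    """Sliding-window limiter mirroring the RATE_LIMIT logic above (sketch)."""

    def __init__(self, max_calls: int, period: timedelta) -> None:
        self.max_calls = max_calls
        self.period = period
        self.calls: list[datetime] = []

    def try_call(self) -> bool:
        now = datetime.now()
        # Prune expired entries only when the window is full, as in the diff
        if len(self.calls) >= self.max_calls:
            self.calls = [call for call in self.calls if call > now - self.period]
        if len(self.calls) >= self.max_calls:
            return False  # the real code raises on_condition here
        self.calls.append(now)
        return True


limiter = RateLimiter(max_calls=2, period=timedelta(hours=1))
assert limiter.try_call() and limiter.try_call()
assert not limiter.try_call()  # third call within the window is rejected
```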

@@ -41,7 +41,7 @@ class JobGroup(CoreSysAttributes):
     def has_lock(self) -> bool:
         """Return true if current task has the lock on this job group."""
         return (
-            self.active_job
+            self.active_job is not None
             and self.sys_jobs.is_job
             and self.active_job == self.sys_jobs.current
         )

@@ -9,7 +9,7 @@ from aiohttp import hdrs
 import attr
 from sentry_sdk.types import Event, Hint
 
-from ..const import DOCKER_NETWORK_MASK, HEADER_TOKEN, HEADER_TOKEN_OLD, CoreState
+from ..const import DOCKER_IPV4_NETWORK_MASK, HEADER_TOKEN, HEADER_TOKEN_OLD, CoreState
 from ..coresys import CoreSys
 from ..exceptions import AddonConfigurationError
 
@@ -21,7 +21,7 @@ def sanitize_host(host: str) -> str:
     try:
         # Allow internal URLs
         ip = ipaddress.ip_address(host)
-        if ip in ipaddress.ip_network(DOCKER_NETWORK_MASK):
+        if ip in ipaddress.ip_network(DOCKER_IPV4_NETWORK_MASK):
             return host
     except ValueError:
         pass
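`sanitize_host` relies on `ipaddress` membership testing: parse failures mean the value was a hostname, and only IP literals inside the Docker network pass through unredacted. A runnable sketch with an assumed mask value (the real constant lives in the Supervisor's `const` module):

```python
import ipaddress

# Hypothetical mask for illustration; the real value is a Supervisor constant
DOCKER_IPV4_NETWORK_MASK = "172.30.32.0/23"


def is_internal(host: str) -> bool:
    """Return True if host parses as an IP inside the Docker network."""
    try:
        ip = ipaddress.ip_address(host)
    except ValueError:
        return False  # not an IP literal (e.g. a hostname)
    return ip in ipaddress.ip_network(DOCKER_IPV4_NETWORK_MASK)


assert is_internal("172.30.32.1")
assert not is_internal("8.8.8.8")
assert not is_internal("example.com")
```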

@@ -1,13 +1,12 @@
 """A collection of tasks."""
 
-import asyncio
-from collections.abc import Awaitable
 from datetime import datetime, timedelta
 import logging
+from typing import cast
 
 from ..addons.const import ADDON_UPDATE_CONDITIONS
-from ..backups.const import LOCATION_CLOUD_BACKUP
-from ..const import AddonState
+from ..backups.const import LOCATION_CLOUD_BACKUP, LOCATION_TYPE
+from ..const import ATTR_TYPE, AddonState
 from ..coresys import CoreSysAttributes
 from ..exceptions import (
     AddonsError,
@@ -15,7 +14,7 @@ from ..exceptions import (
     HomeAssistantError,
     ObserverError,
 )
-from ..homeassistant.const import LANDINGPAGE
+from ..homeassistant.const import LANDINGPAGE, WSType
 from ..jobs.decorator import Job, JobCondition, JobExecutionLimit
 from ..plugins.const import PLUGIN_UPDATE_CONDITIONS
 from ..utils.dt import utcnow
@@ -106,7 +105,6 @@ class Tasks(CoreSysAttributes):
     )
     async def _update_addons(self):
         """Check if an update is available for an Add-on and update it."""
-        start_tasks: list[Awaitable[None]] = []
         for addon in self.sys_addons.all:
             if not addon.is_installed or not addon.auto_update:
                 continue
@@ -124,6 +122,12 @@ class Tasks(CoreSysAttributes):
                 continue
             # Delay auto-updates for a day in case of issues
             if utcnow() < addon.latest_version_timestamp + timedelta(days=1):
+                _LOGGER.debug(
+                    "Not updating add-on %s from %s to %s as the latest version is less than a day old",
+                    addon.slug,
+                    addon.version,
+                    addon.latest_version,
+                )
                 continue
             if not addon.test_update_schema():
                 _LOGGER.warning(
@@ -131,16 +135,21 @@ class Tasks(CoreSysAttributes):
                 )
                 continue
 
-            # Run Add-on update sequential
-            # avoid issue on slow IO
             _LOGGER.info("Add-on auto update process %s", addon.slug)
-            try:
-                if start_task := await self.sys_addons.update(addon.slug, backup=True):
-                    start_tasks.append(start_task)
-            except AddonsError:
-                _LOGGER.error("Can't auto update Add-on %s", addon.slug)
-
-        await asyncio.gather(*start_tasks)
+            # Call Home Assistant Core to update add-on to make sure that backups
+            # get created through the Home Assistant Core API (categorized correctly).
+            # Ultimately auto updates should be handled by Home Assistant Core itself
+            # through a update entity feature.
+            message = {
+                ATTR_TYPE: WSType.HASSIO_UPDATE_ADDON,
+                "addon": addon.slug,
+                "backup": True,
+            }
+            _LOGGER.debug(
+                "Sending update add-on WebSocket command to Home Assistant Core: %s",
+                message,
+            )
+            await self.sys_homeassistant.websocket.async_send_command(message)
 
     @Job(
         name="tasks_update_supervisor",
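The auto-update task now delegates to Home Assistant Core over WebSocket instead of calling `sys_addons.update()` directly, so the pre-update backup is created and categorized through Core's API. A hedged sketch of the message shape, with stand-in names (the real `WSType.HASSIO_UPDATE_ADDON` value is not shown in this diff):

```python
import asyncio

# Hypothetical stand-ins; the real names come from Supervisor constants
ATTR_TYPE = "type"
WS_TYPE_UPDATE_ADDON = "hassio/update/addon"


class FakeWebSocket:
    async def async_send_command(self, message: dict) -> None:
        print("->", message)


async def request_addon_update(ws: FakeWebSocket, slug: str) -> None:
    # Delegating the update to Core means the pre-update backup is created
    # through Core's API, as the comment in the diff explains.
    await ws.async_send_command(
        {
            ATTR_TYPE: WS_TYPE_UPDATE_ADDON,
            "addon": slug,
            "backup": True,
        }
    )


asyncio.run(request_addon_update(FakeWebSocket(), "core_mosquitto"))
```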
@@ -370,6 +379,8 @@ class Tasks(CoreSysAttributes):
         ]
         for backup in old_backups:
             try:
-                await self.sys_backups.remove(backup, [LOCATION_CLOUD_BACKUP])
+                await self.sys_backups.remove(
+                    backup, [cast(LOCATION_TYPE, LOCATION_CLOUD_BACKUP)]
+                )
             except BackupFileNotFoundError as err:
                 _LOGGER.debug("Can't remove backup %s: %s", backup.slug, err)

@@ -56,7 +56,7 @@ class MountManager(FileConfiguration, CoreSysAttributes):
     async def load_config(self) -> Self:
         """Load config in executor."""
         await super().load_config()
-        self._mounts: dict[str, Mount] = {
+        self._mounts = {
             mount[ATTR_NAME]: Mount.from_dict(self.coresys, mount)
             for mount in self._data[ATTR_MOUNTS]
         }
@@ -172,12 +172,12 @@ class MountManager(FileConfiguration, CoreSysAttributes):
         errors = await asyncio.gather(*mount_tasks, return_exceptions=True)
 
         for i in range(len(errors)):  # pylint: disable=consider-using-enumerate
-            if not errors[i]:
+            if not (err := errors[i]):
                 continue
             if mounts[i].failed_issue in self.sys_resolution.issues:
                 continue
-            if not isinstance(errors[i], MountError):
-                await async_capture_exception(errors[i])
+            if not isinstance(err, MountError):
+                await async_capture_exception(err)
 
             self.sys_resolution.add_issue(
                 evolve(mounts[i].failed_issue),
@@ -219,7 +219,7 @@ class MountManager(FileConfiguration, CoreSysAttributes):
         conditions=[JobCondition.MOUNT_AVAILABLE],
         on_condition=MountJobError,
     )
-    async def remove_mount(self, name: str, *, retain_entry: bool = False) -> None:
+    async def remove_mount(self, name: str, *, retain_entry: bool = False) -> Mount:
         """Remove a mount."""
         # Add mount name to job
         self.sys_jobs.current.reference = name

@@ -2,6 +2,7 @@
 
 from abc import ABC, abstractmethod
 import asyncio
+from collections.abc import Callable
 from functools import cached_property
 import logging
 from pathlib import Path, PurePath
@@ -9,14 +10,6 @@ from pathlib import Path, PurePath
 from dbus_fast import Variant
 from voluptuous import Coerce
 
-from ..const import (
-    ATTR_NAME,
-    ATTR_PASSWORD,
-    ATTR_PORT,
-    ATTR_TYPE,
-    ATTR_USERNAME,
-    ATTR_VERSION,
-)
 from ..coresys import CoreSys, CoreSysAttributes
 from ..dbus.const import (
     DBUS_ATTR_ACTIVE_STATE,
@@ -41,22 +34,13 @@ from ..exceptions import (
 from ..resolution.const import ContextType, IssueType
 from ..resolution.data import Issue
 from ..utils.sentry import async_capture_exception
-from .const import (
-    ATTR_PATH,
-    ATTR_READ_ONLY,
-    ATTR_SERVER,
-    ATTR_SHARE,
-    ATTR_USAGE,
-    MountCifsVersion,
-    MountType,
-    MountUsage,
-)
+from .const import MountCifsVersion, MountType, MountUsage
 from .validate import MountData
 
 _LOGGER: logging.Logger = logging.getLogger(__name__)
 
-COERCE_MOUNT_TYPE = Coerce(MountType)
-COERCE_MOUNT_USAGE = Coerce(MountUsage)
+COERCE_MOUNT_TYPE: Callable[[str], MountType] = Coerce(MountType)
+COERCE_MOUNT_USAGE: Callable[[str], MountUsage] = Coerce(MountUsage)
 
 
 class Mount(CoreSysAttributes, ABC):
@@ -80,7 +64,7 @@ class Mount(CoreSysAttributes, ABC):
         if cls not in [Mount, NetworkMount]:
             return cls(coresys, data)
 
-        type_ = COERCE_MOUNT_TYPE(data[ATTR_TYPE])
+        type_ = COERCE_MOUNT_TYPE(data["type"])
         if type_ == MountType.CIFS:
             return CIFSMount(coresys, data)
         if type_ == MountType.NFS:
@@ -90,32 +74,33 @@ class Mount(CoreSysAttributes, ABC):
     def to_dict(self, *, skip_secrets: bool = True) -> MountData:
         """Return dictionary representation."""
         return MountData(
-            name=self.name, type=self.type, usage=self.usage, read_only=self.read_only
+            name=self.name,
+            type=self.type,
+            usage=self.usage and self.usage.value,
+            read_only=self.read_only,
         )
 
     @property
     def name(self) -> str:
         """Get name."""
-        return self._data[ATTR_NAME]
+        return self._data["name"]
 
     @property
     def type(self) -> MountType:
         """Get mount type."""
-        return COERCE_MOUNT_TYPE(self._data[ATTR_TYPE])
+        return COERCE_MOUNT_TYPE(self._data["type"])
 
     @property
     def usage(self) -> MountUsage | None:
         """Get mount usage."""
-        return (
-            COERCE_MOUNT_USAGE(self._data[ATTR_USAGE])
-            if ATTR_USAGE in self._data
-            else None
-        )
+        if self._data["usage"] is None:
+            return None
+        return COERCE_MOUNT_USAGE(self._data["usage"])
 
     @property
     def read_only(self) -> bool:
         """Is mount read-only."""
-        return self._data.get(ATTR_READ_ONLY, False)
+        return self._data.get("read_only", False)
 
     @property
     @abstractmethod
@@ -186,20 +171,20 @@ class Mount(CoreSysAttributes, ABC):
     async def load(self) -> None:
         """Initialize object."""
         # If there's no mount unit, mount it to make one
-        if not await self._update_unit():
+        if not (unit := await self._update_unit()):
             await self.mount()
             return
 
-        await self._update_state_await(not_state=UnitActiveState.ACTIVATING)
+        await self._update_state_await(unit, not_state=UnitActiveState.ACTIVATING)
 
         # If mount is not available, try to reload it
         if not await self.is_mounted():
             await self.reload()
 
-    async def _update_state(self) -> UnitActiveState | None:
+    async def _update_state(self, unit: SystemdUnit) -> None:
         """Update mount unit state."""
         try:
-            self._state = await self.unit.get_active_state()
+            self._state = await unit.get_active_state()
         except DBusError as err:
             await async_capture_exception(err)
             raise MountError(
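Threading the `SystemdUnit` through as a parameter, captured once with the walrus operator, removes the repeated `if not self.unit: return` guards: the helpers now take a unit that is non-`None` by construction. A minimal standalone sketch of the pattern, with hypothetical names:

```python
import asyncio


class Unit:
    async def get_active_state(self) -> str:
        return "active"


async def find_unit(name: str) -> Unit | None:
    return Unit() if name == "mnt-data.mount" else None


async def load(name: str) -> str:
    # Walrus keeps the non-None unit in scope, so helpers can take it as a
    # required parameter instead of re-checking an Optional attribute.
    if not (unit := await find_unit(name)):
        return "mounted fresh"  # stand-in for await self.mount()
    return await unit.get_active_state()


assert asyncio.run(load("missing.mount")) == "mounted fresh"
assert asyncio.run(load("mnt-data.mount")) == "active"
```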
@@ -220,10 +205,10 @@ class Mount(CoreSysAttributes, ABC):
 
     async def update(self) -> bool:
         """Update info about mount from dbus. Return true if it is mounted and available."""
-        if not await self._update_unit():
+        if not (unit := await self._update_unit()):
             return False
 
-        await self._update_state()
+        await self._update_state(unit)
 
         # If active, dismiss corresponding failed mount issue if found
         if (
@@ -235,16 +220,14 @@ class Mount(CoreSysAttributes, ABC):
 
     async def _update_state_await(
         self,
+        unit: SystemdUnit,
         expected_states: list[UnitActiveState] | None = None,
         not_state: UnitActiveState = UnitActiveState.ACTIVATING,
     ) -> None:
         """Update state info about mount from dbus. Wait for one of expected_states to appear or state to change from not_state."""
-        if not self.unit:
-            return
-
         try:
-            async with asyncio.timeout(30), self.unit.properties_changed() as signal:
-                await self._update_state()
+            async with asyncio.timeout(30), unit.properties_changed() as signal:
+                await self._update_state(unit)
                 while (
                     expected_states
                     and self.state not in expected_states
@@ -312,8 +295,8 @@ class Mount(CoreSysAttributes, ABC):
                 f"Could not mount {self.name} due to: {err!s}", _LOGGER.error
             ) from err
 
-        if await self._update_unit():
-            await self._update_state_await(not_state=UnitActiveState.ACTIVATING)
+        if unit := await self._update_unit():
+            await self._update_state_await(unit, not_state=UnitActiveState.ACTIVATING)
 
         if not await self.is_mounted():
             raise MountActivationError(
@@ -323,17 +306,17 @@ class Mount(CoreSysAttributes, ABC):
 
     async def unmount(self) -> None:
         """Unmount using systemd."""
-        if not await self._update_unit():
+        if not (unit := await self._update_unit()):
             _LOGGER.info("Mount %s is not mounted, skipping unmount", self.name)
             return
 
-        await self._update_state()
+        await self._update_state(unit)
         try:
             if self.state != UnitActiveState.FAILED:
                 await self.sys_dbus.systemd.stop_unit(self.unit_name, StopUnitMode.FAIL)
 
                 await self._update_state_await(
-                    [UnitActiveState.INACTIVE, UnitActiveState.FAILED]
+                    unit, [UnitActiveState.INACTIVE, UnitActiveState.FAILED]
                 )
 
             if self.state == UnitActiveState.FAILED:
@@ -360,8 +343,10 @@ class Mount(CoreSysAttributes, ABC):
                 f"Could not reload mount {self.name} due to: {err!s}", _LOGGER.error
             ) from err
         else:
-            if await self._update_unit():
-                await self._update_state_await(not_state=UnitActiveState.ACTIVATING)
+            if unit := await self._update_unit():
+                await self._update_state_await(
+                    unit, not_state=UnitActiveState.ACTIVATING
+                )
 
         if not await self.is_mounted():
             raise MountActivationError(
@@ -381,18 +366,18 @@ class NetworkMount(Mount, ABC):
         """Return dictionary representation."""
         out = MountData(server=self.server, **super().to_dict())
         if self.port is not None:
-            out[ATTR_PORT] = self.port
+            out["port"] = self.port
         return out
 
     @property
     def server(self) -> str:
         """Get server."""
-        return self._data[ATTR_SERVER]
+        return self._data["server"]
 
     @property
     def port(self) -> int | None:
         """Get port, returns none if using the protocol default."""
-        return self._data.get(ATTR_PORT)
+        return self._data.get("port")
 
     @property
     def where(self) -> PurePath:
@@ -420,31 +405,31 @@ class CIFSMount(NetworkMount):
|
|||||||
def to_dict(self, *, skip_secrets: bool = True) -> MountData:
|
def to_dict(self, *, skip_secrets: bool = True) -> MountData:
|
||||||
"""Return dictionary representation."""
|
"""Return dictionary representation."""
|
||||||
out = MountData(share=self.share, **super().to_dict())
|
out = MountData(share=self.share, **super().to_dict())
|
||||||
if not skip_secrets and self.username is not None:
|
if not skip_secrets and self.username is not None and self.password is not None:
|
||||||
out[ATTR_USERNAME] = self.username
|
out["username"] = self.username
|
||||||
out[ATTR_PASSWORD] = self.password
|
out["password"] = self.password
|
||||||
out[ATTR_VERSION] = self.version
|
out["version"] = self.version
|
||||||
return out
|
return out
|
||||||
|
|
||||||
@property
|
@property
|
||||||
def share(self) -> str:
|
def share(self) -> str:
|
||||||
"""Get share."""
|
"""Get share."""
|
||||||
return self._data[ATTR_SHARE]
|
return self._data["share"]
|
||||||
|
|
||||||
@property
|
@property
|
||||||
def username(self) -> str | None:
|
def username(self) -> str | None:
|
||||||
"""Get username, returns none if auth is not used."""
|
"""Get username, returns none if auth is not used."""
|
||||||
return self._data.get(ATTR_USERNAME)
|
return self._data.get("username")
|
||||||
|
|
||||||
@property
|
@property
|
||||||
def password(self) -> str | None:
|
def password(self) -> str | None:
|
||||||
"""Get password, returns none if auth is not used."""
|
"""Get password, returns none if auth is not used."""
|
||||||
return self._data.get(ATTR_PASSWORD)
|
return self._data.get("password")
|
||||||
|
|
||||||
@property
|
@property
|
||||||
def version(self) -> str | None:
|
def version(self) -> str | None:
|
||||||
"""Get password, returns none if auth is not used."""
|
"""Get cifs version, returns none if using default."""
|
||||||
version = self._data.get(ATTR_VERSION)
|
version = self._data.get("version")
|
||||||
if version == MountCifsVersion.LEGACY_1_0:
|
if version == MountCifsVersion.LEGACY_1_0:
|
||||||
return "1.0"
|
return "1.0"
|
||||||
if version == MountCifsVersion.LEGACY_2_0:
|
if version == MountCifsVersion.LEGACY_2_0:
|
||||||
@ -513,7 +498,7 @@ class NFSMount(NetworkMount):
|
|||||||
@property
|
@property
|
||||||
def path(self) -> PurePath:
|
def path(self) -> PurePath:
|
||||||
"""Get path."""
|
"""Get path."""
|
||||||
return PurePath(self._data[ATTR_PATH])
|
return PurePath(self._data["path"])
|
||||||
|
|
||||||
@property
|
@property
|
||||||
def what(self) -> str:
|
def what(self) -> str:
|
||||||
@ -543,7 +528,7 @@ class BindMount(Mount):
|
|||||||
def create(
|
def create(
|
||||||
coresys: CoreSys,
|
coresys: CoreSys,
|
||||||
name: str,
|
name: str,
|
||||||
path: Path,
|
path: PurePath,
|
||||||
usage: MountUsage | None = None,
|
usage: MountUsage | None = None,
|
||||||
where: PurePath | None = None,
|
where: PurePath | None = None,
|
||||||
read_only: bool = False,
|
read_only: bool = False,
|
||||||
@ -568,7 +553,7 @@ class BindMount(Mount):
|
|||||||
@property
|
@property
|
||||||
def path(self) -> PurePath:
|
def path(self) -> PurePath:
|
||||||
"""Get path."""
|
"""Get path."""
|
||||||
return PurePath(self._data[ATTR_PATH])
|
return PurePath(self._data["path"])
|
||||||
|
|
||||||
@property
|
@property
|
||||||
def what(self) -> str:
|
def what(self) -> str:
|
||||||
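A note on the refactor above: `_update_unit()` now hands the resolved `SystemdUnit` back to the caller, which threads it through with the walrus operator instead of re-reading the mutable `self.unit` attribute, so the state helpers no longer need an early-out for a missing unit. A minimal, illustrative sketch of that pattern (the `Unit`/`Service` names here are stand-ins, not Supervisor APIs):

from __future__ import annotations

import asyncio


class Unit:
    async def get_active_state(self) -> str:
        return "active"


class Service:
    async def _update_unit(self) -> Unit | None:
        # Imagine a D-Bus lookup here; None means "no such unit".
        return Unit()

    async def _update_state(self, unit: Unit) -> None:
        # The caller proves a unit exists by passing it; no None check needed.
        print(await unit.get_active_state())

    async def load(self) -> None:
        if not (unit := await self._update_unit()):
            return  # nothing to query yet
        await self._update_state(unit)


asyncio.run(Service().load())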
@@ -103,7 +103,7 @@ class MountData(TypedDict):
     name: str
     type: str
     read_only: bool
-    usage: NotRequired[str]
+    usage: str | None
 
     # CIFS and NFS fields
     server: NotRequired[str]
@@ -113,6 +113,7 @@ class MountData(TypedDict):
     share: NotRequired[str]
     username: NotRequired[str]
     password: NotRequired[str]
+    version: NotRequired[str | None]
 
     # NFS and Bind fields
     path: NotRequired[str]
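The `MountData` change above leans on a TypedDict distinction worth spelling out: `NotRequired[...]` means the key may be missing entirely, while `str | None` means the key must be present but may hold None; the new `version` field combines both. A short sketch (requires Python 3.11+ for `NotRequired` in `typing`):

from typing import NotRequired, TypedDict


class Example(TypedDict):
    usage: str | None                  # must be present, may be None
    version: NotRequired[str | None]   # may be absent, and may be None


ok: Example = {"usage": None}                        # fine: version omitted
also_ok: Example = {"usage": "backup", "version": None}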
@@ -5,7 +5,7 @@ from contextlib import suppress
 from dataclasses import dataclass
 import logging
 from pathlib import Path
-from typing import Any, Final
+from typing import Any, Final, cast
 
 from awesomeversion import AwesomeVersion
 
@@ -24,6 +24,7 @@ from ..exceptions import (
 )
 from ..jobs.const import JobCondition, JobExecutionLimit
 from ..jobs.decorator import Job
+from ..resolution.checks.base import CheckBase
 from ..resolution.checks.disabled_data_disk import CheckDisabledDataDisk
 from ..resolution.checks.multiple_data_disks import CheckMultipleDataDisks
 from ..utils.sentry import async_capture_exception
@@ -149,7 +150,7 @@ class DataDisk(CoreSysAttributes):
         Available disks are drives where nothing on it has been mounted
         and it can be formatted.
         """
-        available: list[UDisks2Drive] = []
+        available: list[Disk] = []
         for drive in self.sys_dbus.udisks2.drives:
             block_devices = self._get_block_devices_for_drive(drive)
             primary = _get_primary_block_device(block_devices)
@@ -166,12 +167,16 @@ class DataDisk(CoreSysAttributes):
     @property
     def check_multiple_data_disks(self) -> CheckMultipleDataDisks:
         """Resolution center check for multiple data disks."""
-        return self.sys_resolution.check.get("multiple_data_disks")
+        return cast(
+            CheckMultipleDataDisks, self.sys_resolution.check.get("multiple_data_disks")
+        )
 
     @property
     def check_disabled_data_disk(self) -> CheckDisabledDataDisk:
         """Resolution center check for disabled data disk."""
-        return self.sys_resolution.check.get("disabled_data_disk")
+        return cast(
+            CheckDisabledDataDisk, self.sys_resolution.check.get("disabled_data_disk")
+        )
 
     def _get_block_devices_for_drive(self, drive: UDisks2Drive) -> list[UDisks2Block]:
         """Get block devices for a drive."""
@@ -361,7 +366,7 @@ class DataDisk(CoreSysAttributes):
 
         try:
             partition_block = await UDisks2Block.new(
-                partition, self.sys_dbus.bus, sync_properties=False
+                partition, self.sys_dbus.connected_bus, sync_properties=False
             )
         except DBusError as err:
             raise HassOSDataDiskError(
@@ -388,7 +393,7 @@ class DataDisk(CoreSysAttributes):
             properties[DBUS_IFACE_BLOCK][DBUS_ATTR_ID_LABEL]
             == FILESYSTEM_LABEL_DATA_DISK
         ):
-            check = self.check_multiple_data_disks
+            check: CheckBase = self.check_multiple_data_disks
         elif (
             properties[DBUS_IFACE_BLOCK][DBUS_ATTR_ID_LABEL]
             == FILESYSTEM_LABEL_DISABLED_DATA_DISK
@@ -411,7 +416,7 @@ class DataDisk(CoreSysAttributes):
             and issue.context == self.check_multiple_data_disks.context
             for issue in self.sys_resolution.issues
         ):
-            check = self.check_multiple_data_disks
+            check: CheckBase = self.check_multiple_data_disks
         elif any(
             issue.type == self.check_disabled_data_disk.issue
             and issue.context == self.check_disabled_data_disk.context
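The `cast(...)` calls above exist because a check registry keyed by name can only be typed with the common base class, so each lookup has to be narrowed by the caller; the `check: CheckBase` annotation likewise gives both branches a shared type to assign into. A minimal sketch of the idea, with stand-in classes rather than the Supervisor registry:

from typing import cast


class CheckBase: ...


class CheckMultipleDataDisks(CheckBase): ...


registry: dict[str, CheckBase] = {"multiple_data_disks": CheckMultipleDataDisks()}


def get_multiple_data_disks() -> CheckMultipleDataDisks:
    # get() is typed as returning CheckBase | None; cast narrows it for callers.
    return cast(CheckMultipleDataDisks, registry.get("multiple_data_disks"))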
@@ -1,11 +1,11 @@
 """OS support on supervisor."""
 
-from collections.abc import Awaitable
 from dataclasses import dataclass
 from datetime import datetime
 import errno
 import logging
 from pathlib import Path, PurePath
+from typing import cast
 
 import aiohttp
 from awesomeversion import AwesomeVersion, AwesomeVersionException
@@ -61,8 +61,8 @@ class SlotStatus:
             device=PurePath(data["device"]),
             bundle_compatible=data.get("bundle.compatible"),
             sha256=data.get("sha256"),
-            size=data.get("size"),
-            installed_count=data.get("installed.count"),
+            size=cast(int | None, data.get("size")),
+            installed_count=cast(int | None, data.get("installed.count")),
             bundle_version=AwesomeVersion(data["bundle.version"])
             if "bundle.version" in data
             else None,
@@ -70,51 +70,17 @@ class SlotStatus:
             if "installed.timestamp" in data
             else None,
             status=data.get("status"),
-            activated_count=data.get("activated.count"),
+            activated_count=cast(int | None, data.get("activated.count")),
             activated_timestamp=datetime.fromisoformat(data["activated.timestamp"])
             if "activated.timestamp" in data
             else None,
-            boot_status=data.get("boot-status"),
+            boot_status=RaucState(data["boot-status"])
+            if "boot-status" in data
+            else None,
             bootname=data.get("bootname"),
             parent=data.get("parent"),
         )
 
-    def to_dict(self) -> SlotStatusDataType:
-        """Get dictionary representation."""
-        out: SlotStatusDataType = {
-            "class": self.class_,
-            "type": self.type_,
-            "state": self.state,
-            "device": self.device.as_posix(),
-        }
-
-        if self.bundle_compatible is not None:
-            out["bundle.compatible"] = self.bundle_compatible
-        if self.sha256 is not None:
-            out["sha256"] = self.sha256
-        if self.size is not None:
-            out["size"] = self.size
-        if self.installed_count is not None:
-            out["installed.count"] = self.installed_count
-        if self.bundle_version is not None:
-            out["bundle.version"] = str(self.bundle_version)
-        if self.installed_timestamp is not None:
-            out["installed.timestamp"] = str(self.installed_timestamp)
-        if self.status is not None:
-            out["status"] = self.status
-        if self.activated_count is not None:
-            out["activated.count"] = self.activated_count
-        if self.activated_timestamp:
-            out["activated.timestamp"] = str(self.activated_timestamp)
-        if self.boot_status:
-            out["boot-status"] = self.boot_status
-        if self.bootname is not None:
-            out["bootname"] = self.bootname
-        if self.parent is not None:
-            out["parent"] = self.parent
-
-        return out
-
 
 class OSManager(CoreSysAttributes):
     """OS interface inside supervisor."""
@@ -148,7 +114,11 @@ class OSManager(CoreSysAttributes):
     def need_update(self) -> bool:
         """Return true if a HassOS update is available."""
         try:
-            return self.version < self.latest_version
+            return (
+                self.version is not None
+                and self.latest_version is not None
+                and self.version < self.latest_version
+            )
         except (AwesomeVersionException, TypeError):
             return False
 
@@ -176,6 +146,9 @@ class OSManager(CoreSysAttributes):
 
     def get_slot_name(self, boot_name: str) -> str:
         """Get slot name from boot name."""
+        if not self._slots:
+            raise HassOSSlotNotFound()
+
         for name, status in self._slots.items():
             if status.bootname == boot_name:
                 return name
@@ -288,11 +261,8 @@ class OSManager(CoreSysAttributes):
         conditions=[JobCondition.HAOS],
         on_condition=HassOSJobError,
     )
-    async def config_sync(self) -> Awaitable[None]:
-        """Trigger a host config reload from usb.
-
-        Return a coroutine.
-        """
+    async def config_sync(self) -> None:
+        """Trigger a host config reload from usb."""
         _LOGGER.info(
             "Synchronizing configuration from USB with Home Assistant Operating System."
         )
@@ -314,6 +284,10 @@ class OSManager(CoreSysAttributes):
         version = version or self.latest_version
 
         # Check installed version
+        if not version:
+            raise HassOSUpdateError(
+                "No version information available, cannot update", _LOGGER.error
+            )
         if version == self.version:
             raise HassOSUpdateError(
                 f"Version {version!s} is already installed", _LOGGER.warning
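The `need_update` rewrite above (and the identical one in the plugin base class further down) guards both operands before comparing, since comparing an `AwesomeVersion` against None raises `TypeError`; the `except` clause then only has to absorb genuinely unparseable versions. A small standalone sketch using the real awesomeversion package:

from awesomeversion import AwesomeVersion, AwesomeVersionException


def need_update(version: AwesomeVersion | None, latest: AwesomeVersion | None) -> bool:
    """Return True only when both versions are known and an update exists."""
    try:
        return version is not None and latest is not None and version < latest
    except (AwesomeVersionException, TypeError):
        return False


print(need_update(AwesomeVersion("10.0"), AwesomeVersion("10.1")))  # True
print(need_update(None, AwesomeVersion("10.1")))                    # False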
@@ -22,6 +22,7 @@ from ..exceptions import (
     AudioUpdateError,
     ConfigurationFileError,
     DockerError,
+    PluginError,
 )
 from ..jobs.const import JobExecutionLimit
 from ..jobs.decorator import Job
@@ -127,7 +128,7 @@ class PluginAudio(PluginBase):
         """Update Audio plugin."""
         try:
             await super().update(version)
-        except DockerError as err:
+        except (DockerError, PluginError) as err:
             raise AudioUpdateError("Audio update failed", _LOGGER.error) from err
 
     async def restart(self) -> None:
@@ -63,7 +63,11 @@ class PluginBase(ABC, FileConfiguration, CoreSysAttributes):
     def need_update(self) -> bool:
         """Return True if an update is available."""
         try:
-            return self.version < self.latest_version
+            return (
+                self.version is not None
+                and self.latest_version is not None
+                and self.version < self.latest_version
+            )
         except (AwesomeVersionException, TypeError):
             return False
 
@@ -153,6 +157,10 @@ class PluginBase(ABC, FileConfiguration, CoreSysAttributes):
     async def start(self) -> None:
         """Start system plugin."""
 
+    @abstractmethod
+    async def stop(self) -> None:
+        """Stop system plugin."""
+
     async def load(self) -> None:
         """Load system plugin."""
         self.start_watchdog()
@@ -160,14 +168,14 @@ class PluginBase(ABC, FileConfiguration, CoreSysAttributes):
         # Check plugin state
         try:
             # Evaluate Version if we lost this information
-            if not self.version:
-                self.version = await self.instance.get_latest_version()
+            if self.version:
+                version = self.version
+            else:
+                self.version = version = await self.instance.get_latest_version()
 
-            await self.instance.attach(
-                version=self.version, skip_state_event_if_down=True
-            )
+            await self.instance.attach(version=version, skip_state_event_if_down=True)
 
-            await self.instance.check_image(self.version, self.default_image)
+            await self.instance.check_image(version, self.default_image)
         except DockerError:
             _LOGGER.info(
                 "No %s plugin Docker image %s found.", self.slug, self.instance.image
@@ -177,7 +185,7 @@ class PluginBase(ABC, FileConfiguration, CoreSysAttributes):
             with suppress(PluginError):
                 await self.install()
         else:
-            self.version = self.instance.version
+            self.version = self.instance.version or version
             self.image = self.default_image
             await self.save_data()
 
@@ -194,11 +202,10 @@ class PluginBase(ABC, FileConfiguration, CoreSysAttributes):
             if not self.latest_version:
                 await self.sys_updater.reload()
 
-            if self.latest_version:
+            if to_version := self.latest_version:
                 with suppress(DockerError):
-                    await self.instance.install(
-                        self.latest_version, image=self.default_image
-                    )
+                    await self.instance.install(to_version, image=self.default_image)
+                    self.version = self.instance.version or to_version
                     break
             _LOGGER.warning(
                 "Error on installing %s plugin, retrying in 30sec", self.slug
@@ -206,23 +213,28 @@ class PluginBase(ABC, FileConfiguration, CoreSysAttributes):
             await asyncio.sleep(30)
 
         _LOGGER.info("%s plugin now installed", self.slug)
-        self.version = self.instance.version
         self.image = self.default_image
         await self.save_data()
 
     async def update(self, version: str | None = None) -> None:
         """Update system plugin."""
-        version = version or self.latest_version
+        to_version = AwesomeVersion(version) if version else self.latest_version
+        if not to_version:
+            raise PluginError(
+                f"Cannot determine latest version of plugin {self.slug} for update",
+                _LOGGER.error,
+            )
+
         old_image = self.image
 
-        if version == self.version:
+        if to_version == self.version:
             _LOGGER.warning(
-                "Version %s is already installed for %s", version, self.slug
+                "Version %s is already installed for %s", to_version, self.slug
             )
             return
 
-        await self.instance.update(version, image=self.default_image)
-        self.version = self.instance.version
+        await self.instance.update(to_version, image=self.default_image)
+        self.version = self.instance.version or to_version
         self.image = self.default_image
         await self.save_data()
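The `update()` hunk above normalizes the caller-supplied string into an `AwesomeVersion` once (`to_version`), so every later comparison and Docker call sees a single type, and a missing `latest_version` becomes an explicit, early error instead of a None comparison deep in the call chain. A hedged sketch of just that normalization step (`PluginError` replaced with `RuntimeError` to keep it self-contained):

from awesomeversion import AwesomeVersion


def resolve_target(
    version: str | None, latest: AwesomeVersion | None
) -> AwesomeVersion:
    """Normalize a requested version, falling back to the known latest."""
    to_version = AwesomeVersion(version) if version else latest
    if not to_version:
        # The diff raises PluginError here; RuntimeError keeps the sketch standalone.
        raise RuntimeError("no version information available")
    return to_version


print(resolve_target("2024.01.1", None))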
@@ -14,7 +14,7 @@ from ..coresys import CoreSys
 from ..docker.cli import DockerCli
 from ..docker.const import ContainerState
 from ..docker.stats import DockerStats
-from ..exceptions import CliError, CliJobError, CliUpdateError, DockerError
+from ..exceptions import CliError, CliJobError, CliUpdateError, DockerError, PluginError
 from ..jobs.const import JobExecutionLimit
 from ..jobs.decorator import Job
 from ..utils.sentry import async_capture_exception
@@ -53,7 +53,7 @@ class PluginCli(PluginBase):
         return self.sys_updater.version_cli
 
     @property
-    def supervisor_token(self) -> str:
+    def supervisor_token(self) -> str | None:
         """Return an access token for the Supervisor API."""
         return self._data.get(ATTR_ACCESS_TOKEN)
 
@@ -66,7 +66,7 @@ class PluginCli(PluginBase):
         """Update local HA cli."""
         try:
             await super().update(version)
-        except DockerError as err:
+        except (DockerError, PluginError) as err:
             raise CliUpdateError("CLI update failed", _LOGGER.error) from err
 
     async def start(self) -> None:
@@ -15,7 +15,8 @@ from awesomeversion import AwesomeVersion
 import jinja2
 import voluptuous as vol
 
-from ..const import ATTR_SERVERS, DNS_SUFFIX, LogLevel
+from ..bus import EventListener
+from ..const import ATTR_SERVERS, DNS_SUFFIX, BusEvent, LogLevel
 from ..coresys import CoreSys
 from ..dbus.const import MulticastProtocolEnabled
 from ..docker.const import ContainerState
@@ -28,6 +29,7 @@ from ..exceptions import (
     CoreDNSJobError,
     CoreDNSUpdateError,
     DockerError,
+    PluginError,
 )
 from ..jobs.const import JobExecutionLimit
 from ..jobs.decorator import Job
@@ -71,11 +73,17 @@ class PluginDns(PluginBase):
         self.slug = "dns"
         self.coresys: CoreSys = coresys
         self.instance: DockerDNS = DockerDNS(coresys)
-        self.resolv_template: jinja2.Template | None = None
-        self.hosts_template: jinja2.Template | None = None
+        self._resolv_template: jinja2.Template | None = None
+        self._hosts_template: jinja2.Template | None = None
 
         self._hosts: list[HostEntry] = []
         self._loop: bool = False
+        self._cached_locals: list[str] | None = None
+
+        # Debouncing system for rapid local changes
+        self._locals_changed_handle: asyncio.TimerHandle | None = None
+        self._restart_after_locals_change_handle: asyncio.Task | None = None
+        self._connectivity_check_listener: EventListener | None = None
 
     @property
     def hosts(self) -> Path:
@@ -90,6 +98,12 @@ class PluginDns(PluginBase):
     @property
     def locals(self) -> list[str]:
         """Return list of local system DNS servers."""
+        if self._cached_locals is None:
+            self._cached_locals = self._compute_locals()
+        return self._cached_locals
+
+    def _compute_locals(self) -> list[str]:
+        """Compute list of local system DNS servers."""
         servers: list[str] = []
         for server in [
             f"dns://{server!s}" for server in self.sys_host.network.dns_servers
@@ -99,6 +113,52 @@ class PluginDns(PluginBase):
 
         return servers
 
+    async def _on_dns_container_running(self, event: DockerContainerStateEvent) -> None:
+        """Handle DNS container state change to running and trigger connectivity check."""
+        if event.name == self.instance.name and event.state == ContainerState.RUNNING:
+            # Wait before CoreDNS actually becomes available
+            await asyncio.sleep(5)
+
+            _LOGGER.debug("CoreDNS started, checking connectivity")
+            await self.sys_supervisor.check_connectivity()
+
+    async def _restart_dns_after_locals_change(self) -> None:
+        """Restart DNS after a debounced delay for local changes."""
+        old_locals = self._cached_locals
+        new_locals = self._compute_locals()
+        if old_locals == new_locals:
+            return
+
+        _LOGGER.debug("DNS locals changed from %s to %s", old_locals, new_locals)
+        self._cached_locals = new_locals
+        if not await self.instance.is_running():
+            return
+
+        await self.restart()
+        self._restart_after_locals_change_handle = None
+
+    def _trigger_restart_dns_after_locals_change(self) -> None:
+        """Trigger a restart of DNS after local changes."""
+        # Cancel existing restart task if any
+        if self._restart_after_locals_change_handle:
+            self._restart_after_locals_change_handle.cancel()
+
+        self._restart_after_locals_change_handle = self.sys_create_task(
+            self._restart_dns_after_locals_change()
+        )
+        self._locals_changed_handle = None
+
+    def notify_locals_changed(self) -> None:
+        """Schedule a debounced DNS restart for local changes."""
+        # Cancel existing timer if any
+        if self._locals_changed_handle:
+            self._locals_changed_handle.cancel()
+
+        # Schedule new timer with 1 second delay
+        self._locals_changed_handle = self.sys_call_later(
+            1.0, self._trigger_restart_dns_after_locals_change
+        )
+
     @property
     def servers(self) -> list[str]:
         """Return list of DNS servers."""
@@ -147,11 +207,25 @@ class PluginDns(PluginBase):
         """Set fallback DNS enabled."""
         self._data[ATTR_FALLBACK] = value
 
+    @property
+    def hosts_template(self) -> jinja2.Template:
+        """Get hosts jinja template."""
+        if not self._hosts_template:
+            raise RuntimeError("Hosts template not set!")
+        return self._hosts_template
+
+    @property
+    def resolv_template(self) -> jinja2.Template:
+        """Get resolv jinja template."""
+        if not self._resolv_template:
+            raise RuntimeError("Resolv template not set!")
+        return self._resolv_template
+
     async def load(self) -> None:
         """Load DNS setup."""
         # Initialize CoreDNS Template
         try:
-            self.resolv_template = jinja2.Template(
+            self._resolv_template = jinja2.Template(
                 await self.sys_run_in_executor(RESOLV_TMPL.read_text, encoding="utf-8")
             )
         except OSError as err:
@@ -162,7 +236,7 @@ class PluginDns(PluginBase):
             _LOGGER.error("Can't read resolve.tmpl: %s", err)
 
         try:
-            self.hosts_template = jinja2.Template(
+            self._hosts_template = jinja2.Template(
                 await self.sys_run_in_executor(HOSTS_TMPL.read_text, encoding="utf-8")
             )
         except OSError as err:
@@ -173,10 +247,19 @@ class PluginDns(PluginBase):
             _LOGGER.error("Can't read hosts.tmpl: %s", err)
 
         await self._init_hosts()
+
+        # Register Docker event listener for connectivity checks
+        if not self._connectivity_check_listener:
+            self._connectivity_check_listener = self.sys_bus.register_event(
+                BusEvent.DOCKER_CONTAINER_STATE_CHANGE, self._on_dns_container_running
+            )
+
         await super().load()
 
         # Update supervisor
-        await self._write_resolv(HOST_RESOLV)
+        # Resolv template should always be set but just in case don't fail load
+        if self._resolv_template:
+            await self._write_resolv(HOST_RESOLV)
 
         # Reinitializing aiohttp.ClientSession after DNS setup makes sure that
         # aiodns is using the right DNS servers (see #5857).
@@ -201,7 +284,7 @@ class PluginDns(PluginBase):
         """Update CoreDNS plugin."""
         try:
             await super().update(version)
-        except DockerError as err:
+        except (DockerError, PluginError) as err:
             raise CoreDNSUpdateError("CoreDNS update failed", _LOGGER.error) from err
 
     async def restart(self) -> None:
@@ -211,7 +294,7 @@ class PluginDns(PluginBase):
         try:
             await self.instance.restart()
         except DockerError as err:
-            raise CoreDNSError("Can't start CoreDNS plugin", _LOGGER.error) from err
+            raise CoreDNSError("Can't restart CoreDNS plugin", _LOGGER.error) from err
 
     async def start(self) -> None:
         """Run CoreDNS."""
@@ -226,6 +309,16 @@ class PluginDns(PluginBase):
 
     async def stop(self) -> None:
         """Stop CoreDNS."""
+        # Cancel any pending locals change timer
+        if self._locals_changed_handle:
+            self._locals_changed_handle.cancel()
+            self._locals_changed_handle = None
+
+        # Wait for any pending restart before stopping
+        if self._restart_after_locals_change_handle:
+            self._restart_after_locals_change_handle.cancel()
+            self._restart_after_locals_change_handle = None
+
         _LOGGER.info("Stopping CoreDNS plugin")
         try:
             await self.instance.stop()
@@ -428,12 +521,6 @@ class PluginDns(PluginBase):
 
     async def _write_resolv(self, resolv_conf: Path) -> None:
         """Update/Write resolv.conf file."""
-        if not self.resolv_template:
-            _LOGGER.warning(
-                "Resolv template is missing, cannot write/update %s", resolv_conf
-            )
-            return
-
         nameservers = [str(self.sys_docker.network.dns), "127.0.0.11"]
 
         # Read resolv config
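The `notify_locals_changed()` machinery added above is a classic debounce: every notification rearms a one-second timer, and only when the timer finally fires does a single restart task run, so a burst of host DNS changes collapses into one CoreDNS restart. A minimal standalone sketch of the same shape (plain asyncio, not the Supervisor `sys_call_later`/`sys_create_task` wrappers):

import asyncio


class Debouncer:
    def __init__(self) -> None:
        self._handle: asyncio.TimerHandle | None = None

    def notify(self) -> None:
        if self._handle:
            self._handle.cancel()  # rearm on every notification
        self._handle = asyncio.get_running_loop().call_later(1.0, self._fire)

    def _fire(self) -> None:
        self._handle = None
        asyncio.get_running_loop().create_task(self._restart())

    async def _restart(self) -> None:
        print("restarting once")


async def main() -> None:
    debouncer = Debouncer()
    for _ in range(5):
        debouncer.notify()  # five rapid changes...
    await asyncio.sleep(1.5)  # ...one restart fires


asyncio.run(main())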
@@ -16,6 +16,7 @@ from ..exceptions import (
     MulticastError,
     MulticastJobError,
     MulticastUpdateError,
+    PluginError,
 )
 from ..jobs.const import JobExecutionLimit
 from ..jobs.decorator import Job
@@ -63,7 +64,7 @@ class PluginMulticast(PluginBase):
         """Update Multicast plugin."""
         try:
             await super().update(version)
-        except DockerError as err:
+        except (DockerError, PluginError) as err:
             raise MulticastUpdateError(
                 "Multicast update failed", _LOGGER.error
             ) from err
@@ -19,6 +19,7 @@ from ..exceptions import (
     ObserverError,
     ObserverJobError,
     ObserverUpdateError,
+    PluginError,
 )
 from ..jobs.const import JobExecutionLimit
 from ..jobs.decorator import Job
@@ -58,7 +59,7 @@ class PluginObserver(PluginBase):
         return self.sys_updater.version_observer
 
     @property
-    def supervisor_token(self) -> str:
+    def supervisor_token(self) -> str | None:
         """Return an access token for the Observer API."""
         return self._data.get(ATTR_ACCESS_TOKEN)
 
@@ -71,7 +72,7 @@ class PluginObserver(PluginBase):
         """Update local HA observer."""
         try:
             await super().update(version)
-        except DockerError as err:
+        except (DockerError, PluginError) as err:
             raise ObserverUpdateError(
                 "HA observer update failed", _LOGGER.error
             ) from err
@@ -90,6 +91,10 @@ class PluginObserver(PluginBase):
             _LOGGER.error("Can't start observer plugin")
             raise ObserverError() from err
 
+    async def stop(self) -> None:
+        """Raise. Supervisor should not stop observer."""
+        raise RuntimeError("Stopping observer without a restart is not supported!")
+
     async def stats(self) -> DockerStats:
         """Return stats of observer."""
         try:
@@ -67,10 +67,11 @@ class CheckAddonPwned(CheckBase):
     @Job(name="check_addon_pwned_approve", conditions=[JobCondition.INTERNET_SYSTEM])
     async def approve_check(self, reference: str | None = None) -> bool:
         """Approve check if it is affected by issue."""
-        addon = self.sys_addons.get(reference)
+        if not reference:
+            return False
 
         # Uninstalled
-        if not addon or not addon.is_installed:
+        if not (addon := self.sys_addons.get_local_only(reference)):
             return False
 
         # Not in use anymore
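This and the following check hunks all apply the same guard pattern: reject a None/empty `reference` up front, then bind the lookup result with `:=` so the truthiness check and the variable binding happen in one step. A tiny sketch of the shape (`get_local_only` is the Supervisor accessor named in the diff; here it is stubbed):

def get_local_only(reference: str):
    """Stub of the Supervisor addon lookup used in the diff."""
    return {"good_addon": object()}.get(reference)


def approve(reference: str | None) -> bool:
    if not reference:
        return False
    if not (addon := get_local_only(reference)):
        return False
    return addon is not None  # further checks on the bound addon go here


print(approve(None), approve("good_addon"))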
@@ -29,9 +29,11 @@ class CheckDetachedAddonMissing(CheckBase):
 
     async def approve_check(self, reference: str | None = None) -> bool:
         """Approve check if it is affected by issue."""
-        return (
-            addon := self.sys_addons.get(reference, local_only=True)
-        ) and addon.is_detached
+        if not reference:
+            return False
+
+        addon = self.sys_addons.get_local_only(reference)
+        return addon is not None and addon.is_detached
 
     @property
     def issue(self) -> IssueType:
@@ -27,9 +27,11 @@ class CheckDetachedAddonRemoved(CheckBase):
 
     async def approve_check(self, reference: str | None = None) -> bool:
         """Approve check if it is affected by issue."""
-        return (
-            addon := self.sys_addons.get(reference, local_only=True)
-        ) and addon.is_detached
+        if not reference:
+            return False
+
+        addon = self.sys_addons.get_local_only(reference)
+        return addon is not None and addon.is_detached
 
     @property
     def issue(self) -> IssueType:
@@ -35,6 +35,9 @@ class CheckDisabledDataDisk(CheckBase):
 
     async def approve_check(self, reference: str | None = None) -> bool:
         """Approve check if it is affected by issue."""
+        if not reference:
+            return False
+
         resolved = await self.sys_dbus.udisks2.resolve_device(
             DeviceSpecification(path=Path(reference))
         )
@@ -43,7 +46,7 @@ class CheckDisabledDataDisk(CheckBase):
     def _is_disabled_data_disk(self, block_device: UDisks2Block) -> bool:
         """Return true if filesystem block device has name indicating it was disabled by OS."""
         return (
-            block_device.filesystem
+            block_device.filesystem is not None
             and block_device.id_label == FILESYSTEM_LABEL_DISABLED_DATA_DISK
         )
 
@@ -2,6 +2,7 @@
 
 import asyncio
 from datetime import timedelta
+from typing import Literal
 
 from aiodns import DNSResolver
 from aiodns.error import DNSError
@@ -15,6 +16,15 @@ from ..const import DNS_CHECK_HOST, ContextType, IssueType
 from .base import CheckBase
 
 
+async def check_server(
+    loop: asyncio.AbstractEventLoop, server: str, qtype: Literal["A"] | Literal["AAAA"]
+) -> None:
+    """Check a DNS server and report issues."""
+    ip_addr = server[6:] if server.startswith("dns://") else server
+    async with DNSResolver(loop=loop, nameservers=[ip_addr]) as resolver:
+        await resolver.query(DNS_CHECK_HOST, qtype)
+
+
 def setup(coresys: CoreSys) -> CheckBase:
     """Check setup function."""
     return CheckDNSServer(coresys)
@@ -33,16 +43,18 @@ class CheckDNSServer(CheckBase):
         """Run check if not affected by issue."""
         dns_servers = self.dns_servers
         results = await asyncio.gather(
-            *[self._check_server(server) for server in dns_servers],
+            *[check_server(self.sys_loop, server, "A") for server in dns_servers],
             return_exceptions=True,
         )
-        for i in (r for r in range(len(results)) if isinstance(results[r], DNSError)):
-            self.sys_resolution.create_issue(
-                IssueType.DNS_SERVER_FAILED,
-                ContextType.DNS_SERVER,
-                reference=dns_servers[i],
-            )
-            await async_capture_exception(results[i])
+        # pylint: disable-next=consider-using-enumerate
+        for i in range(len(results)):
+            if isinstance(result := results[i], DNSError):
+                self.sys_resolution.create_issue(
+                    IssueType.DNS_SERVER_FAILED,
+                    ContextType.DNS_SERVER,
+                    reference=dns_servers[i],
+                )
+                await async_capture_exception(result)
 
     @Job(name="check_dns_server_approve", conditions=[JobCondition.INTERNET_SYSTEM])
     async def approve_check(self, reference: str | None = None) -> bool:
@@ -51,18 +63,12 @@ class CheckDNSServer(CheckBase):
             return False
 
         try:
-            await self._check_server(reference)
+            await check_server(self.sys_loop, reference, "A")
         except DNSError:
             return True
 
         return False
 
-    async def _check_server(self, server: str):
-        """Check a DNS server and report issues."""
-        ip_addr = server[6:] if server.startswith("dns://") else server
-        resolver = DNSResolver(nameservers=[ip_addr])
-        await resolver.query(DNS_CHECK_HOST, "A")
-
     @property
     def dns_servers(self) -> list[str]:
         """All user and system provided dns servers."""
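Both the IPv4 and the IPv6 check below now share this single module-level `check_server` coroutine, and the diff switches to using `DNSResolver` as an async context manager so its channel is torn down after the query. A usage sketch, assuming the module lives at `supervisor/resolution/checks/dns_server.py` as the new-file path elsewhere on this page suggests:

import asyncio

from aiodns.error import DNSError

from supervisor.resolution.checks.dns_server import check_server


async def probe(server: str) -> bool:
    """Return True if the server answers an A query for the check host."""
    try:
        await check_server(asyncio.get_running_loop(), server, "A")
    except DNSError:
        return False
    return True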
@@ -3,7 +3,6 @@
 import asyncio
 from datetime import timedelta
 
-from aiodns import DNSResolver
 from aiodns.error import DNSError
 
 from ...const import CoreState
@@ -11,8 +10,9 @@ from ...coresys import CoreSys
 from ...jobs.const import JobCondition, JobExecutionLimit
 from ...jobs.decorator import Job
 from ...utils.sentry import async_capture_exception
-from ..const import DNS_CHECK_HOST, DNS_ERROR_NO_DATA, ContextType, IssueType
+from ..const import DNS_ERROR_NO_DATA, ContextType, IssueType
 from .base import CheckBase
+from .dns_server import check_server
 
 
 def setup(coresys: CoreSys) -> CheckBase:
@@ -33,21 +33,21 @@ class CheckDNSServerIPv6(CheckBase):
         """Run check if not affected by issue."""
         dns_servers = self.dns_servers
         results = await asyncio.gather(
-            *[self._check_server(server) for server in dns_servers],
+            *[check_server(self.sys_loop, server, "AAAA") for server in dns_servers],
             return_exceptions=True,
         )
-        for i in (
-            r
-            for r in range(len(results))
-            if isinstance(results[r], DNSError)
-            and results[r].args[0] != DNS_ERROR_NO_DATA
-        ):
-            self.sys_resolution.create_issue(
-                IssueType.DNS_SERVER_IPV6_ERROR,
-                ContextType.DNS_SERVER,
-                reference=dns_servers[i],
-            )
-            await async_capture_exception(results[i])
+        # pylint: disable-next=consider-using-enumerate
+        for i in range(len(results)):
+            if (
+                isinstance(result := results[i], DNSError)
+                and result.args[0] != DNS_ERROR_NO_DATA
+            ):
+                self.sys_resolution.create_issue(
+                    IssueType.DNS_SERVER_IPV6_ERROR,
+                    ContextType.DNS_SERVER,
+                    reference=dns_servers[i],
+                )
+                await async_capture_exception(result)
 
     @Job(
         name="check_dns_server_ipv6_approve", conditions=[JobCondition.INTERNET_SYSTEM]
@@ -58,19 +58,13 @@ class CheckDNSServerIPv6(CheckBase):
             return False
 
         try:
-            await self._check_server(reference)
+            await check_server(self.sys_loop, reference, "AAAA")
         except DNSError as dns_error:
             if dns_error.args[0] != DNS_ERROR_NO_DATA:
                 return True
 
         return False
 
-    async def _check_server(self, server: str):
-        """Check a DNS server and report issues."""
-        ip_addr = server[6:] if server.startswith("dns://") else server
-        resolver = DNSResolver(nameservers=[ip_addr])
-        await resolver.query(DNS_CHECK_HOST, "AAAA")
-
     @property
     def dns_servers(self) -> list[str]:
         """All user and system provided dns servers."""
supervisor/resolution/checks/duplicate_os_installation.py (new file, 108 lines)
@@ -0,0 +1,108 @@
+"""Helpers to check for duplicate OS installations."""
+
+import logging
+
+from ...const import CoreState
+from ...coresys import CoreSys
+from ...dbus.udisks2.data import DeviceSpecification
+from ..const import ContextType, IssueType, UnhealthyReason
+from .base import CheckBase
+
+_LOGGER: logging.Logger = logging.getLogger(__name__)
+
+# Partition labels to check for duplicates (GPT-based installations)
+HAOS_PARTITIONS = [
+    "hassos-boot",
+    "hassos-kernel0",
+    "hassos-kernel1",
+    "hassos-system0",
+    "hassos-system1",
+]
+
+# Partition UUIDs to check for duplicates (MBR-based installations)
+HAOS_PARTITION_UUIDS = [
+    "48617373-01",  # hassos-boot
+    "48617373-05",  # hassos-kernel0
+    "48617373-06",  # hassos-system0
+    "48617373-07",  # hassos-kernel1
+    "48617373-08",  # hassos-system1
+]
+
+
+def _get_device_specifications():
+    """Generate DeviceSpecification objects for both GPT and MBR partitions."""
+    # GPT-based installations (partition labels)
+    for partition_label in HAOS_PARTITIONS:
+        yield (
+            DeviceSpecification(partlabel=partition_label),
+            "partition",
+            partition_label,
+        )
+
+    # MBR-based installations (partition UUIDs)
+    for partition_uuid in HAOS_PARTITION_UUIDS:
+        yield (
+            DeviceSpecification(partuuid=partition_uuid),
+            "partition UUID",
+            partition_uuid,
+        )
+
+
+def setup(coresys: CoreSys) -> CheckBase:
+    """Check setup function."""
+    return CheckDuplicateOSInstallation(coresys)
+
+
+class CheckDuplicateOSInstallation(CheckBase):
+    """CheckDuplicateOSInstallation class for check."""
+
+    async def run_check(self) -> None:
+        """Run check if not affected by issue."""
+        if not self.sys_os.available:
+            _LOGGER.debug(
+                "Skipping duplicate OS installation check, OS is not available"
+            )
+            return
+
+        for device_spec, spec_type, identifier in _get_device_specifications():
+            resolved = await self.sys_dbus.udisks2.resolve_device(device_spec)
+            if resolved and len(resolved) > 1:
+                _LOGGER.warning(
+                    "Found duplicate OS installation: %s %s exists on %d devices (%s)",
+                    identifier,
+                    spec_type,
+                    len(resolved),
+                    ", ".join(str(device.device) for device in resolved),
+                )
+                self.sys_resolution.add_unhealthy_reason(
+                    UnhealthyReason.DUPLICATE_OS_INSTALLATION
+                )
+                self.sys_resolution.create_issue(
+                    IssueType.DUPLICATE_OS_INSTALLATION,
+                    ContextType.SYSTEM,
+                )
+                return
+
+    async def approve_check(self, reference: str | None = None) -> bool:
+        """Approve check if it is affected by issue."""
+        # Check all partitions for duplicates since issue is created without reference
+        for device_spec, _, _ in _get_device_specifications():
+            resolved = await self.sys_dbus.udisks2.resolve_device(device_spec)
+            if resolved and len(resolved) > 1:
+                return True
+        return False
+
+    @property
+    def issue(self) -> IssueType:
+        """Return a IssueType enum."""
+        return IssueType.DUPLICATE_OS_INSTALLATION
+
+    @property
+    def context(self) -> ContextType:
+        """Return a ContextType enum."""
+        return ContextType.SYSTEM
+
+    @property
+    def states(self) -> list[CoreState]:
+        """Return a list of valid states when this check can run."""
+        return [CoreState.SETUP]
@@ -21,6 +21,9 @@ class CheckMultipleDataDisks(CheckBase):
 
     async def run_check(self) -> None:
         """Run check if not affected by issue."""
+        if not self.sys_os.available:
+            return
+
         for block_device in self.sys_dbus.udisks2.block_devices:
             if self._block_device_has_name_issue(block_device):
                 self.sys_resolution.create_issue(
@@ -35,6 +38,9 @@ class CheckMultipleDataDisks(CheckBase):
 
     async def approve_check(self, reference: str | None = None) -> bool:
         """Approve check if it is affected by issue."""
+        if not reference:
+            return False
+
         resolved = await self.sys_dbus.udisks2.resolve_device(
             DeviceSpecification(path=Path(reference))
         )
@@ -43,7 +49,7 @@ class CheckMultipleDataDisks(CheckBase):
     def _block_device_has_name_issue(self, block_device: UDisks2Block) -> bool:
         """Return true if filesystem block device incorrectly has data disk name."""
         return (
-            block_device.filesystem
+            block_device.filesystem is not None
             and block_device.id_label == FILESYSTEM_LABEL_DATA_DISK
             and block_device.device != self.sys_dbus.agent.datadisk.current_device
        )
|
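The switch from bare `block_device.filesystem` to `block_device.filesystem is not None` closes a subtle gap: a truthiness test can silently skip an object that exists but evaluates falsy. A small sketch with a hypothetical `FakeFilesystem` (truthiness tied to mount state, purely for illustration) shows the difference:

```python
class FakeFilesystem:
    """Stand-in for a filesystem wrapper whose truthiness is overloaded."""

    def __init__(self, mount_points: list[str]) -> None:
        self.mount_points = mount_points

    def __bool__(self) -> bool:
        # Hypothetical: the object is "truthy" only while mounted.
        return bool(self.mount_points)


fs = FakeFilesystem(mount_points=[])  # present, but not mounted
print(bool(fs))        # False -- a bare "if fs:" would skip this device
print(fs is not None)  # True  -- the explicit check still sees it
```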
@@ -19,12 +19,12 @@ class CheckNetworkInterfaceIPV4(CheckBase):

     async def run_check(self) -> None:
         """Run check if not affected by issue."""
-        for interface in self.sys_dbus.network.interfaces:
-            if CheckNetworkInterfaceIPV4.check_interface(interface):
+        for inet in self.sys_dbus.network.interfaces:
+            if CheckNetworkInterfaceIPV4.check_interface(inet):
                 self.sys_resolution.create_issue(
                     IssueType.IPV4_CONNECTION_PROBLEM,
                     ContextType.SYSTEM,
-                    interface.name,
+                    inet.interface_name,
                 )

     async def approve_check(self, reference: str | None = None) -> bool:
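The loop variable rename to `inet` and the switch to `inet.interface_name` track the attribute the NetworkManager wrapper actually exposes. The `check_interface` predicate itself lies outside this hunk; a hedged sketch of the kind of test it performs, using made-up dataclasses (Python 3.10+) rather than Supervisor's real interface objects, might look like this:

```python
from dataclasses import dataclass


@dataclass
class FakeIpv4Config:
    addresses: list[str]


@dataclass
class FakeInterface:
    interface_name: str
    ipv4: FakeIpv4Config | None


def has_ipv4_problem(inet: FakeInterface) -> bool:
    """Flag an interface that carries no usable IPv4 address."""
    return inet.ipv4 is None or not inet.ipv4.addresses


print(has_ipv4_problem(FakeInterface("eth0", FakeIpv4Config([]))))  # True
print(has_ipv4_problem(FakeInterface("eth0", FakeIpv4Config(["192.168.1.5/24"]))))  # False
```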
@@ -64,10 +64,11 @@ class UnhealthyReason(StrEnum):
     """Reasons for unsupported status."""

     DOCKER = "docker"
+    DUPLICATE_OS_INSTALLATION = "duplicate_os_installation"
     OSERROR_BAD_MESSAGE = "oserror_bad_message"
     PRIVILEGED = "privileged"
-    SUPERVISOR = "supervisor"
     SETUP = "setup"
+    SUPERVISOR = "supervisor"
     UNTRUSTED = "untrusted"


@@ -83,6 +84,7 @@ class IssueType(StrEnum):
     DEVICE_ACCESS_MISSING = "device_access_missing"
     DISABLED_DATA_DISK = "disabled_data_disk"
     DNS_LOOP = "dns_loop"
+    DUPLICATE_OS_INSTALLATION = "duplicate_os_installation"
     DNS_SERVER_FAILED = "dns_server_failed"
     DNS_SERVER_IPV6_ERROR = "dns_server_ipv6_error"
     DOCKER_CONFIG = "docker_config"
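Both additions are `StrEnum` members, so they compare equal to, and format as, their lowercase string values, which is convenient wherever these values end up serialized. A two-line check against a stripped-down stand-in (assuming Python 3.11+, where `enum.StrEnum` is available):

```python
from enum import StrEnum


class UnhealthyReason(StrEnum):  # stand-in with just the new member
    DUPLICATE_OS_INSTALLATION = "duplicate_os_installation"


assert UnhealthyReason.DUPLICATE_OS_INSTALLATION == "duplicate_os_installation"
assert f"{UnhealthyReason.DUPLICATE_OS_INSTALLATION}" == "duplicate_os_installation"
```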
@@ -1,6 +1,6 @@
 """Data objects."""

-from uuid import UUID, uuid4
+from uuid import uuid4

 import attr

@@ -20,7 +20,7 @@ class Issue:
     type: IssueType = attr.ib()
     context: ContextType = attr.ib()
     reference: str | None = attr.ib(default=None)
-    uuid: UUID = attr.ib(factory=lambda: uuid4().hex, eq=False, init=False)
+    uuid: str = attr.ib(factory=lambda: uuid4().hex, eq=False, init=False)


 @attr.s(frozen=True, slots=True)
@@ -30,7 +30,7 @@ class Suggestion:
     type: SuggestionType = attr.ib()
     context: ContextType = attr.ib()
     reference: str | None = attr.ib(default=None)
-    uuid: UUID = attr.ib(factory=lambda: uuid4().hex, eq=False, init=False)
+    uuid: str = attr.ib(factory=lambda: uuid4().hex, eq=False, init=False)


 @attr.s(frozen=True, slots=True)
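The annotation change from `UUID` to `str` fixes a long-standing mismatch rather than changing behavior: `uuid4().hex` already yields a plain string, so the attrs field never actually held a `UUID` object. A quick interpreter check confirms it (the round-trip assert just shows no information is lost):

```python
from uuid import UUID, uuid4

u = uuid4()
print(type(u))      # <class 'uuid.UUID'>
print(type(u.hex))  # <class 'str'> -- hence the field annotation is str, not UUID
assert UUID(u.hex) == u  # the hex string round-trips if a UUID is ever needed
```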