Mirror of https://github.com/home-assistant/supervisor.git (synced 2025-09-26 21:39:28 +00:00)
Compare commits
514 Commits
.dockerignore (new file, 9 lines)

@@ -0,0 +1,9 @@
# General files
.git
.github

# Test related files
.tox

# Temporary files
**/__pycache__
.gitmodules (vendored, new file, 3 lines)

@@ -0,0 +1,3 @@
[submodule "home-assistant-polymer"]
    path = home-assistant-polymer
    url = https://github.com/home-assistant/home-assistant-polymer
@@ -2,7 +2,7 @@ sudo: false
matrix:
  fast_finish: true
  include:
    - python: "3.5"
    - python: "3.6"

cache:
  directories:
API.md (403 lines)

@@ -1,10 +1,11 @@
# HassIO Server
# Hass.io Server

## HassIO REST API
## Hass.io RESTful API

Interface for HomeAssistant to control things from supervisor.
Interface for Home Assistant to control things from supervisor.

On error / Code 400:

On error:
```json
{
  "result": "error",
@@ -12,7 +13,8 @@ On error:
}
```

On success
On success / Code 200:

```json
{
  "result": "ok",
@@ -20,10 +22,11 @@ On success
}
```

### HassIO
For access to API you need set the `X-HASSIO-KEY` they will be available for Add-ons/HomeAssistant with envoriment `HASSIO_TOKEN`.

### Hass.io

- GET `/supervisor/ping`

- GET `/supervisor/info`

The addons from `addons` are only installed one.
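For illustration, a minimal Python sketch of the token handshake described above. The internal host name `http://hassio` and the use of the third-party `requests` library are assumptions, not part of this API document:

```python
"""Minimal sketch: call the Supervisor API with the X-HASSIO-KEY header."""
import os

import requests  # third-party HTTP client, assumed available

# Add-ons receive their token through the HASSIO_TOKEN environment variable.
token = os.environ["HASSIO_TOKEN"]

# "http://hassio" is an assumed internal host name; adjust for your setup.
resp = requests.get(
    "http://hassio/supervisor/info",
    headers={"X-HASSIO-KEY": token},
)

payload = resp.json()
if payload.get("result") != "ok":  # every reply carries "result" (see above)
    raise RuntimeError(payload)
print(payload)
```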
@@ -32,16 +35,21 @@ The addons from `addons` are only installed one.
{
  "version": "INSTALL_VERSION",
  "last_version": "LAST_VERSION",
  "arch": "armhf|aarch64|i386|amd64",
  "beta_channel": "true|false",
  "timezone": "TIMEZONE",
  "wait_boot": "int",
  "addons": [
    {
      "name": "xy bla",
      "slug": "xy",
      "description": "description",
      "repository": "12345678|null",
      "version": "LAST_VERSION",
      "installed": "INSTALL_VERSION",
      "detached": "bool",
      "description": "description"
      "icon": "bool",
      "logo": "bool",
      "state": "started|stopped",
    }
  ],
  "addons_repositories": [
@@ -50,37 +58,10 @@ The addons from `addons` are only installed one.
}
```

- GET `/supervisor/addons`

Get all available addons

```json
{
  "addons": [
    {
      "name": "xy bla",
      "slug": "xy",
      "repository": "core|local|REP_ID",
      "version": "LAST_VERSION",
      "installed": "none|INSTALL_VERSION",
      "detached": "bool",
      "description": "description"
    }
  ],
  "repositories": [
    {
      "slug": "12345678",
      "name": "Repitory Name",
      "source": "URL_OF_REPOSITORY",
      "url": "null|WEBSITE",
      "maintainer": "null|BLA BLU <fla@dld.ch>"
    }
  ]
}
```

- POST `/supervisor/update`

Optional:

```json
{
  "version": "VERSION"
@@ -88,9 +69,12 @@ Optional:
```

- POST `/supervisor/options`

```json
{
  "beta_channel": "true|false",
  "timezone": "TIMEZONE",
  "wait_boot": "int",
  "addons_repositories": [
    "REPO_URL"
  ]
@@ -103,64 +87,195 @@ Reload addons/version.

- GET `/supervisor/logs`

Output the raw docker log
Output is the raw docker log.

- GET `/supervisor/stats`
```json
{
  "cpu_percent": 0.0,
  "memory_usage": 283123,
  "memory_limit": 329392,
  "network_tx": 0,
  "network_rx": 0,
  "blk_read": 0,
  "blk_write": 0
}
```
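The stats payloads above report absolute byte counts; a percentage has to be derived from the usage and limit fields, for example:

```python
"""Derive a memory percentage from the stats payload shown above."""
stats = {"memory_usage": 283123, "memory_limit": 329392}

mem_percent = 100 * stats["memory_usage"] / stats["memory_limit"]
print(f"{mem_percent:.1f}%")  # ~86.0% for the sample numbers above
```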
### Snapshot

- GET `/snapshots`

```json
{
  "snapshots": [
    {
      "slug": "SLUG",
      "date": "ISO",
      "name": "Custom name",
      "type": "full|partial"
    }
  ]
}
```

- POST `/snapshots/reload`

- POST `/snapshots/new/full`

```json
{
  "name": "Optional"
}
```

- POST `/snapshots/new/partial`

```json
{
  "name": "Optional",
  "addons": ["ADDON_SLUG"],
  "folders": ["FOLDER_NAME"]
}
```

- POST `/snapshots/reload`

- GET `/snapshots/{slug}/info`

```json
{
  "slug": "SNAPSHOT ID",
  "type": "full|partial",
  "name": "custom snapshot name / description",
  "date": "ISO",
  "size": "SIZE_IN_MB",
  "homeassistant": "version",
  "addons": [
    {
      "slug": "ADDON_SLUG",
      "name": "NAME",
      "version": "INSTALLED_VERSION"
    }
  ],
  "repositories": ["URL"],
  "folders": ["NAME"]
}
```

- POST `/snapshots/{slug}/remove`
- POST `/snapshots/{slug}/restore/full`
- POST `/snapshots/{slug}/restore/partial`

```json
{
  "homeassistant": "bool",
  "addons": ["ADDON_SLUG"],
  "folders": ["FOLDER_NAME"]
}
```
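A sketch of creating a partial snapshot with the payload documented above; the host name, add-on slug, and folder names are illustrative assumptions:

```python
"""Sketch: trigger a partial snapshot via POST /snapshots/new/partial."""
import os

import requests  # third-party HTTP client, assumed available

body = {
    "name": "before-upgrade",     # optional free-text name
    "addons": ["core_ssh"],       # illustrative add-on slug
    "folders": ["ssl", "share"],  # illustrative folder names
}
resp = requests.post(
    "http://hassio/snapshots/new/partial",  # assumed internal host name
    json=body,
    headers={"X-HASSIO-KEY": os.environ["HASSIO_TOKEN"]},
)
print(resp.json())
```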
### Host

- POST `/host/reload`
- POST `/host/shutdown`

- POST `/host/reboot`

- GET `/host/info`
See HostControl info command.

```json
{
  "type": "",
  "version": "",
  "last_version": "",
  "features": ["shutdown", "reboot", "update", "network_info", "network_control"],
  "features": ["shutdown", "reboot", "update", "hostname", "network_info", "network_control"],
  "hostname": "",
  "os": ""
  "os": "",
  "audio": {
    "input": "0,0",
    "output": "0,0"
  }
}
```

- POST `/host/options`

```json
{
  "audio_input": "0,0",
  "audio_output": "0,0"
}
```

- POST `/host/update`

Optional:

```json
{
  "version": "VERSION"
}
```

- GET `/host/hardware`
```json
{
  "serial": ["/dev/xy"],
  "input": ["Input device name"],
  "disk": ["/dev/sdax"],
  "gpio": ["gpiochip0", "gpiochip100"],
  "audio": {
    "CARD_ID": {
      "name": "xy",
      "type": "microphone",
      "devices": {
        "DEV_ID": "type of device"
      }
    }
  }
}
```

- POST `/host/reload`

### Network

- GET `/network/info`

- POST `/network/options`
```json
{
  "hostname": "",
  "mode": "dhcp|fixed",
  "ssid": "",
  "ip": "",
  "netmask": "",
  "gateway": ""
  "hostname": ""
}
```

### HomeAssistant
- POST `/network/options`

```json
{
  "hostname": "",
}
```

### Home Assistant

- GET `/homeassistant/info`

```json
{
  "version": "INSTALL_VERSION",
  "last_version": "LAST_VERSION"
  "last_version": "LAST_VERSION",
  "image": "str",
  "custom": "bool -> if custom image",
  "boot": "bool",
  "port": 8123,
  "ssl": "bool",
  "watchdog": "bool"
}
```

- POST `/homeassistant/update`

Optional:

```json
{
  "version": "VERSION"
@@ -169,60 +284,190 @@ Optional:

- GET `/homeassistant/logs`

Output the raw docker log
Output is the raw Docker log.

### REST API addons
- POST `/homeassistant/restart`
- POST `/homeassistant/check`
- POST `/homeassistant/start`
- POST `/homeassistant/stop`

- POST `/homeassistant/options`

- GET `/addons/{addon}/info`
```json
{
  "version": "VERSION",
  "last_version": "LAST_VERSION",
  "state": "started|stopped",
  "boot": "auto|manual",
  "options": {},
  "image": "Optional|null",
  "last_version": "Optional for custom image|null",
  "port": "port for access hass",
  "ssl": "bool",
  "password": "",
  "watchdog": "bool"
}
```

Image with `null` and last_version with `null` reset this options.

- POST/GET `/homeassistant/api`

Proxy to real home-assistant instance.

- GET `/homeassistant/websocket`

Proxy to real websocket instance.

- GET `/homeassistant/stats`
```json
{
  "cpu_percent": 0.0,
  "memory_usage": 283123,
  "memory_limit": 329392,
  "network_tx": 0,
  "network_rx": 0,
  "blk_read": 0,
  "blk_write": 0
}
```

### RESTful for API addons

- GET `/addons`

Get all available addons.

```json
{
  "addons": [
    {
      "name": "xy bla",
      "slug": "xy",
      "description": "description",
      "arch": ["armhf", "aarch64", "i386", "amd64"],
      "repository": "core|local|REP_ID",
      "version": "LAST_VERSION",
      "installed": "none|INSTALL_VERSION",
      "detached": "bool",
      "build": "bool",
      "url": "null|url",
      "icon": "bool",
      "logo": "bool"
    }
  ],
  "repositories": [
    {
      "slug": "12345678",
      "name": "Repitory Name|unknown",
      "source": "URL_OF_REPOSITORY",
      "url": "WEBSITE|REPOSITORY",
      "maintainer": "BLA BLU <fla@dld.ch>|unknown"
    }
  ]
}
```

- POST `/addons/reload`
- GET `/addons/{addon}/info`

```json
{
  "name": "xy bla",
  "description": "description",
  "long_description": "null|markdown",
  "auto_update": "bool",
  "url": "null|url of addon",
  "detached": "bool",
  "repository": "12345678|null",
  "version": "null|VERSION_INSTALLED",
  "last_version": "LAST_VERSION",
  "state": "none|started|stopped",
  "boot": "auto|manual",
  "build": "bool",
  "options": "{}",
  "network": "{}|null",
  "host_network": "bool",
  "host_ipc": "bool",
  "host_dbus": "bool",
  "privileged": ["NET_ADMIN", "SYS_ADMIN"],
  "devices": ["/dev/xy"],
  "auto_uart": "bool",
  "icon": "bool",
  "logo": "bool",
  "changelog": "bool",
  "hassio_api": "bool",
  "homeassistant_api": "bool",
  "stdin": "bool",
  "webui": "null|http(s)://[HOST]:port/xy/zx",
  "gpio": "bool",
  "audio": "bool",
  "audio_input": "null|0,0",
  "audio_output": "null|0,0"
}
```

- GET `/addons/{addon}/icon`

- GET `/addons/{addon}/logo`

- GET `/addons/{addon}/changelog`

- POST `/addons/{addon}/options`

```json
{
  "boot": "auto|manual",
  "auto_update": "bool",
  "network": {
    "CONTAINER": "port|[ip, port]"
  },
  "options": {},
  "audio_output": "null|0,0",
  "audio_input": "null|0,0"
}
```

Reset custom network/audio/options, set it `null`.

- POST `/addons/{addon}/start`

- POST `/addons/{addon}/stop`

- POST `/addons/{addon}/install`
Optional:
```json
{
  "version": "VERSION"
}
```

- POST `/addons/{addon}/uninstall`

- POST `/addons/{addon}/update`
Optional:
```json
{
  "version": "VERSION"
}
```

- GET `/addons/{addon}/logs`

Output the raw docker log
Output is the raw Docker log.

- POST `/addons/{addon}/restart`

- POST `/addons/{addon}/rebuild`

Only supported for local build addons

- POST `/addons/{addon}/stdin`

Write data to add-on stdin

- GET `/addons/{addon}/stats`
```json
{
  "cpu_percent": 0.0,
  "memory_usage": 283123,
  "memory_limit": 329392,
  "network_tx": 0,
  "network_rx": 0,
  "blk_read": 0,
  "blk_write": 0
}
```
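A sketch of the stdin endpoint above; sending the request body as raw bytes is an assumption based on "Write data to add-on stdin", and the slug is illustrative:

```python
"""Sketch: write one line to an add-on's stdin."""
import os

import requests  # third-party HTTP client, assumed available

resp = requests.post(
    "http://hassio/addons/core_ssh/stdin",  # assumed host; illustrative slug
    data=b"status\n",                       # request body forwarded to stdin
    headers={"X-HASSIO-KEY": os.environ["HASSIO_TOKEN"]},
)
print(resp.json())
```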
## Host Control

Communicate over unix socket with a host daemon.
Communicate over UNIX socket with a host daemon.

- commands

```
# info
-> {'type', 'version', 'last_version', 'features', 'hostname'}
@@ -230,8 +475,10 @@ Communicate over unix socket with a host daemon.
# shutdown
# host-update [v]

# hostname xy

# network info
# network hostname xy
-> {}
# network wlan ssd xy
# network wlan password xy
# network int ip xy
@@ -239,10 +486,12 @@ Communicate over unix socket with a host daemon.
# network int route xy
```

features:
Features:

- shutdown
- reboot
- update
- hostname
- network_info
- network_control
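The command protocol above is plain text over a UNIX socket; a minimal client sketch follows (the socket path is a placeholder, since the document does not name it):

```python
"""Sketch: send an `info` command to the host daemon over a UNIX socket."""
import socket

SOCKET_PATH = "/path/to/host-daemon.sock"  # placeholder; not named above

with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as sock:
    sock.connect(SOCKET_PATH)
    sock.sendall(b"info\n")   # commands are plain text lines
    reply = sock.recv(4096)   # e.g. {'type', 'version', ...} as shown above
    print(reply.decode())
```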
Dockerfile (new file, 27 lines)

@@ -0,0 +1,27 @@
ARG BUILD_FROM
FROM $BUILD_FROM

# Add env
ENV LANG C.UTF-8

# Setup base
RUN apk add --no-cache \
        python3 \
        git \
        socat \
        libstdc++ \
    && apk add --no-cache --virtual .build-dependencies \
        make \
        python3-dev \
        g++ \
    && pip3 install --no-cache-dir \
        uvloop \
        cchardet \
    && apk del .build-dependencies

# Install HassIO
COPY . /usr/src/hassio
RUN pip3 install --no-cache-dir /usr/src/hassio \
    && rm -rf /usr/src/hassio

CMD [ "python3", "-m", "hassio" ]
LICENSE (218 lines)

@@ -1,29 +1,201 @@
BSD 3-Clause License
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/

Copyright (c) 2017, Pascal Vizeli
All rights reserved.
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Definitions.

* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.

* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.

* Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.

"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.

"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.

"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).

"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.

"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."

"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.

2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.

3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.

4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:

(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and

(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and

(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and

(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.

You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.

5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.

6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.

7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.

8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.

9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.

END OF TERMS AND CONDITIONS

APPENDIX: How to apply the Apache License to your work.

To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "{}"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.

Copyright 2017 Pascal Vizeli

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
MANIFEST.in (new file, 3 lines)

@@ -0,0 +1,3 @@
include LICENSE.md
graft hassio
recursive-exclude * *.py[co]
README.md (36 lines)

@@ -1,30 +1,18 @@
# HassIO
First private cloud solution for home automation.
# Hass.io

It is a docker image (supervisor) they manage HomeAssistant docker and give a interface to control itself over UI. It have a own eco system with addons to extend the functionality in a easy way.
## First private cloud solution for home automation

Hass.io is a Docker-based system for managing your Home Assistant installation
and related applications. The system is controlled via Home Assistant which
communicates with the Supervisor. The Supervisor provides an API to manage the
installation. This includes changing network settings or installing
and updating software.

![]()

[HassIO-Addons](https://github.com/home-assistant/hassio-addons) | [HassIO-Build](https://github.com/home-assistant/hassio-build)
- [Hass.io Addons](https://github.com/home-assistant/hassio-addons)
- [Hass.io Build](https://github.com/home-assistant/hassio-build)

**HassIO is at the moment on development and not ready to use productive!**
## Installation

## Feature in progress
- Backup/Restore
- DHCP-Server addon

# HomeAssistant

## SSL

All addons they can create SSL certs do that in same schema. So you can put follow lines to your `configuration.yaml`.
```yaml
http:
  ssl_certificate: /ssl/fullchain.pem
  ssl_key: /ssl/privkey.pem
```

## Install on a own System

- Generic Linux installation: https://github.com/home-assistant/hassio-build/tree/master/install
- Hardware Images: https://github.com/home-assistant/hassio-build/blob/master/meta-hassio/
Installation instructions can be found at <https://home-assistant.io/hassio>.
@@ -1,5 +1,6 @@
"""Main file for HassIO."""
import asyncio
from concurrent.futures import ThreadPoolExecutor
import logging
import sys

@@ -9,25 +10,48 @@ import hassio.core as core
_LOGGER = logging.getLogger(__name__)


def attempt_use_uvloop():
    """Attempt to use uvloop."""
    try:
        import uvloop
        asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
    except ImportError:
        pass


# pylint: disable=invalid-name
if __name__ == "__main__":
    bootstrap.initialize_logging()
    attempt_use_uvloop()
    loop = asyncio.get_event_loop()

    if not bootstrap.check_environment():
        exit(1)
        sys.exit(1)

    loop = asyncio.get_event_loop()
    hassio = core.HassIO(loop)
    # init executor pool
    executor = ThreadPoolExecutor(thread_name_prefix="SyncWorker")
    loop.set_default_executor(executor)

    _LOGGER.info("Run Hassio setup")
    _LOGGER.info("Initialize Hassio setup")
    coresys = bootstrap.initialize_coresys(loop)
    hassio = core.HassIO(coresys)

    bootstrap.migrate_system_env(coresys)

    _LOGGER.info("Setup HassIO")
    loop.run_until_complete(hassio.setup())

    _LOGGER.info("Start Hassio task")
    loop.call_soon_threadsafe(loop.create_task, hassio.start())
    loop.call_soon_threadsafe(bootstrap.reg_signal, loop, hassio)
    loop.call_soon_threadsafe(bootstrap.reg_signal, loop)

    loop.run_forever()
    loop.close()
    try:
        _LOGGER.info("Run HassIO")
        loop.run_forever()
    finally:
        _LOGGER.info("Stopping HassIO")
        loop.run_until_complete(hassio.stop())
        executor.shutdown(wait=False)
        loop.close()

    _LOGGER.info("Close Hassio")
    sys.exit(hassio.exit_code)
    sys.exit(0)
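The new entry point wraps `run_forever()` in `try`/`finally` so teardown always runs, even when the loop exits abnormally; a standalone sketch of that pattern (the timer stands in for a real shutdown signal):

```python
"""Standalone sketch of the run/stop pattern used in the entry point above."""
import asyncio


async def cleanup():
    """Stand-in for hassio.stop(): flush and close subsystems."""
    await asyncio.sleep(0)


loop = asyncio.new_event_loop()
loop.call_later(0.1, loop.stop)  # simulate an external shutdown request
try:
    loop.run_forever()           # blocks until loop.stop() is called
finally:
    loop.run_until_complete(cleanup())
    loop.close()
```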
@@ -1,206 +1,132 @@
"""Init file for HassIO addons."""
import asyncio
import logging
import shutil

from .data import AddonsData
from .git import AddonsRepoHassIO, AddonsRepoCustom
from ..const import STATE_STOPPED, STATE_STARTED
from ..dock.addon import DockerAddon
from .addon import Addon
from .repository import Repository
from .data import Data
from ..const import REPOSITORY_CORE, REPOSITORY_LOCAL, BOOT_AUTO
from ..coresys import CoreSysAttributes

_LOGGER = logging.getLogger(__name__)

BUILTIN_REPOSITORIES = set((REPOSITORY_CORE, REPOSITORY_LOCAL))

class AddonManager(AddonsData):

class AddonManager(CoreSysAttributes):
    """Manage addons inside HassIO."""

    def __init__(self, config, loop, dock):
    def __init__(self, coresys):
        """Initialize docker base wrapper."""
        super().__init__(config)
        self.coresys = coresys
        self.data = Data(coresys)
        self.addons_obj = {}
        self.repositories_obj = {}

        self.loop = loop
        self.dock = dock
        self.repositories = []
        self.dockers = {}
    @property
    def list_addons(self):
        """Return a list of all addons."""
        return list(self.addons_obj.values())

    async def prepare(self, arch):
    @property
    def list_repositories(self):
        """Return list of addon repositories."""
        return list(self.repositories_obj.values())

    def get(self, addon_slug):
        """Return a adddon from slug."""
        return self.addons_obj.get(addon_slug)

    async def load(self):
        """Startup addon management."""
        self.arch = arch
        self.data.reload()

        # init hassio repository
        self.repositories.append(AddonsRepoHassIO(self.config, self.loop))
        # init hassio built-in repositories
        repositories = \
            set(self._config.addons_repositories) | BUILTIN_REPOSITORIES

        # init custom repositories
        for url in self.config.addons_repositories:
            self.repositories.append(
                AddonsRepoCustom(self.config, self.loop, url))

        # load addon repository
        tasks = [addon.load() for addon in self.repositories]
        if tasks:
            await asyncio.wait(tasks, loop=self.loop)

        # read data from repositories
        self.read_data_from_repositories()
        self.merge_update_config()

        # load installed addons
        for addon in self.list_installed:
            self.dockers[addon] = DockerAddon(
                self.config, self.loop, self.dock, self, addon)
            await self.dockers[addon].attach()

    async def add_git_repository(self, url):
        """Add a new custom repository."""
        if url in self.config.addons_repositories:
            _LOGGER.warning("Repository already exists %s", url)
            return False

        repo = AddonsRepoCustom(self.config, self.loop, url)

        if not await repo.load():
            _LOGGER.error("Can't load from repository %s", url)
            return False

        self.config.addons_repositories = url
        self.repositories.append(repo)
        return True

    def drop_git_repository(self, url):
        """Remove a custom repository."""
        for repo in self.repositories:
            if repo.url == url:
                self.repositories.remove(repo)
                self.config.drop_addon_repository(url)
                repo.remove()
                return True

        return False
        # init custom repositories & load addons
        await self.load_repositories(repositories)

    async def reload(self):
        """Update addons from repo and reload list."""
        tasks = [addon.pull() for addon in self.repositories]
        if not tasks:
            return

        await asyncio.wait(tasks, loop=self.loop)
        tasks = [repository.update() for repository in
                 self.repositories_obj.values()]
        if tasks:
            await asyncio.wait(tasks, loop=self._loop)

        # read data from repositories
        self.read_data_from_repositories()
        self.merge_update_config()
        self.data.reload()

        # remove stalled addons
        for addon in self.list_detached:
            _LOGGER.warning("Dedicated addon '%s' found!", addon)
        # update addons
        await self.load_addons()

    async def auto_boot(self, start_type):
        """Boot addons with mode auto."""
        boot_list = self.list_startup(start_type)
        tasks = [self.start(addon) for addon in boot_list]
    async def load_repositories(self, list_repositories):
        """Add a new custom repository."""
        new_rep = set(list_repositories)
        old_rep = set(self.repositories_obj)

        _LOGGER.info("Startup %s run %d addons", start_type, len(tasks))
        # add new repository
        async def _add_repository(url):
            """Helper function to async add repository."""
            repository = Repository(self.coresys, url)
            if not await repository.load():
                _LOGGER.error("Can't load from repository %s", url)
                return
            self.repositories_obj[url] = repository

            # don't add built-in repository to config
            if url not in BUILTIN_REPOSITORIES:
                self._config.add_addon_repository(url)

        tasks = [_add_repository(url) for url in new_rep - old_rep]
        if tasks:
            await asyncio.wait(tasks, loop=self.loop)
            await asyncio.wait(tasks, loop=self._loop)

    async def install(self, addon, version=None):
        """Install a addon."""
        if not self.exists_addon(addon):
            _LOGGER.error("Addon %s not exists for install", addon)
            return False
        # del new repository
        for url in old_rep - new_rep - BUILTIN_REPOSITORIES:
            self.repositories_obj.pop(url).remove()
            self._config.drop_addon_repository(url)

        if self.is_installed(addon):
            _LOGGER.error("Addon %s is already installed", addon)
            return False
        # update data
        self.data.reload()
        await self.load_addons()

        if not self.path_data(addon).is_dir():
            _LOGGER.info("Create Home-Assistant addon data folder %s",
                         self.path_data(addon))
            self.path_data(addon).mkdir()
    async def load_addons(self):
        """Update/add internal addon store."""
        all_addons = set(self.data.system) | set(self.data.cache)

        addon_docker = DockerAddon(
            self.config, self.loop, self.dock, self, addon)
        # calc diff
        add_addons = all_addons - set(self.addons_obj)
        del_addons = set(self.addons_obj) - all_addons

        version = version or self.get_last_version(addon)
        if not await addon_docker.install(version):
            return False
        _LOGGER.info("Load addons: %d all - %d new - %d remove",
                     len(all_addons), len(add_addons), len(del_addons))

        self.dockers[addon] = addon_docker
        self.set_addon_install(addon, version)
        return True
        # new addons
        tasks = []
        for addon_slug in add_addons:
            addon = Addon(self.coresys, addon_slug)

    async def uninstall(self, addon):
        """Remove a addon."""
        if not self.is_installed(addon):
            _LOGGER.error("Addon %s is already uninstalled", addon)
            return False
            tasks.append(addon.load())
            self.addons_obj[addon_slug] = addon

        if addon not in self.dockers:
            _LOGGER.error("No docker found for addon %s", addon)
            return False
        if tasks:
            await asyncio.wait(tasks, loop=self._loop)

        if not await self.dockers[addon].remove():
            return False
        # remove
        for addon_slug in del_addons:
            self.addons_obj.pop(addon_slug)

        if self.path_data(addon).is_dir():
            _LOGGER.info("Remove Home-Assistant addon data folder %s",
                         self.path_data(addon))
            shutil.rmtree(str(self.path_data(addon)))
    async def auto_boot(self, stage):
        """Boot addons with mode auto."""
        tasks = []
        for addon in self.addons_obj.values():
            if addon.is_installed and addon.boot == BOOT_AUTO and \
                    addon.startup == stage:
                tasks.append(addon.start())

        self.dockers.pop(addon)
        self.set_addon_uninstall(addon)
        return True

    async def state(self, addon):
        """Return running state of addon."""
        if addon not in self.dockers:
            _LOGGER.error("No docker found for addon %s", addon)
            return

        if await self.dockers[addon].is_running():
            return STATE_STARTED
        return STATE_STOPPED

    async def start(self, addon):
        """Set options and start addon."""
        if addon not in self.dockers:
            _LOGGER.error("No docker found for addon %s", addon)
            return False

        if not self.write_addon_options(addon):
            _LOGGER.error("Can't write options for addon %s", addon)
            return False

        return await self.dockers[addon].run()

    async def stop(self, addon):
        """Stop addon."""
        if addon not in self.dockers:
            _LOGGER.error("No docker found for addon %s", addon)
            return False

        return await self.dockers[addon].stop()

    async def update(self, addon, version=None):
        """Update addon."""
        if addon not in self.dockers:
            _LOGGER.error("No docker found for addon %s", addon)
            return False

        version = version or self.get_last_version(addon)
        is_running = self.dockers[addon].is_running()

        # update
        if await self.dockers[addon].update(version):
            self.set_addon_update(addon, version)
            if is_running:
                await self.start(addon)
            return True
        return False

    async def logs(self, addon):
        """Return addons log output."""
        if addon not in self.dockers:
            _LOGGER.error("No docker found for addon %s", addon)
            return False

        return await self.dockers[addon].logs()
        _LOGGER.info("Startup %s run %d addons", stage, len(tasks))
        if tasks:
            await asyncio.wait(tasks, loop=self._loop)
            await asyncio.sleep(self._config.wait_boot, loop=self._loop)
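The rewritten `load_repositories` above reconciles the configured repository set against the loaded set with two set differences; the same pattern in isolation:

```python
"""Standalone sketch of the set-difference reconciliation used above."""


def reconcile(configured, loaded):
    """Return (to_add, to_remove) so that `loaded` matches `configured`."""
    new_rep = set(configured)
    old_rep = set(loaded)
    return new_rep - old_rep, old_rep - new_rep


to_add, to_remove = reconcile(
    ["https://example.com/repo-a", "https://example.com/repo-b"],
    ["https://example.com/repo-b", "https://example.com/repo-c"],
)
print(to_add)     # {'https://example.com/repo-a'}
print(to_remove)  # {'https://example.com/repo-c'}
```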
761
hassio/addons/addon.py
Normal file
761
hassio/addons/addon.py
Normal file
@@ -0,0 +1,761 @@
"""Init file for HassIO addons."""
from copy import deepcopy
import logging
import json
from pathlib import Path, PurePath
import re
import shutil
import tarfile
from tempfile import TemporaryDirectory

import voluptuous as vol
from voluptuous.humanize import humanize_error

from .validate import (
    validate_options, SCHEMA_ADDON_SNAPSHOT, RE_VOLUME)
from .utils import check_installed
from ..const import (
    ATTR_NAME, ATTR_VERSION, ATTR_SLUG, ATTR_DESCRIPTON, ATTR_BOOT, ATTR_MAP,
    ATTR_OPTIONS, ATTR_PORTS, ATTR_SCHEMA, ATTR_IMAGE, ATTR_REPOSITORY,
    ATTR_URL, ATTR_ARCH, ATTR_LOCATON, ATTR_DEVICES, ATTR_ENVIRONMENT,
    ATTR_HOST_NETWORK, ATTR_TMPFS, ATTR_PRIVILEGED, ATTR_STARTUP, ATTR_UUID,
    STATE_STARTED, STATE_STOPPED, STATE_NONE, ATTR_USER, ATTR_SYSTEM,
    ATTR_STATE, ATTR_TIMEOUT, ATTR_AUTO_UPDATE, ATTR_NETWORK, ATTR_WEBUI,
    ATTR_HASSIO_API, ATTR_AUDIO, ATTR_AUDIO_OUTPUT, ATTR_AUDIO_INPUT,
    ATTR_GPIO, ATTR_HOMEASSISTANT_API, ATTR_STDIN, ATTR_LEGACY, ATTR_HOST_IPC,
    ATTR_HOST_DBUS, ATTR_AUTO_UART)
from ..coresys import CoreSysAttributes
from ..docker.addon import DockerAddon
from ..utils.json import write_json_file, read_json_file

_LOGGER = logging.getLogger(__name__)

RE_WEBUI = re.compile(
    r"^(?:(?P<s_prefix>https?)|\[PROTO:(?P<t_proto>\w+)\])"
    r":\/\/\[HOST\]:\[PORT:(?P<t_port>\d+)\](?P<s_suffix>.*)$")


class Addon(CoreSysAttributes):
    """Hold data for an add-on inside HassIO."""

    def __init__(self, coresys, slug):
        """Initialize data holder."""
        self.coresys = coresys
        self.instance = DockerAddon(coresys, slug)

        self._id = slug

    async def load(self):
        """Async initialize of object."""
        if self.is_installed:
            await self.instance.attach()

    @property
    def slug(self):
        """Return slug/id of add-on."""
        return self._id

    @property
    def _mesh(self):
        """Return add-on data from system or cache."""
        return self._data.system.get(self._id, self._data.cache.get(self._id))

    @property
    def _data(self):
        """Return add-ons data storage."""
        return self._addons.data

    @property
    def is_installed(self):
        """Return True if an add-on is installed."""
        return self._id in self._data.system

    @property
    def is_detached(self):
        """Return True if add-on is detached."""
        return self._id not in self._data.cache

    @property
    def version_installed(self):
        """Return installed version."""
        return self._data.user.get(self._id, {}).get(ATTR_VERSION)

    def _set_install(self, version):
        """Set add-on as installed."""
        self._data.system[self._id] = deepcopy(self._data.cache[self._id])
        self._data.user[self._id] = {
            ATTR_OPTIONS: {},
            ATTR_VERSION: version,
        }
        self._data.save_data()

    def _set_uninstall(self):
        """Set add-on as uninstalled."""
        self._data.system.pop(self._id, None)
        self._data.user.pop(self._id, None)
        self._data.save_data()

    def _set_update(self, version):
        """Update version of add-on."""
        self._data.system[self._id] = deepcopy(self._data.cache[self._id])
        self._data.user[self._id][ATTR_VERSION] = version
        self._data.save_data()

    def _restore_data(self, user, system):
        """Restore data to add-on."""
        self._data.user[self._id] = deepcopy(user)
        self._data.system[self._id] = deepcopy(system)
        self._data.save_data()

    @property
    def options(self):
        """Return options with local changes."""
        if self.is_installed:
            return {
                **self._data.system[self._id][ATTR_OPTIONS],
                **self._data.user[self._id][ATTR_OPTIONS]
            }
        return self._data.cache[self._id][ATTR_OPTIONS]

    @options.setter
    def options(self, value):
        """Store user add-on options."""
        if value is None:
            self._data.user[self._id][ATTR_OPTIONS] = {}
        else:
            self._data.user[self._id][ATTR_OPTIONS] = deepcopy(value)

    @property
    def boot(self):
        """Return boot config, user settings taking priority."""
        if ATTR_BOOT in self._data.user.get(self._id, {}):
            return self._data.user[self._id][ATTR_BOOT]
        return self._mesh[ATTR_BOOT]

    @boot.setter
    def boot(self, value):
        """Store user boot options."""
        self._data.user[self._id][ATTR_BOOT] = value

    @property
    def auto_update(self):
        """Return True if auto update is enabled."""
        if ATTR_AUTO_UPDATE in self._data.user.get(self._id, {}):
            return self._data.user[self._id][ATTR_AUTO_UPDATE]
        return None

    @auto_update.setter
    def auto_update(self, value):
        """Set auto update."""
        self._data.user[self._id][ATTR_AUTO_UPDATE] = value

    @property
    def name(self):
        """Return name of add-on."""
        return self._mesh[ATTR_NAME]

    @property
    def timeout(self):
        """Return timeout of add-on for docker stop."""
        return self._mesh[ATTR_TIMEOUT]

    @property
    def uuid(self):
        """Return an API token for this add-on."""
        if self.is_installed:
            return self._data.user[self._id][ATTR_UUID]
        return None

    @property
    def description(self):
        """Return description of add-on."""
        return self._mesh[ATTR_DESCRIPTON]

    @property
    def long_description(self):
        """Return README.md as long_description."""
        readme = Path(self.path_location, 'README.md')

        # If readme does not exist
        if not readme.exists():
            return None

        # Return data
        with readme.open('r') as readme_file:
            return readme_file.read()

    @property
    def repository(self):
        """Return repository of add-on."""
        return self._mesh[ATTR_REPOSITORY]

    @property
    def last_version(self):
        """Return latest version of add-on."""
        if self._id in self._data.cache:
            return self._data.cache[self._id][ATTR_VERSION]
        return self.version_installed

    @property
    def startup(self):
        """Return startup type of add-on."""
        return self._mesh.get(ATTR_STARTUP)

    @property
    def ports(self):
        """Return ports of add-on."""
        if self.host_network or ATTR_PORTS not in self._mesh:
            return None

        if not self.is_installed or \
                ATTR_NETWORK not in self._data.user[self._id]:
            return self._mesh[ATTR_PORTS]
        return self._data.user[self._id][ATTR_NETWORK]

    @ports.setter
    def ports(self, value):
        """Set custom ports of add-on."""
        if value is None:
            self._data.user[self._id].pop(ATTR_NETWORK, None)
        else:
            new_ports = {}
            for container_port, host_port in value.items():
                if container_port in self._mesh.get(ATTR_PORTS, {}):
                    new_ports[container_port] = host_port

            self._data.user[self._id][ATTR_NETWORK] = new_ports

    @property
    def webui(self):
        """Return URL to webui or None."""
        if ATTR_WEBUI not in self._mesh:
            return None
        webui = RE_WEBUI.match(self._mesh[ATTR_WEBUI])

        # extract arguments
        t_port = webui.group('t_port')
        t_proto = webui.group('t_proto')
        s_prefix = webui.group('s_prefix') or ""
        s_suffix = webui.group('s_suffix') or ""

        # search host port for this docker port
        if self.ports is None:
            port = t_port
        else:
            port = self.ports.get(f"{t_port}/tcp", t_port)

        # for interface config or port lists
        if isinstance(port, (tuple, list)):
            port = port[-1]

        # lookup the correct protocol from config
        if t_proto:
            proto = 'https' if self.options[t_proto] else 'http'
        else:
            proto = s_prefix

        return f"{proto}://[HOST]:{port}{s_suffix}"

    @property
    def host_network(self):
        """Return True if add-on runs on the host network."""
        return self._mesh[ATTR_HOST_NETWORK]

    @property
    def host_ipc(self):
        """Return True if add-on runs in the host IPC namespace."""
        return self._mesh[ATTR_HOST_IPC]

    @property
    def host_dbus(self):
        """Return True if add-on runs on the host D-Bus."""
        return self._mesh[ATTR_HOST_DBUS]

    @property
    def devices(self):
        """Return devices of add-on."""
        return self._mesh.get(ATTR_DEVICES)

    @property
    def auto_uart(self):
        """Return True if we should map all UART devices."""
        return self._mesh.get(ATTR_AUTO_UART)

    @property
    def tmpfs(self):
        """Return tmpfs of add-on."""
        return self._mesh.get(ATTR_TMPFS)

    @property
    def environment(self):
        """Return environment of add-on."""
        return self._mesh.get(ATTR_ENVIRONMENT)

    @property
    def privileged(self):
        """Return list of privileges."""
        return self._mesh.get(ATTR_PRIVILEGED)

    @property
    def legacy(self):
        """Return True if the add-on doesn't support hass labels."""
        return self._mesh.get(ATTR_LEGACY)

    @property
    def access_hassio_api(self):
        """Return True if the add-on has access to the hassio API."""
        return self._mesh[ATTR_HASSIO_API]

    @property
    def access_homeassistant_api(self):
        """Return True if the add-on has access to the Home-Assistant API proxy."""
        return self._mesh[ATTR_HOMEASSISTANT_API]

    @property
    def with_stdin(self):
        """Return True if the add-on uses stdin input."""
        return self._mesh[ATTR_STDIN]

    @property
    def with_gpio(self):
        """Return True if the add-on has access to the GPIO interface."""
        return self._mesh[ATTR_GPIO]

    @property
    def with_audio(self):
        """Return True if the add-on has access to audio."""
        return self._mesh[ATTR_AUDIO]

    @property
    def audio_output(self):
        """Return ALSA config for output or None."""
        if not self.with_audio:
            return None

        setting = self._config.audio_output
        if self.is_installed and \
                ATTR_AUDIO_OUTPUT in self._data.user[self._id]:
            setting = self._data.user[self._id][ATTR_AUDIO_OUTPUT]
        return setting

    @audio_output.setter
    def audio_output(self, value):
        """Set/remove custom audio output settings."""
        if value is None:
            self._data.user[self._id].pop(ATTR_AUDIO_OUTPUT, None)
        else:
            self._data.user[self._id][ATTR_AUDIO_OUTPUT] = value

    @property
    def audio_input(self):
        """Return ALSA config for input or None."""
        if not self.with_audio:
            return None

        setting = self._config.audio_input
        if self.is_installed and ATTR_AUDIO_INPUT in self._data.user[self._id]:
            setting = self._data.user[self._id][ATTR_AUDIO_INPUT]
        return setting

    @audio_input.setter
    def audio_input(self, value):
        """Set/remove custom audio input settings."""
        if value is None:
            self._data.user[self._id].pop(ATTR_AUDIO_INPUT, None)
        else:
            self._data.user[self._id][ATTR_AUDIO_INPUT] = value

    @property
    def url(self):
        """Return URL of add-on."""
        return self._mesh.get(ATTR_URL)

    @property
    def with_icon(self):
        """Return True if an icon exists."""
        return self.path_icon.exists()

    @property
    def with_logo(self):
        """Return True if a logo exists."""
        return self.path_logo.exists()

    @property
    def with_changelog(self):
        """Return True if a changelog exists."""
        return self.path_changelog.exists()

    @property
    def supported_arch(self):
        """Return list of supported architectures."""
        return self._mesh[ATTR_ARCH]

    @property
    def image(self):
        """Return image name of add-on."""
        addon_data = self._mesh

        # Repository with dockerhub images
        if ATTR_IMAGE in addon_data:
            return addon_data[ATTR_IMAGE].format(arch=self._arch)

        # local build
        return "{}/{}-addon-{}".format(
            addon_data[ATTR_REPOSITORY], self._arch,
            addon_data[ATTR_SLUG])

    @property
    def need_build(self):
        """Return True if this add-on needs a local build."""
        return ATTR_IMAGE not in self._mesh

    @property
    def map_volumes(self):
        """Return a dict of {volume: policy} from add-on."""
        volumes = {}
        for volume in self._mesh[ATTR_MAP]:
            result = RE_VOLUME.match(volume)
            volumes[result.group(1)] = result.group(2) or 'ro'

        return volumes

    @property
    def path_data(self):
        """Return add-on data path inside supervisor."""
        return Path(self._config.path_addons_data, self._id)

    @property
    def path_extern_data(self):
        """Return add-on data path external for docker."""
        return PurePath(self._config.path_extern_addons_data, self._id)

    @property
    def path_options(self):
        """Return path to add-on options."""
        return Path(self.path_data, "options.json")

    @property
    def path_location(self):
        """Return path to this add-on."""
        return Path(self._mesh[ATTR_LOCATON])

    @property
    def path_icon(self):
        """Return path to add-on icon."""
        return Path(self.path_location, 'icon.png')

    @property
    def path_logo(self):
        """Return path to add-on logo."""
        return Path(self.path_location, 'logo.png')

    @property
    def path_changelog(self):
        """Return path to add-on changelog."""
        return Path(self.path_location, 'CHANGELOG.md')

    def save_data(self):
        """Save data of add-on."""
        self._addons.data.save_data()

    def write_options(self):
        """Return True if add-on options were written to data."""
        schema = self.schema
        options = self.options

        try:
            schema(options)
            write_json_file(self.path_options, options)
        except vol.Invalid as ex:
            _LOGGER.error("Addon %s has invalid options: %s", self._id,
                          humanize_error(options, ex))
        except (OSError, json.JSONDecodeError) as err:
            _LOGGER.error("Addon %s can't write options: %s", self._id, err)
        else:
            return True

        return False

    @property
    def schema(self):
        """Create a schema for add-on options."""
        raw_schema = self._mesh[ATTR_SCHEMA]

        if isinstance(raw_schema, bool):
            return vol.Schema(dict)
        return vol.Schema(vol.All(dict, validate_options(raw_schema)))

    def test_udpate_schema(self):
        """Check if the existing config stays valid after an update."""
        if not self.is_installed or self.is_detached:
            return True

        # load next schema
        new_raw_schema = self._data.cache[self._id][ATTR_SCHEMA]
        default_options = self._data.cache[self._id][ATTR_OPTIONS]

        # if disabled
        if isinstance(new_raw_schema, bool):
            return True

        # merge options
        options = {
            **self._data.user[self._id][ATTR_OPTIONS],
            **default_options,
        }

        # create voluptuous
        new_schema = \
            vol.Schema(vol.All(dict, validate_options(new_raw_schema)))

        # validate
        try:
            new_schema(options)
        except vol.Invalid:
            return False
        return True

    async def install(self):
        """Install an add-on."""
        if self._arch not in self.supported_arch:
            _LOGGER.error(
                "Addon %s not supported on %s", self._id, self._arch)
            return False

        if self.is_installed:
            _LOGGER.error("Addon %s is already installed", self._id)
            return False

        if not self.path_data.is_dir():
            _LOGGER.info(
                "Create Home-Assistant addon data folder %s", self.path_data)
            self.path_data.mkdir()

        if not await self.instance.install(self.last_version):
            return False

        self._set_install(self.last_version)
        return True

    @check_installed
    async def uninstall(self):
        """Remove an add-on."""
        if not await self.instance.remove():
            return False

        if self.path_data.is_dir():
            _LOGGER.info(
                "Remove Home-Assistant addon data folder %s", self.path_data)
            shutil.rmtree(str(self.path_data))

        self._set_uninstall()
        return True

    async def state(self):
        """Return running state of add-on."""
        if not self.is_installed:
            return STATE_NONE

        if await self.instance.is_running():
            return STATE_STARTED
        return STATE_STOPPED

    @check_installed
    def start(self):
        """Set options and start add-on.

        Return a coroutine.
        """
        return self.instance.run()

    @check_installed
    def stop(self):
        """Stop add-on.

        Return a coroutine.
        """
        return self.instance.stop()

    @check_installed
    async def update(self):
        """Update add-on."""
        last_state = await self.state()

        if self.last_version == self.version_installed:
            _LOGGER.warning("No update available for Addon %s", self._id)
            return False

        if not await self.instance.update(self.last_version):
            return False
        self._set_update(self.last_version)

        # restore state
        if last_state == STATE_STARTED:
            await self.instance.run()
        return True

    @check_installed
    def restart(self):
        """Restart add-on.

        Return a coroutine.
        """
        return self.instance.restart()

    @check_installed
    def logs(self):
        """Return add-on log output.

        Return a coroutine.
        """
        return self.instance.logs()

    @check_installed
    def stats(self):
        """Return stats of container.

        Return a coroutine.
        """
        return self.instance.stats()

    @check_installed
    async def rebuild(self):
        """Perform a rebuild of a locally built add-on."""
        last_state = await self.state()

        if not self.need_build:
            _LOGGER.error("Can't rebuild an add-on that is not a local build!")
            return False

        # remove docker container but not addon config
        if not await self.instance.remove():
            return False

        if not await self.instance.install(self.version_installed):
            return False

        # restore state
        if last_state == STATE_STARTED:
            await self.instance.run()
        return True

    @check_installed
    async def write_stdin(self, data):
        """Write data to add-on stdin.

        Return a coroutine.
        """
        if not self.with_stdin:
            _LOGGER.error("Add-on doesn't support writing to stdin!")
            return False

        return await self.instance.write_stdin(data)

    @check_installed
    async def snapshot(self, tar_file):
        """Snapshot the state of an add-on."""
        with TemporaryDirectory(dir=str(self._config.path_tmp)) as temp:
            # store local image
            if self.need_build and not await \
                    self.instance.export_image(Path(temp, "image.tar")):
                return False

            data = {
                ATTR_USER: self._data.user.get(self._id, {}),
                ATTR_SYSTEM: self._data.system.get(self._id, {}),
                ATTR_VERSION: self.version_installed,
                ATTR_STATE: await self.state(),
            }

            # store local configs/state
            try:
                write_json_file(Path(temp, "addon.json"), data)
            except (OSError, json.JSONDecodeError) as err:
                _LOGGER.error("Can't save meta for %s: %s", self._id, err)
                return False

            # write into tarfile
            def _create_tar():
                """Write tar inside loop."""
                with tarfile.open(tar_file, "w:gz",
                                  compresslevel=1) as snapshot:
                    snapshot.add(temp, arcname=".")
                    snapshot.add(self.path_data, arcname="data")

            try:
                _LOGGER.info("Build snapshot for addon %s", self._id)
                await self._loop.run_in_executor(None, _create_tar)
            except (tarfile.TarError, OSError) as err:
                _LOGGER.error("Can't write tarfile %s: %s", tar_file, err)
                return False

        _LOGGER.info("Finish snapshot for addon %s", self._id)
        return True

    async def restore(self, tar_file):
        """Restore the state of an add-on."""
        with TemporaryDirectory(dir=str(self._config.path_tmp)) as temp:
            # extract snapshot
            def _extract_tar():
                """Extract tar snapshot."""
                with tarfile.open(tar_file, "r:gz") as snapshot:
                    snapshot.extractall(path=Path(temp))

            try:
                await self._loop.run_in_executor(None, _extract_tar)
            except tarfile.TarError as err:
                _LOGGER.error("Can't read tarfile %s: %s", tar_file, err)
                return False

            # read snapshot data
            try:
                data = read_json_file(Path(temp, "addon.json"))
            except (OSError, json.JSONDecodeError) as err:
                _LOGGER.error("Can't read addon.json: %s", err)
                return False

            # validate
            try:
                data = SCHEMA_ADDON_SNAPSHOT(data)
            except vol.Invalid as err:
                _LOGGER.error("Can't validate %s, snapshot data: %s",
                              self._id, humanize_error(data, err))
                return False

            # restore data / reload addon
            _LOGGER.info("Restore config for addon %s", self._id)
            self._restore_data(data[ATTR_USER], data[ATTR_SYSTEM])

            # check version / restore image
            version = data[ATTR_VERSION]
            if not await self.instance.exists():
                _LOGGER.info("Restore image for addon %s", self._id)

                image_file = Path(temp, "image.tar")
                if image_file.is_file():
                    await self.instance.import_image(image_file, version)
                else:
                    if await self.instance.install(version):
                        await self.instance.cleanup()
            else:
                await self.instance.stop()

            # restore data
            def _restore_data():
                """Restore data."""
                if self.path_data.is_dir():
                    shutil.rmtree(str(self.path_data), ignore_errors=True)
                shutil.copytree(str(Path(temp, "data")), str(self.path_data))

            try:
                _LOGGER.info("Restore data for addon %s", self._id)
                await self._loop.run_in_executor(None, _restore_data)
            except shutil.Error as err:
                _LOGGER.error("Can't restore origin data: %s", err)
                return False

            # run addon
            if data[ATTR_STATE] == STATE_STARTED:
                return await self.start()

        _LOGGER.info("Finish restore for addon %s", self._id)
        return True
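Since the webui template above is easy to misread, here is a minimal standalone sketch of how RE_WEBUI resolves a config string against a port mapping; the template string and the mapping values are hypothetical:

import re

RE_WEBUI = re.compile(
    r"^(?:(?P<s_prefix>https?)|\[PROTO:(?P<t_proto>\w+)\])"
    r":\/\/\[HOST\]:\[PORT:(?P<t_port>\d+)\](?P<s_suffix>.*)$")

match = RE_WEBUI.match("http://[HOST]:[PORT:8080]/admin")
ports = {"8080/tcp": 30080}  # hypothetical container -> host mapping

t_port = match.group('t_port')             # "8080" from the template
port = ports.get(f"{t_port}/tcp", t_port)  # mapped host port, else template port
print(f"{match.group('s_prefix')}://[HOST]:{port}{match.group('s_suffix')}")
# -> http://[HOST]:30080/admin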
hassio/addons/build.py (new file, 72 lines)
@@ -0,0 +1,72 @@
"""HassIO add-ons build environment."""
from pathlib import Path

from .validate import SCHEMA_BUILD_CONFIG, BASE_IMAGE
from ..const import ATTR_SQUASH, ATTR_BUILD_FROM, ATTR_ARGS, META_ADDON
from ..coresys import CoreSysAttributes
from ..utils.json import JsonConfig


class AddonBuild(JsonConfig, CoreSysAttributes):
    """Handle build options for add-ons."""

    def __init__(self, coresys, slug):
        """Initialize addon builder."""
        self.coresys = coresys
        self._id = slug

        super().__init__(
            Path(self.addon.path_location, 'build.json'), SCHEMA_BUILD_CONFIG)

    def save_data(self):
        """Ignore save function."""
        pass

    @property
    def addon(self):
        """Return add-on of build data."""
        return self._addons.get(self._id)

    @property
    def base_image(self):
        """Return base image for this add-on."""
        return self._data[ATTR_BUILD_FROM].get(
            self._arch, BASE_IMAGE[self._arch])

    @property
    def squash(self):
        """Return True if squash is active."""
        return self._data[ATTR_SQUASH]

    @property
    def additional_args(self):
        """Return additional docker build arguments."""
        return self._data[ATTR_ARGS]

    def get_docker_args(self, version):
        """Create a dict with docker build arguments."""
        args = {
            'path': str(self.addon.path_location),
            'tag': f"{self.addon.image}:{version}",
            'pull': True,
            'forcerm': True,
            'squash': self.squash,
            'labels': {
                'io.hass.version': version,
                'io.hass.arch': self._arch,
                'io.hass.type': META_ADDON,
                'io.hass.name': self.addon.name,
                'io.hass.description': self.addon.description,
            },
            'buildargs': {
                'BUILD_FROM': self.base_image,
                'BUILD_VERSION': version,
                'BUILD_ARCH': self._arch,
                **self.additional_args,
            }
        }

        if self.addon.url:
            args['labels']['io.hass.url'] = self.addon.url

        return args
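A short note on get_docker_args(): the add-on's extra "args" from build.json are merged in after the fixed BUILD_* arguments, so a custom key is passed through to docker build unchanged. Sketch with assumed values (not from the commit):

additional_args = {'TENSORFLOW_VERSION': '1.5'}  # hypothetical build.json "args"

buildargs = {
    'BUILD_FROM': 'homeassistant/armhf-base:latest',
    'BUILD_VERSION': '1.0',
    'BUILD_ARCH': 'armhf',
    **additional_args,
}
print(buildargs)  # the extra argument rides along to docker build unchanged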
hassio/addons/built-in.json (new file, 12 lines)
@@ -0,0 +1,12 @@
{
    "local": {
        "name": "Local Add-Ons",
        "url": "https://home-assistant.io/hassio",
        "maintainer": "you"
    },
    "core": {
        "name": "Built-in Add-Ons",
        "url": "https://home-assistant.io/addons",
        "maintainer": "Home Assistant authors"
    }
}
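For orientation, a sketch of how this file is consumed: _set_builtin_repositories() in the diff below reads it from the package directory and copies the two entries into the repository map. Standalone approximation (it inlines hassio's read_json_file wrapper with plain json; __file__ stands in for the data module's location):

import json
from pathlib import Path

builtin_file = Path(__file__).parent.joinpath('built-in.json')
builtin_data = json.loads(builtin_file.read_text())

repositories = {
    'core': builtin_data['core'],
    'local': builtin_data['local'],
}
print(repositories['core']['name'])  # -> Built-in Add-Ons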
@@ -1,83 +1,90 @@
 """Init file for HassIO addons."""
 import copy
 import logging
-from pathlib import Path, PurePath
+import json
+from pathlib import Path
 
 import voluptuous as vol
 from voluptuous.humanize import humanize_error
 
-from .util import extract_hash_from_path
+from .utils import extract_hash_from_path
 from .validate import (
-    validate_options, SCHEMA_ADDON_CONFIG, SCHEMA_REPOSITORY_CONFIG)
+    SCHEMA_ADDON_CONFIG, SCHEMA_ADDON_FILE, SCHEMA_REPOSITORY_CONFIG)
 from ..const import (
-    FILE_HASSIO_ADDONS, ATTR_NAME, ATTR_VERSION, ATTR_SLUG, ATTR_DESCRIPTON,
-    ATTR_STARTUP, ATTR_BOOT, ATTR_MAP, ATTR_OPTIONS, ATTR_PORTS, BOOT_AUTO,
-    DOCKER_REPO, ATTR_SCHEMA, ATTR_IMAGE, MAP_CONFIG, MAP_SSL, MAP_ADDONS,
-    MAP_BACKUP, ATTR_REPOSITORY)
-from ..config import Config
-from ..tools import read_json_file, write_json_file
+    FILE_HASSIO_ADDONS, ATTR_VERSION, ATTR_SLUG, ATTR_REPOSITORY, ATTR_LOCATON,
+    REPOSITORY_CORE, REPOSITORY_LOCAL, ATTR_USER, ATTR_SYSTEM)
+from ..coresys import CoreSysAttributes
+from ..utils.json import JsonConfig, read_json_file
 
 _LOGGER = logging.getLogger(__name__)
 
-SYSTEM = 'system'
-USER = 'user'
-
-REPOSITORY_CORE = 'core'
-REPOSITORY_LOCAL = 'local'
-
 
-class AddonsData(Config):
+class Data(JsonConfig, CoreSysAttributes):
     """Hold data for addons inside HassIO."""
 
-    def __init__(self, config):
+    def __init__(self, coresys):
         """Initialize data holder."""
-        super().__init__(FILE_HASSIO_ADDONS)
-        self.config = config
-        self._system_data = self._data.get(SYSTEM, {})
-        self._user_data = self._data.get(USER, {})
-        self._addons_cache = {}
-        self._repositories_data = {}
-        self.arch = None
+        super().__init__(FILE_HASSIO_ADDONS, SCHEMA_ADDON_FILE)
+        self.coresys = coresys
+        self._repositories = {}
+        self._cache = {}
 
-    def save(self):
-        """Store data to config file."""
-        self._data = {
-            USER: self._user_data,
-            SYSTEM: self._system_data,
-        }
-        super().save()
+    @property
+    def user(self):
+        """Return local addon user data."""
+        return self._data[ATTR_USER]
 
-    def read_data_from_repositories(self):
+    @property
+    def system(self):
+        """Return local addon data."""
+        return self._data[ATTR_SYSTEM]
+
+    @property
+    def cache(self):
+        """Return addon data from cache/repositories."""
+        return self._cache
+
+    @property
+    def repositories(self):
+        """Return addon data from repositories."""
+        return self._repositories
+
+    def reload(self):
         """Read data from addons repository."""
-        self._addons_cache = {}
-        self._repositories_data = {}
+        self._cache = {}
+        self._repositories = {}
 
         # read core repository
         self._read_addons_folder(
-            self.config.path_addons_core, REPOSITORY_CORE)
+            self._config.path_addons_core, REPOSITORY_CORE)
 
         # read local repository
         self._read_addons_folder(
-            self.config.path_addons_local, REPOSITORY_LOCAL)
+            self._config.path_addons_local, REPOSITORY_LOCAL)
 
         # add built-in repositories information
         self._set_builtin_repositories()
 
         # read custom git repositories
-        for repository_element in self.config.path_addons_git.iterdir():
+        for repository_element in self._config.path_addons_git.iterdir():
             if repository_element.is_dir():
                 self._read_git_repository(repository_element)
 
         # update local data
         self._merge_config()
 
     def _read_git_repository(self, path):
         """Process a custom repository folder."""
         slug = extract_hash_from_path(path)
-        repository_info = {ATTR_SLUG: slug}
 
         # exists repository json
         repository_file = Path(path, "repository.json")
         try:
-            repository_info.update(SCHEMA_REPOSITORY_CONFIG(
+            repository_info = SCHEMA_REPOSITORY_CONFIG(
                 read_json_file(repository_file)
-            ))
+            )
 
-        except OSError:
+        except (OSError, json.JSONDecodeError):
             _LOGGER.warning("Can't read repository information from %s",
                             repository_file)
             return
@@ -87,7 +94,7 @@ class AddonsData(Config):
         return
 
         # process data
-        self._repositories_data[slug] = repository_info
+        self._repositories[slug] = repository_info
         self._read_addons_folder(path, slug)
 
     def _read_addons_folder(self, path, repository):
@@ -105,211 +112,51 @@ class AddonsData(Config):
 
             # store
             addon_config[ATTR_REPOSITORY] = repository
-            self._addons_cache[addon_slug] = addon_config
+            addon_config[ATTR_LOCATON] = str(addon.parent)
+            self._cache[addon_slug] = addon_config
 
-        except OSError:
+        except (OSError, json.JSONDecodeError):
             _LOGGER.warning("Can't read %s", addon)
 
         except vol.Invalid as ex:
-            _LOGGER.warning("Can't read %s -> %s", addon,
+            _LOGGER.warning("Can't read %s: %s", addon,
                             humanize_error(addon_config, ex))
 
-    def merge_update_config(self):
+    def _set_builtin_repositories(self):
+        """Add local built-in repository into dataset."""
+        try:
+            builtin_file = Path(__file__).parent.joinpath('built-in.json')
+            builtin_data = read_json_file(builtin_file)
+        except (OSError, json.JSONDecodeError) as err:
+            _LOGGER.warning("Can't read built-in json: %s", err)
+            return
+
+        # core repository
+        self._repositories[REPOSITORY_CORE] = \
+            builtin_data[REPOSITORY_CORE]
+
+        # local repository
+        self._repositories[REPOSITORY_LOCAL] = \
+            builtin_data[REPOSITORY_LOCAL]
+
+    def _merge_config(self):
         """Update local config if they have update.
 
-        It need to be the same version as the local version is.
+        It need to be the same version as the local version is for merge.
         """
         have_change = False
 
-        for addon, data in self._system_data.items():
+        for addon in set(self.system):
             # detached
-            if addon not in self._addons_cache:
+            if addon not in self._cache:
                 continue
 
-            cache = self._addons_cache[addon]
+            cache = self._cache[addon]
+            data = self.system[addon]
             if data[ATTR_VERSION] == cache[ATTR_VERSION]:
                 if data != cache:
-                    self._system_data[addon] = copy.deepcopy(cache)
+                    self.system[addon] = copy.deepcopy(cache)
                     have_change = True
 
         if have_change:
-            self.save()
-
-    @property
-    def list_installed(self):
-        """Return a list of installed addons."""
-        return set(self._system_data.keys())
-
-    @property
-    def list_all(self):
-        """Return a list of all addons."""
-        return {
-            **self._system_data,
-            **self._addons_cache
-        }
-
-    def list_startup(self, start_type):
-        """Get list of installed addon with need start by type."""
-        addon_list = set()
-        for addon in self._system_data.keys():
-            if self.get_boot(addon) != BOOT_AUTO:
-                continue
-
-            try:
-                if self._system_data[addon][ATTR_STARTUP] == start_type:
-                    addon_list.add(addon)
-            except KeyError:
-                _LOGGER.warning("Orphaned addon detect %s", addon)
-                continue
-
-        return addon_list
-
-    @property
-    def list_detached(self):
-        """Return local addons they not support from repo."""
-        addon_list = set()
-        for addon in self._system_data.keys():
-            if addon not in self._addons_cache:
-                addon_list.add(addon)
-
-        return addon_list
-
-    @property
-    def list_repositories(self):
-        """Return list of addon repositories."""
-        return list(self._repositories_data.values())
-
-    def exists_addon(self, addon):
-        """Return True if a addon exists."""
-        return addon in self._addons_cache or addon in self._system_data
-
-    def is_installed(self, addon):
-        """Return True if a addon is installed."""
-        return addon in self._system_data
-
-    def version_installed(self, addon):
-        """Return installed version."""
-        return self._user_data.get(addon, {}).get(ATTR_VERSION)
-
-    def set_addon_install(self, addon, version):
-        """Set addon as installed."""
-        self._system_data[addon] = copy.deepcopy(self._addons_cache[addon])
-        self._user_data[addon] = {
-            ATTR_OPTIONS: {},
-            ATTR_VERSION: version,
-        }
-        self.save()
-
-    def set_addon_uninstall(self, addon):
-        """Set addon as uninstalled."""
-        self._system_data.pop(addon, None)
-        self._user_data.pop(addon, None)
-        self.save()
-
-    def set_addon_update(self, addon, version):
-        """Update version of addon."""
-        self._system_data[addon] = copy.deepcopy(self._addons_cache[addon])
-        self._user_data[addon][ATTR_VERSION] = version
-        self.save()
-
-    def set_options(self, addon, options):
-        """Store user addon options."""
-        self._user_data[addon][ATTR_OPTIONS] = copy.deepcopy(options)
-        self.save()
-
-    def set_boot(self, addon, boot):
-        """Store user boot options."""
-        self._user_data[addon][ATTR_BOOT] = boot
-        self.save()
-
-    def get_options(self, addon):
-        """Return options with local changes."""
-        return {
-            **self._system_data[addon][ATTR_OPTIONS],
-            **self._user_data[addon][ATTR_OPTIONS],
-        }
-
-    def get_boot(self, addon):
-        """Return boot config with prio local settings."""
-        if ATTR_BOOT in self._user_data[addon]:
-            return self._user_data[addon][ATTR_BOOT]
-
-        return self._system_data[addon][ATTR_BOOT]
-
-    def get_name(self, addon):
-        """Return name of addon."""
-        return self._system_data[addon][ATTR_NAME]
-
-    def get_description(self, addon):
-        """Return description of addon."""
-        return self._system_data[addon][ATTR_DESCRIPTON]
-
-    def get_last_version(self, addon):
-        """Return version of addon."""
-        if addon not in self._addons_cache:
-            return self.version_installed(addon)
-        return self._addons_cache[addon][ATTR_VERSION]
-
-    def get_ports(self, addon):
-        """Return ports of addon."""
-        return self._system_data[addon].get(ATTR_PORTS)
-
-    def get_image(self, addon):
-        """Return image name of addon."""
-        addon_data = self._system_data.get(
-            addon, self._addons_cache.get(addon))
-
-        if ATTR_IMAGE not in addon_data:
-            return "{}/{}-addon-{}".format(
-                DOCKER_REPO, self.arch, addon_data[ATTR_SLUG])
-
-        return addon_data[ATTR_IMAGE].format(arch=self.arch)
-
-    def map_config(self, addon):
-        """Return True if config map is needed."""
-        return MAP_CONFIG in self._system_data[addon][ATTR_MAP]
-
-    def map_ssl(self, addon):
-        """Return True if ssl map is needed."""
-        return MAP_SSL in self._system_data[addon][ATTR_MAP]
-
-    def map_addons(self, addon):
-        """Return True if addons map is needed."""
-        return MAP_ADDONS in self._system_data[addon][ATTR_MAP]
-
-    def map_backup(self, addon):
-        """Return True if backup map is needed."""
-        return MAP_BACKUP in self._system_data[addon][ATTR_MAP]
-
-    def path_data(self, addon):
-        """Return addon data path inside supervisor."""
-        return Path(self.config.path_addons_data, addon)
-
-    def path_extern_data(self, addon):
-        """Return addon data path external for docker."""
-        return str(PurePath(self.config.path_extern_addons_data, addon))
-
-    def path_addon_options(self, addon):
-        """Return path to addons options."""
-        return Path(self.path_data(addon), "options.json")
-
-    def write_addon_options(self, addon):
-        """Return True if addon options is written to data."""
-        schema = self.get_schema(addon)
-        options = self.get_options(addon)
-
-        try:
-            schema(options)
-            return write_json_file(self.path_addon_options(addon), options)
-        except vol.Invalid as ex:
-            _LOGGER.error("Addon %s have wrong options -> %s", addon,
-                          humanize_error(options, ex))
-
-        return False
-
-    def get_schema(self, addon):
-        """Create a schema for addon options."""
-        raw_schema = self._system_data[addon][ATTR_SCHEMA]
-
-        schema = vol.Schema(vol.All(dict, validate_options(raw_schema)))
-        return schema
+            self.save_data()
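The _merge_config() rule above is subtle enough to deserve a worked example: a cached entry only replaces the installed system copy when the version is unchanged but the metadata differs, and detached add-ons are left alone. Standalone sketch with made-up data:

import copy

system = {'example': {'version': '1.0', 'name': 'Old name'}}
cache = {'example': {'version': '1.0', 'name': 'New name'}}

for addon in set(system):
    if addon not in cache:
        continue  # detached add-on: keep the installed copy as-is
    if system[addon]['version'] == cache[addon]['version'] \
            and system[addon] != cache[addon]:
        system[addon] = copy.deepcopy(cache[addon])

print(system['example']['name'])  # -> New name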
@@ -1,38 +1,39 @@
 """Init file for HassIO addons git."""
 import asyncio
 import logging
+import functools as ft
 from pathlib import Path
 import shutil
 
 import git
 
-from .util import get_hash_from_repository
+from .utils import get_hash_from_repository
 from ..const import URL_HASSIO_ADDONS
+from ..coresys import CoreSysAttributes
 
 _LOGGER = logging.getLogger(__name__)
 
 
-class AddonsRepo(object):
+class GitRepo(CoreSysAttributes):
     """Manage addons git repo."""
 
-    def __init__(self, config, loop, path, url):
+    def __init__(self, coresys, path, url):
         """Initialize git base wrapper."""
-        self.config = config
-        self.loop = loop
+        self.coresys = coresys
         self.repo = None
         self.path = path
         self.url = url
-        self._lock = asyncio.Lock(loop=loop)
+        self.lock = asyncio.Lock(loop=coresys.loop)
 
     async def load(self):
         """Init git addon repo."""
         if not self.path.is_dir():
             return await self.clone()
 
-        async with self._lock:
+        async with self.lock:
             try:
                 _LOGGER.info("Load addon %s repository", self.path)
-                self.repo = await self.loop.run_in_executor(
+                self.repo = await self._loop.run_in_executor(
                     None, git.Repo, str(self.path))
 
             except (git.InvalidGitRepositoryError, git.NoSuchPathError,
@@ -44,11 +45,13 @@ class AddonsRepo(object):
 
     async def clone(self):
         """Clone git addon repo."""
-        async with self._lock:
+        async with self.lock:
             try:
                 _LOGGER.info("Clone addon %s repository", self.url)
-                self.repo = await self.loop.run_in_executor(
-                    None, git.Repo.clone_from, self.url, str(self.path))
+                self.repo = await self._loop.run_in_executor(
+                    None, ft.partial(
+                        git.Repo.clone_from, self.url, str(self.path),
+                        recursive=True))
 
             except (git.InvalidGitRepositoryError, git.NoSuchPathError,
                     git.GitCommandError) as err:
@@ -59,41 +62,43 @@ class AddonsRepo(object):
 
     async def pull(self):
         """Pull git addon repo."""
-        if self._lock.locked():
+        if self.lock.locked():
             _LOGGER.warning("A task is already in progress.")
             return False
 
-        async with self._lock:
+        async with self.lock:
             try:
                 _LOGGER.info("Pull addon %s repository", self.url)
-                await self.loop.run_in_executor(
+                await self._loop.run_in_executor(
                     None, self.repo.remotes.origin.pull)
 
             except (git.InvalidGitRepositoryError, git.NoSuchPathError,
-                    git.exc.GitCommandError) as err:
+                    git.GitCommandError) as err:
                 _LOGGER.error("Can't pull %s repo: %s.", self.url, err)
                 return False
 
         return True
 
 
-class AddonsRepoHassIO(AddonsRepo):
+class GitRepoHassIO(GitRepo):
     """HassIO addons repository."""
 
-    def __init__(self, config, loop):
+    def __init__(self, coresys):
         """Initialize git hassio addon repository."""
         super().__init__(
-            config, loop, config.path_addons_core, URL_HASSIO_ADDONS)
+            coresys, coresys.config.path_addons_core, URL_HASSIO_ADDONS)
 
 
-class AddonsRepoCustom(AddonsRepo):
+class GitRepoCustom(GitRepo):
     """Custom addons repository."""
 
-    def __init__(self, config, loop, url):
+    def __init__(self, coresys, url):
         """Initialize git hassio addon repository."""
-        path = Path(config.path_addons_git, get_hash_from_repository(url))
+        path = Path(
+            coresys.config.path_addons_git,
+            get_hash_from_repository(url))
 
-        super().__init__(config, loop, path, url)
+        super().__init__(coresys, path, url)
 
     def remove(self):
         """Remove a custom addon."""
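Why clone() now wraps git.Repo.clone_from in ft.partial: loop.run_in_executor() only forwards positional arguments, so a keyword argument such as recursive=True has to be bound to the callable beforehand. Standalone sketch with a stand-in function:

import asyncio
import functools as ft

def clone_from(url, path, recursive=False):
    """Stand-in for git.Repo.clone_from (hypothetical signature subset)."""
    return f"clone {url} -> {path} (recursive={recursive})"

async def main(loop):
    result = await loop.run_in_executor(
        None, ft.partial(
            clone_from, 'https://example.com/repo.git', '/tmp/repo',
            recursive=True))
    print(result)

loop = asyncio.get_event_loop()
loop.run_until_complete(main(loop))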
hassio/addons/repository.py (new file, 72 lines)
@@ -0,0 +1,72 @@
"""Represent a HassIO repository."""
from .git import GitRepoHassIO, GitRepoCustom
from .utils import get_hash_from_repository
from ..const import (
    REPOSITORY_CORE, REPOSITORY_LOCAL, ATTR_NAME, ATTR_URL, ATTR_MAINTAINER)
from ..coresys import CoreSysAttributes

UNKNOWN = 'unknown'


class Repository(CoreSysAttributes):
    """Repository in HassIO."""

    def __init__(self, coresys, repository):
        """Initialize repository object."""
        self.coresys = coresys
        self.source = None
        self.git = None

        if repository == REPOSITORY_LOCAL:
            self._id = repository
        elif repository == REPOSITORY_CORE:
            self._id = repository
            self.git = GitRepoHassIO(coresys)
        else:
            self._id = get_hash_from_repository(repository)
            self.git = GitRepoCustom(coresys, repository)
            self.source = repository

    @property
    def _mesh(self):
        """Return data struct of repository."""
        return self._addons.data.repositories.get(self._id, {})

    @property
    def slug(self):
        """Return slug of repository."""
        return self._id

    @property
    def name(self):
        """Return name of repository."""
        return self._mesh.get(ATTR_NAME, UNKNOWN)

    @property
    def url(self):
        """Return URL of repository."""
        return self._mesh.get(ATTR_URL, self.source)

    @property
    def maintainer(self):
        """Return maintainer of repository."""
        return self._mesh.get(ATTR_MAINTAINER, UNKNOWN)

    async def load(self):
        """Load addon repository."""
        if self.git:
            return await self.git.load()
        return True

    async def update(self):
        """Update addon repository."""
        if self.git:
            return await self.git.pull()
        return True

    def remove(self):
        """Remove addon repository."""
        if self._id in (REPOSITORY_CORE, REPOSITORY_LOCAL):
            raise RuntimeError("Can't remove built-in repositories!")

        self.git.remove()
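A note on the slug above: custom repositories are keyed by a short hash of their URL. A sketch mirroring what get_hash_from_repository() in utils.py appears to do (SHA-1 of the lowercased URL, truncated to the 8 hex characters that RE_SHA1 expects; treat the exact recipe as an assumption):

import hashlib

def get_hash_from_repository(name):
    """Generate a hash from repository (assumed to match utils.py)."""
    key = name.lower().encode()
    return hashlib.sha1(key).hexdigest()[:8]

print(get_hash_from_repository('https://github.com/hassio-addons/repository'))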
@@ -1,10 +1,12 @@
 """Util addons functions."""
 import hashlib
+import logging
 import re
 
 RE_SLUGIFY = re.compile(r'[^a-z0-9_]+')
 RE_SHA1 = re.compile(r"[a-f0-9]{8}")
 
+_LOGGER = logging.getLogger(__name__)
+
 
 def get_hash_from_repository(name):
     """Generate a hash from repository."""
@@ -21,6 +23,13 @@ def extract_hash_from_path(path):
     return repo_dir
 
 
-def create_hash_index_list(name_list):
-    """Create a dict with hash from repositories list."""
-    return {get_hash_from_repository(repo): repo for repo in name_list}
+def check_installed(method):
+    """Wrap function with check if addon is installed."""
+    async def wrap_check(addon, *args, **kwargs):
+        """Return False if not installed or the function."""
+        if not addon.is_installed:
+            _LOGGER.error("Addon %s is not installed", addon.slug)
+            return False
+        return await method(addon, *args, **kwargs)
+
+    return wrap_check
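The decorator in context, as a minimal runnable sketch (toy add-on class, not the real Addon):

import asyncio
import logging

_LOGGER = logging.getLogger(__name__)

def check_installed(method):
    """Wrap function with check if addon is installed."""
    async def wrap_check(addon, *args, **kwargs):
        if not addon.is_installed:
            _LOGGER.error("Addon %s is not installed", addon.slug)
            return False
        return await method(addon, *args, **kwargs)
    return wrap_check

class ToyAddon:
    slug = 'example'
    is_installed = False  # flip to True and the call goes through

    @check_installed
    async def uninstall(self):
        return True

loop = asyncio.get_event_loop()
print(loop.run_until_complete(ToyAddon().uninstall()))  # -> False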
@@ -1,11 +1,29 @@
|
||||
"""Validate addons options schema."""
|
||||
import logging
|
||||
import re
|
||||
import uuid
|
||||
|
||||
import voluptuous as vol
|
||||
|
||||
from ..const import (
|
||||
ATTR_NAME, ATTR_VERSION, ATTR_SLUG, ATTR_DESCRIPTON, ATTR_STARTUP,
|
||||
ATTR_BOOT, ATTR_MAP, ATTR_OPTIONS, ATTR_PORTS, STARTUP_ONCE, STARTUP_AFTER,
|
||||
STARTUP_BEFORE, BOOT_AUTO, BOOT_MANUAL, ATTR_SCHEMA, ATTR_IMAGE, MAP_SSL,
|
||||
MAP_CONFIG, MAP_ADDONS, MAP_BACKUP, ATTR_URL, ATTR_MAINTAINER)
|
||||
ATTR_BOOT, ATTR_MAP, ATTR_OPTIONS, ATTR_PORTS, STARTUP_ONCE,
|
||||
STARTUP_SYSTEM, STARTUP_SERVICES, STARTUP_APPLICATION, STARTUP_INITIALIZE,
|
||||
BOOT_AUTO, BOOT_MANUAL, ATTR_SCHEMA, ATTR_IMAGE, ATTR_URL, ATTR_MAINTAINER,
|
||||
ATTR_ARCH, ATTR_DEVICES, ATTR_ENVIRONMENT, ATTR_HOST_NETWORK, ARCH_ARMHF,
|
||||
ARCH_AARCH64, ARCH_AMD64, ARCH_I386, ATTR_TMPFS, ATTR_PRIVILEGED,
|
||||
ATTR_USER, ATTR_STATE, ATTR_SYSTEM, STATE_STARTED, STATE_STOPPED,
|
||||
ATTR_LOCATON, ATTR_REPOSITORY, ATTR_TIMEOUT, ATTR_NETWORK, ATTR_UUID,
|
||||
ATTR_AUTO_UPDATE, ATTR_WEBUI, ATTR_AUDIO, ATTR_AUDIO_INPUT, ATTR_HOST_IPC,
|
||||
ATTR_AUDIO_OUTPUT, ATTR_HASSIO_API, ATTR_BUILD_FROM, ATTR_SQUASH,
|
||||
ATTR_ARGS, ATTR_GPIO, ATTR_HOMEASSISTANT_API, ATTR_STDIN, ATTR_LEGACY,
|
||||
ATTR_HOST_DBUS, ATTR_AUTO_UART)
|
||||
from ..validate import NETWORK_PORT, DOCKER_PORTS, ALSA_CHANNEL
|
||||
|
||||
_LOGGER = logging.getLogger(__name__)
|
||||
|
||||
|
||||
RE_VOLUME = re.compile(r"^(config|ssl|addons|backup|share)(?::(rw|:ro))?$")
|
||||
|
||||
V_STR = 'str'
|
||||
V_INT = 'int'
|
||||
@@ -13,8 +31,53 @@ V_FLOAT = 'float'
|
||||
V_BOOL = 'bool'
|
||||
V_EMAIL = 'email'
|
||||
V_URL = 'url'
|
||||
V_PORT = 'port'
|
||||
V_MATCH = 'match'
|
||||
|
||||
RE_SCHEMA_ELEMENT = re.compile(
|
||||
r"^(?:"
|
||||
r"|str|bool|email|url|port"
|
||||
r"|int(?:\((?P<i_min>\d+)?,(?P<i_max>\d+)?\))?"
|
||||
r"|float(?:\((?P<f_min>[\d\.]+)?,(?P<f_max>[\d\.]+)?\))?"
|
||||
r"|match\((?P<match>.*)\)"
|
||||
r")\??$"
|
||||
)
|
||||
|
||||
SCHEMA_ELEMENT = vol.Match(RE_SCHEMA_ELEMENT)
|
||||
|
||||
ARCH_ALL = [
|
||||
ARCH_ARMHF, ARCH_AARCH64, ARCH_AMD64, ARCH_I386
|
||||
]
|
||||
|
||||
STARTUP_ALL = [
|
||||
STARTUP_ONCE, STARTUP_INITIALIZE, STARTUP_SYSTEM, STARTUP_SERVICES,
|
||||
STARTUP_APPLICATION
|
||||
]
|
||||
|
||||
PRIVILEGED_ALL = [
|
||||
"NET_ADMIN",
|
||||
"SYS_ADMIN",
|
||||
"SYS_RAWIO",
|
||||
"SYS_TIME",
|
||||
"SYS_NICE"
|
||||
]
|
||||
|
||||
BASE_IMAGE = {
|
||||
ARCH_ARMHF: "homeassistant/armhf-base:latest",
|
||||
ARCH_AARCH64: "homeassistant/aarch64-base:latest",
|
||||
ARCH_I386: "homeassistant/i386-base:latest",
|
||||
ARCH_AMD64: "homeassistant/amd64-base:latest",
|
||||
}
|
||||
|
||||
|
||||
def _simple_startup(value):
|
||||
"""Simple startup schema."""
|
||||
if value == "before":
|
||||
return STARTUP_SERVICES
|
||||
if value == "after":
|
||||
return STARTUP_APPLICATION
|
||||
return value
|
||||
|
||||
ADDON_ELEMENT = vol.In([V_STR, V_INT, V_FLOAT, V_BOOL, V_EMAIL, V_URL])
|
||||
|
||||
# pylint: disable=no-value-for-parameter
|
||||
SCHEMA_ADDON_CONFIG = vol.Schema({
|
||||
@@ -22,22 +85,46 @@ SCHEMA_ADDON_CONFIG = vol.Schema({
|
||||
vol.Required(ATTR_VERSION): vol.Coerce(str),
|
||||
vol.Required(ATTR_SLUG): vol.Coerce(str),
|
||||
vol.Required(ATTR_DESCRIPTON): vol.Coerce(str),
|
||||
vol.Optional(ATTR_URL): vol.Url(),
|
||||
vol.Optional(ATTR_ARCH, default=ARCH_ALL): [vol.In(ARCH_ALL)],
|
||||
vol.Required(ATTR_STARTUP):
|
||||
vol.In([STARTUP_BEFORE, STARTUP_AFTER, STARTUP_ONCE]),
|
||||
vol.All(_simple_startup, vol.In(STARTUP_ALL)),
|
||||
vol.Required(ATTR_BOOT):
|
||||
vol.In([BOOT_AUTO, BOOT_MANUAL]),
|
||||
vol.Optional(ATTR_PORTS): dict,
|
||||
vol.Optional(ATTR_MAP, default=[]): [
|
||||
vol.In([MAP_CONFIG, MAP_SSL, MAP_ADDONS, MAP_BACKUP])
|
||||
],
|
||||
vol.Optional(ATTR_PORTS): DOCKER_PORTS,
|
||||
vol.Optional(ATTR_WEBUI):
|
||||
vol.Match(r"^(?:https?|\[PROTO:\w+\]):\/\/\[HOST\]:\[PORT:\d+\].*$"),
|
||||
vol.Optional(ATTR_HOST_NETWORK, default=False): vol.Boolean(),
|
||||
vol.Optional(ATTR_HOST_IPC, default=False): vol.Boolean(),
|
||||
vol.Optional(ATTR_HOST_DBUS, default=False): vol.Boolean(),
|
||||
vol.Optional(ATTR_DEVICES): [vol.Match(r"^(.*):(.*):([rwm]{1,3})$")],
|
||||
vol.Optional(ATTR_AUTO_UART, default=False): vol.Boolean(),
|
||||
vol.Optional(ATTR_TMPFS):
|
||||
vol.Match(r"^size=(\d)*[kmg](,uid=\d{1,4})?(,rw)?$"),
|
||||
vol.Optional(ATTR_MAP, default=list): [vol.Match(RE_VOLUME)],
|
||||
vol.Optional(ATTR_ENVIRONMENT): {vol.Match(r"\w*"): vol.Coerce(str)},
|
||||
vol.Optional(ATTR_PRIVILEGED): [vol.In(PRIVILEGED_ALL)],
|
||||
vol.Optional(ATTR_AUDIO, default=False): vol.Boolean(),
|
||||
vol.Optional(ATTR_GPIO, default=False): vol.Boolean(),
|
||||
vol.Optional(ATTR_HASSIO_API, default=False): vol.Boolean(),
|
||||
vol.Optional(ATTR_HOMEASSISTANT_API, default=False): vol.Boolean(),
|
||||
vol.Optional(ATTR_STDIN, default=False): vol.Boolean(),
|
||||
vol.Optional(ATTR_LEGACY, default=False): vol.Boolean(),
|
||||
vol.Required(ATTR_OPTIONS): dict,
|
||||
vol.Required(ATTR_SCHEMA): {
|
||||
vol.Coerce(str): vol.Any(ADDON_ELEMENT, [
|
||||
vol.Any(ADDON_ELEMENT, {vol.Coerce(str): ADDON_ELEMENT})
|
||||
])
|
||||
},
|
||||
vol.Optional(ATTR_IMAGE): vol.Match(r"\w*/\w*"),
|
||||
}, extra=vol.ALLOW_EXTRA)
|
||||
vol.Required(ATTR_SCHEMA): vol.Any(vol.Schema({
|
||||
vol.Coerce(str): vol.Any(SCHEMA_ELEMENT, [
|
||||
vol.Any(
|
||||
SCHEMA_ELEMENT,
|
||||
{vol.Coerce(str): vol.Any(SCHEMA_ELEMENT, [SCHEMA_ELEMENT])}
|
||||
),
|
||||
], vol.Schema({
|
||||
vol.Coerce(str): vol.Any(SCHEMA_ELEMENT, [SCHEMA_ELEMENT])
|
||||
}))
|
||||
}), False),
|
||||
vol.Optional(ATTR_IMAGE): vol.Match(r"^[\w{}]+/[\-\w{}]+$"),
|
||||
vol.Optional(ATTR_TIMEOUT, default=10):
|
||||
vol.All(vol.Coerce(int), vol.Range(min=10, max=120)),
|
||||
}, extra=vol.REMOVE_EXTRA)
|
||||
|
||||
|
||||
# pylint: disable=no-value-for-parameter
|
||||
@@ -45,7 +132,58 @@ SCHEMA_REPOSITORY_CONFIG = vol.Schema({
|
||||
vol.Required(ATTR_NAME): vol.Coerce(str),
|
||||
vol.Optional(ATTR_URL): vol.Url(),
|
||||
vol.Optional(ATTR_MAINTAINER): vol.Coerce(str),
|
||||
}, extra=vol.ALLOW_EXTRA)
|
||||
}, extra=vol.REMOVE_EXTRA)
|
||||
|
||||
|
||||
# pylint: disable=no-value-for-parameter
|
||||
SCHEMA_BUILD_CONFIG = vol.Schema({
|
||||
vol.Optional(ATTR_BUILD_FROM, default=BASE_IMAGE): vol.Schema({
|
||||
vol.In(ARCH_ALL): vol.Match(r"(?:^[\w{}]+/)?[\-\w{}]+:[\.\-\w{}]+$"),
|
||||
}),
|
||||
vol.Optional(ATTR_SQUASH, default=False): vol.Boolean(),
|
||||
vol.Optional(ATTR_ARGS, default=dict): vol.Schema({
|
||||
vol.Coerce(str): vol.Coerce(str)
|
||||
}),
|
||||
}, extra=vol.REMOVE_EXTRA)
|
||||
|
||||
|
||||
# pylint: disable=no-value-for-parameter
|
||||
SCHEMA_ADDON_USER = vol.Schema({
|
||||
vol.Required(ATTR_VERSION): vol.Coerce(str),
|
||||
vol.Optional(ATTR_UUID, default=lambda: uuid.uuid4().hex):
|
||||
vol.Match(r"^[0-9a-f]{32}$"),
|
||||
vol.Optional(ATTR_OPTIONS, default=dict): dict,
|
||||
vol.Optional(ATTR_AUTO_UPDATE, default=False): vol.Boolean(),
|
||||
vol.Optional(ATTR_BOOT):
|
||||
vol.In([BOOT_AUTO, BOOT_MANUAL]),
|
||||
vol.Optional(ATTR_NETWORK): DOCKER_PORTS,
|
||||
vol.Optional(ATTR_AUDIO_OUTPUT): ALSA_CHANNEL,
|
||||
vol.Optional(ATTR_AUDIO_INPUT): ALSA_CHANNEL,
|
||||
}, extra=vol.REMOVE_EXTRA)
|
||||
|
||||
|
||||
SCHEMA_ADDON_SYSTEM = SCHEMA_ADDON_CONFIG.extend({
|
||||
vol.Required(ATTR_LOCATON): vol.Coerce(str),
|
||||
vol.Required(ATTR_REPOSITORY): vol.Coerce(str),
|
||||
})
|
||||
|
||||
|
||||
SCHEMA_ADDON_FILE = vol.Schema({
|
||||
vol.Optional(ATTR_USER, default=dict): {
|
||||
vol.Coerce(str): SCHEMA_ADDON_USER,
|
||||
},
|
||||
vol.Optional(ATTR_SYSTEM, default=dict): {
|
||||
vol.Coerce(str): SCHEMA_ADDON_SYSTEM,
|
||||
}
|
||||
})
|
||||
|
||||
|
||||
SCHEMA_ADDON_SNAPSHOT = vol.Schema({
|
||||
vol.Required(ATTR_USER): SCHEMA_ADDON_USER,
|
||||
vol.Required(ATTR_SYSTEM): SCHEMA_ADDON_SYSTEM,
|
||||
vol.Required(ATTR_STATE): vol.In([STATE_STARTED, STATE_STOPPED]),
|
||||
vol.Required(ATTR_VERSION): vol.Coerce(str),
|
||||
}, extra=vol.REMOVE_EXTRA)
|
||||
|
||||
|
||||
def validate_options(raw_schema):
|
||||
@@ -56,70 +194,110 @@ def validate_options(raw_schema):
|
||||
|
||||
# read options
|
||||
for key, value in struct.items():
|
||||
# Ignore unknown options / remove from list
|
||||
if key not in raw_schema:
|
||||
raise vol.Invalid("Unknown options {}.".format(key))
|
||||
_LOGGER.warning("Unknown options %s", key)
|
||||
continue
|
||||
|
||||
typ = raw_schema[key]
|
||||
try:
|
||||
if isinstance(typ, list):
|
||||
# nested value
|
||||
options[key] = _nested_validate(typ[0], value)
|
||||
# nested value list
|
||||
options[key] = _nested_validate_list(typ[0], value, key)
|
||||
elif isinstance(typ, dict):
|
||||
# nested value dict
|
||||
options[key] = _nested_validate_dict(typ, value, key)
|
||||
else:
|
||||
# normal value
|
||||
options[key] = _single_validate(typ, value)
|
||||
options[key] = _single_validate(typ, value, key)
|
||||
except (IndexError, KeyError):
|
||||
raise vol.Invalid(
|
||||
"Type error for {}.".format(key)) from None
|
||||
raise vol.Invalid(f"Type error for {key}") from None
|
||||
|
||||
_check_missing_options(raw_schema, options, 'root')
|
||||
return options
|
||||
|
||||
return validate
|
||||
|
||||
|
||||
# pylint: disable=no-value-for-parameter
|
||||
hassio/addons/validate.py:
def _single_validate(typ, value):
# pylint: disable=inconsistent-return-statements
def _single_validate(typ, value, key):
    """Validate a single element."""
    try:
        # if required argument
        if value is None:
            raise vol.Invalid("A required argument is not set!")
        # if required argument
        if value is None:
            raise vol.Invalid(f"Missing required option '{key}'")

        if typ == V_STR:
            return str(value)
        elif typ == V_INT:
            return int(value)
        elif typ == V_FLOAT:
            return float(value)
        elif typ == V_BOOL:
            return vol.Boolean()(value)
        elif typ == V_EMAIL:
            return vol.Email()(value)
        elif typ == V_URL:
            return vol.Url()(value)
        # parse extend data from type
        match = RE_SCHEMA_ELEMENT.match(typ)

        raise vol.Invalid("Fatal error for {}.".format(value))
    except ValueError:
        raise vol.Invalid(
            "Type {} error for {}.".format(typ, value)) from None
        # prepare range
        range_args = {}
        for group_name in ('i_min', 'i_max', 'f_min', 'f_max'):
            group_value = match.group(group_name)
            if group_value:
                range_args[group_name[2:]] = float(group_value)

        if typ.startswith(V_STR):
            return str(value)
        elif typ.startswith(V_INT):
            return vol.All(vol.Coerce(int), vol.Range(**range_args))(value)
        elif typ.startswith(V_FLOAT):
            return vol.All(vol.Coerce(float), vol.Range(**range_args))(value)
        elif typ.startswith(V_BOOL):
            return vol.Boolean()(value)
        elif typ.startswith(V_EMAIL):
            return vol.Email()(value)
        elif typ.startswith(V_URL):
            return vol.Url()(value)
        elif typ.startswith(V_PORT):
            return NETWORK_PORT(value)
        elif typ.startswith(V_MATCH):
            return vol.Match(match.group('match'))(str(value))

        raise vol.Invalid(f"Fatal error for {key} type {typ}")


def _nested_validate(typ, data_list):
def _nested_validate_list(typ, data_list, key):
    """Validate nested items."""
    options = []

    for element in data_list:
        # dict list
        # Nested?
        if isinstance(typ, dict):
            c_options = {}
            for c_key, c_value in element.items():
                if c_key not in typ:
                    raise vol.Invalid(
                        "Unknown nested options {}.".format(c_key))

                c_options[c_key] = _single_validate(typ[c_key], c_value)
            c_options = _nested_validate_dict(typ, element, key)
            options.append(c_options)
        # normal list
        else:
            options.append(_single_validate(typ, element))
            options.append(_single_validate(typ, element, key))

    return options


def _nested_validate_dict(typ, data_dict, key):
    """Validate nested items."""
    options = {}

    for c_key, c_value in data_dict.items():
        # Ignore unknown options / remove from list
        if c_key not in typ:
            _LOGGER.warning("Unknown options %s", c_key)
            continue

        # Nested?
        if isinstance(typ[c_key], list):
            options[c_key] = _nested_validate_list(typ[c_key][0],
                                                   c_value, c_key)
        else:
            options[c_key] = _single_validate(typ[c_key], c_value, c_key)

    _check_missing_options(typ, options, key)
    return options


def _check_missing_options(origin, exists, root):
    """Check that all required options exist."""
    missing = set(origin) - set(exists)
    for miss_opt in missing:
        if isinstance(origin[miss_opt], str) and \
                origin[miss_opt].endswith("?"):
            continue
        raise vol.Invalid(f"Missing option {miss_opt} in {root}")
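For reference, here is a minimal standalone sketch (not part of the diff) of how an `int(min,max)` schema element like the ones handled above resolves into a voluptuous validator. The simplified regex is an assumption modeled on the RE_SCHEMA_ELEMENT pattern defined earlier in validate.py; the real pattern also covers float, match, and the optional `?` suffix.

import re
import voluptuous as vol

# Assumed shape of the schema-element regex (simplified to ints only).
RE_ELEMENT = re.compile(r"^int\((?P<i_min>\d+)?,(?P<i_max>\d+)?\)\??$")

def validate_int_element(typ, value):
    """Validate a value against an 'int(min,max)' schema element."""
    match = RE_ELEMENT.match(typ)
    range_args = {}
    for group_name in ('i_min', 'i_max'):
        group_value = match.group(group_name)
        if group_value:
            # strip the 'i_' prefix to get vol.Range keyword names
            range_args[group_name[2:]] = float(group_value)
    return vol.All(vol.Coerce(int), vol.Range(**range_args))(value)

assert validate_int_element("int(1,65535)", "8123") == 8123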
hassio/api/__init__.py:
@@ -1,5 +1,6 @@
"""Init file for HassIO rest api."""
import logging
from pathlib import Path

from aiohttp import web

@@ -7,49 +8,66 @@ from .addons import APIAddons
from .homeassistant import APIHomeAssistant
from .host import APIHost
from .network import APINetwork
from .proxy import APIProxy
from .supervisor import APISupervisor
from .snapshots import APISnapshots
from ..coresys import CoreSysAttributes

_LOGGER = logging.getLogger(__name__)


class RestAPI(object):
class RestAPI(CoreSysAttributes):
    """Handle rest api for hassio."""

    def __init__(self, config, loop):
    def __init__(self, coresys):
        """Initialize docker base wrapper."""
        self.config = config
        self.loop = loop
        self.webapp = web.Application(loop=self.loop)
        self.coresys = coresys
        self.webapp = web.Application(loop=self._loop)

        # service stuff
        self._handler = None
        self.server = None

    def register_host(self, host_control):
    async def load(self):
        """Register REST API Calls."""
        self._register_supervisor()
        self._register_host()
        self._register_homeassistant()
        self._register_proxy()
        self._register_panel()
        self._register_addons()
        self._register_snapshots()
        self._register_network()

    def _register_host(self):
        """Register hostcontrol function."""
        api_host = APIHost(self.config, self.loop, host_control)
        api_host = APIHost()
        api_host.coresys = self.coresys

        self.webapp.router.add_get('/host/info', api_host.info)
        self.webapp.router.add_get('/host/hardware', api_host.hardware)
        self.webapp.router.add_post('/host/reboot', api_host.reboot)
        self.webapp.router.add_post('/host/shutdown', api_host.shutdown)
        self.webapp.router.add_post('/host/update', api_host.update)
        self.webapp.router.add_post('/host/options', api_host.options)
        self.webapp.router.add_post('/host/reload', api_host.reload)

    def register_network(self, host_control):
    def _register_network(self):
        """Register network function."""
        api_net = APINetwork(self.config, self.loop, host_control)
        api_net = APINetwork()
        api_net.coresys = self.coresys

        self.webapp.router.add_get('/network/info', api_net.info)
        self.webapp.router.add_post('/network/options', api_net.options)

    def register_supervisor(self, supervisor, addons, host_control):
    def _register_supervisor(self):
        """Register supervisor function."""
        api_supervisor = APISupervisor(
            self.config, self.loop, supervisor, addons, host_control)
        api_supervisor = APISupervisor()
        api_supervisor.coresys = self.coresys

        self.webapp.router.add_get('/supervisor/ping', api_supervisor.ping)
        self.webapp.router.add_get('/supervisor/info', api_supervisor.info)
        self.webapp.router.add_get(
            '/supervisor/addons', api_supervisor.available_addons)
        self.webapp.router.add_get('/supervisor/stats', api_supervisor.stats)
        self.webapp.router.add_post(
            '/supervisor/update', api_supervisor.update)
        self.webapp.router.add_post(
@@ -58,18 +76,46 @@ class RestAPI(object):
            '/supervisor/options', api_supervisor.options)
        self.webapp.router.add_get('/supervisor/logs', api_supervisor.logs)

    def register_homeassistant(self, dock_homeassistant):
    def _register_homeassistant(self):
        """Register homeassistant function."""
        api_hass = APIHomeAssistant(self.config, self.loop, dock_homeassistant)
        api_hass = APIHomeAssistant()
        api_hass.coresys = self.coresys

        self.webapp.router.add_get('/homeassistant/info', api_hass.info)
        self.webapp.router.add_post('/homeassistant/update', api_hass.update)
        self.webapp.router.add_get('/homeassistant/logs', api_hass.logs)
        self.webapp.router.add_get('/homeassistant/stats', api_hass.stats)
        self.webapp.router.add_post('/homeassistant/options', api_hass.options)
        self.webapp.router.add_post('/homeassistant/update', api_hass.update)
        self.webapp.router.add_post('/homeassistant/restart', api_hass.restart)
        self.webapp.router.add_post('/homeassistant/stop', api_hass.stop)
        self.webapp.router.add_post('/homeassistant/start', api_hass.start)
        self.webapp.router.add_post('/homeassistant/check', api_hass.check)

    def register_addons(self, addons):
    def _register_proxy(self):
        """Register HomeAssistant API Proxy."""
        api_proxy = APIProxy()
        api_proxy.coresys = self.coresys

        self.webapp.router.add_get(
            '/homeassistant/api/websocket', api_proxy.websocket)
        self.webapp.router.add_get(
            '/homeassistant/websocket', api_proxy.websocket)
        self.webapp.router.add_get(
            '/homeassistant/api/stream', api_proxy.stream)
        self.webapp.router.add_post(
            '/homeassistant/api/{path:.+}', api_proxy.api)
        self.webapp.router.add_get(
            '/homeassistant/api/{path:.+}', api_proxy.api)
        self.webapp.router.add_get(
            '/homeassistant/api/', api_proxy.api)

    def _register_addons(self):
        """Register homeassistant function."""
        api_addons = APIAddons(self.config, self.loop, addons)
        api_addons = APIAddons()
        api_addons.coresys = self.coresys

        self.webapp.router.add_get('/addons', api_addons.list)
        self.webapp.router.add_post('/addons/reload', api_addons.reload)
        self.webapp.router.add_get('/addons/{addon}/info', api_addons.info)
        self.webapp.router.add_post(
            '/addons/{addon}/install', api_addons.install)
@@ -77,18 +123,75 @@ class RestAPI(object):
            '/addons/{addon}/uninstall', api_addons.uninstall)
        self.webapp.router.add_post('/addons/{addon}/start', api_addons.start)
        self.webapp.router.add_post('/addons/{addon}/stop', api_addons.stop)
        self.webapp.router.add_post(
            '/addons/{addon}/restart', api_addons.restart)
        self.webapp.router.add_post(
            '/addons/{addon}/update', api_addons.update)
        self.webapp.router.add_post(
            '/addons/{addon}/options', api_addons.options)
        self.webapp.router.add_post(
            '/addons/{addon}/rebuild', api_addons.rebuild)
        self.webapp.router.add_get('/addons/{addon}/logs', api_addons.logs)
        self.webapp.router.add_get('/addons/{addon}/icon', api_addons.icon)
        self.webapp.router.add_get('/addons/{addon}/logo', api_addons.logo)
        self.webapp.router.add_get(
            '/addons/{addon}/changelog', api_addons.changelog)
        self.webapp.router.add_post('/addons/{addon}/stdin', api_addons.stdin)
        self.webapp.router.add_get('/addons/{addon}/stats', api_addons.stats)

    def _register_snapshots(self):
        """Register snapshots function."""
        api_snapshots = APISnapshots()
        api_snapshots.coresys = self.coresys

        self.webapp.router.add_get('/snapshots', api_snapshots.list)
        self.webapp.router.add_post('/snapshots/reload', api_snapshots.reload)

        self.webapp.router.add_post(
            '/snapshots/new/full', api_snapshots.snapshot_full)
        self.webapp.router.add_post(
            '/snapshots/new/partial', api_snapshots.snapshot_partial)

        self.webapp.router.add_get(
            '/snapshots/{snapshot}/info', api_snapshots.info)
        self.webapp.router.add_post(
            '/snapshots/{snapshot}/remove', api_snapshots.remove)
        self.webapp.router.add_post(
            '/snapshots/{snapshot}/restore/full', api_snapshots.restore_full)
        self.webapp.router.add_post(
            '/snapshots/{snapshot}/restore/partial',
            api_snapshots.restore_partial)

    def _register_panel(self):
        """Register panel for homeassistant."""
        def create_panel_response(build_type):
            """Create a function to generate a response."""
            path = Path(__file__).parent.joinpath(
                f"panel/{build_type}.html")
            return lambda request: web.FileResponse(path)

        # This route is for backwards compatibility with HA < 0.58
        self.webapp.router.add_get(
            '/panel', create_panel_response('hassio-main-es5'))

        # This route is for backwards compatibility with HA 0.58 - 0.61
        self.webapp.router.add_get(
            '/panel_es5', create_panel_response('hassio-main-es5'))
        self.webapp.router.add_get(
            '/panel_latest', create_panel_response('hassio-main-latest'))

        # This route is for HA > 0.61
        self.webapp.router.add_get(
            '/app-es5/index.html', create_panel_response('index'))
        self.webapp.router.add_get(
            '/app-es5/hassio-app.html', create_panel_response('hassio-app'))

    async def start(self):
        """Run rest api webserver."""
        self._handler = self.webapp.make_handler(loop=self.loop)
        self._handler = self.webapp.make_handler(loop=self._loop)

        try:
            self.server = await self.loop.create_server(
            self.server = await self._loop.create_server(
                self._handler, "0.0.0.0", "80")
        except OSError as err:
            _LOGGER.fatal(
@@ -102,5 +205,5 @@ class RestAPI(object):
        await self.webapp.shutdown()

        if self._handler:
            await self._handler.finish_connections(60)
            await self._handler.shutdown(60)
        await self.webapp.cleanup()
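For context on RestAPI.start() above: it relies on the pre-runner aiohttp API, Application.make_handler plus loop.create_server. A self-contained sketch of that pattern with an illustrative ping route; the route, names, and port here are placeholders, not part of the diff.

import asyncio
from aiohttp import web

async def start_api(loop):
    """Spin up a bare aiohttp app the way RestAPI.start() does."""
    app = web.Application(loop=loop)
    # plain callables returning a Response were valid handlers in this
    # aiohttp generation (compare the panel lambdas above)
    app.router.add_get(
        '/ping', lambda request: web.json_response({'result': 'ok'}))

    handler = app.make_handler(loop=loop)
    server = await loop.create_server(handler, '0.0.0.0', 8080)
    return app, server, handler

loop = asyncio.get_event_loop()
loop.run_until_complete(start_api(loop))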
hassio/api/addons.py:
@@ -5,10 +5,21 @@ import logging
import voluptuous as vol
from voluptuous.humanize import humanize_error

from .util import api_process, api_process_raw, api_validate
from .utils import api_process, api_process_raw, api_validate
from ..const import (
    ATTR_VERSION, ATTR_LAST_VERSION, ATTR_STATE, ATTR_BOOT, ATTR_OPTIONS,
    STATE_STOPPED, STATE_STARTED, BOOT_AUTO, BOOT_MANUAL)
    ATTR_URL, ATTR_DESCRIPTON, ATTR_DETACHED, ATTR_NAME, ATTR_REPOSITORY,
    ATTR_BUILD, ATTR_AUTO_UPDATE, ATTR_NETWORK, ATTR_HOST_NETWORK, ATTR_SLUG,
    ATTR_SOURCE, ATTR_REPOSITORIES, ATTR_ADDONS, ATTR_ARCH, ATTR_MAINTAINER,
    ATTR_INSTALLED, ATTR_LOGO, ATTR_WEBUI, ATTR_DEVICES, ATTR_PRIVILEGED,
    ATTR_AUDIO, ATTR_AUDIO_INPUT, ATTR_AUDIO_OUTPUT, ATTR_HASSIO_API,
    ATTR_GPIO, ATTR_HOMEASSISTANT_API, ATTR_STDIN, BOOT_AUTO, BOOT_MANUAL,
    ATTR_CHANGELOG, ATTR_HOST_IPC, ATTR_HOST_DBUS, ATTR_LONG_DESCRIPTION,
    ATTR_CPU_PERCENT, ATTR_MEMORY_LIMIT, ATTR_MEMORY_USAGE, ATTR_NETWORK_TX,
    ATTR_NETWORK_RX, ATTR_BLK_READ, ATTR_BLK_WRITE, ATTR_ICON,
    CONTENT_TYPE_PNG, CONTENT_TYPE_BINARY, CONTENT_TYPE_TEXT)
from ..coresys import CoreSysAttributes
from ..validate import DOCKER_PORTS

_LOGGER = logging.getLogger(__name__)

@@ -16,129 +27,260 @@ SCHEMA_VERSION = vol.Schema({
    vol.Optional(ATTR_VERSION): vol.Coerce(str),
})

# pylint: disable=no-value-for-parameter
SCHEMA_OPTIONS = vol.Schema({
    vol.Optional(ATTR_BOOT): vol.In([BOOT_AUTO, BOOT_MANUAL])
    vol.Optional(ATTR_BOOT): vol.In([BOOT_AUTO, BOOT_MANUAL]),
    vol.Optional(ATTR_NETWORK): vol.Any(None, DOCKER_PORTS),
    vol.Optional(ATTR_AUTO_UPDATE): vol.Boolean(),
})


class APIAddons(object):
class APIAddons(CoreSysAttributes):
    """Handle rest api for addons functions."""

    def __init__(self, config, loop, addons):
        """Initialize homeassistant rest api part."""
        self.config = config
        self.loop = loop
        self.addons = addons

    def _extract_addon(self, request, check_installed=True):
        """Return addon, or raise an exception if it does not exist."""
        addon = request.match_info.get('addon')

        # check data
        if not self.addons.exists_addon(addon):
        addon = self._addons.get(request.match_info.get('addon'))
        if not addon:
            raise RuntimeError("Addon not exists")
        if check_installed and not self.addons.is_installed(addon):

        if check_installed and not addon.is_installed:
            raise RuntimeError("Addon is not installed")

        return addon

    @staticmethod
    def _pretty_devices(addon):
        """Return a simplified device list."""
        dev_list = addon.devices
        if not dev_list:
            return None
        return [row.split(':')[0] for row in dev_list]

    @api_process
    async def list(self, request):
        """Return all addons / repositories."""
        data_addons = []
        for addon in self._addons.list_addons:
            data_addons.append({
                ATTR_NAME: addon.name,
                ATTR_SLUG: addon.slug,
                ATTR_DESCRIPTON: addon.description,
                ATTR_VERSION: addon.last_version,
                ATTR_INSTALLED: addon.version_installed,
                ATTR_ARCH: addon.supported_arch,
                ATTR_DETACHED: addon.is_detached,
                ATTR_REPOSITORY: addon.repository,
                ATTR_BUILD: addon.need_build,
                ATTR_URL: addon.url,
                ATTR_ICON: addon.with_icon,
                ATTR_LOGO: addon.with_logo,
            })

        data_repositories = []
        for repository in self._addons.list_repositories:
            data_repositories.append({
                ATTR_SLUG: repository.slug,
                ATTR_NAME: repository.name,
                ATTR_SOURCE: repository.source,
                ATTR_URL: repository.url,
                ATTR_MAINTAINER: repository.maintainer,
            })

        return {
            ATTR_ADDONS: data_addons,
            ATTR_REPOSITORIES: data_repositories,
        }

    @api_process
    async def reload(self, request):
        """Reload all addons data."""
        await asyncio.shield(self._addons.reload(), loop=self._loop)
        return True

    @api_process
    async def info(self, request):
        """Return addon information."""
        addon = self._extract_addon(request)
        addon = self._extract_addon(request, check_installed=False)

        return {
            ATTR_VERSION: self.addons.version_installed(addon),
            ATTR_LAST_VERSION: self.addons.get_last_version(addon),
            ATTR_STATE: await self.addons.state(addon),
            ATTR_BOOT: self.addons.get_boot(addon),
            ATTR_OPTIONS: self.addons.get_options(addon),
            ATTR_NAME: addon.name,
            ATTR_DESCRIPTON: addon.description,
            ATTR_LONG_DESCRIPTION: addon.long_description,
            ATTR_VERSION: addon.version_installed,
            ATTR_AUTO_UPDATE: addon.auto_update,
            ATTR_REPOSITORY: addon.repository,
            ATTR_LAST_VERSION: addon.last_version,
            ATTR_STATE: await addon.state(),
            ATTR_BOOT: addon.boot,
            ATTR_OPTIONS: addon.options,
            ATTR_URL: addon.url,
            ATTR_DETACHED: addon.is_detached,
            ATTR_BUILD: addon.need_build,
            ATTR_NETWORK: addon.ports,
            ATTR_HOST_NETWORK: addon.host_network,
            ATTR_HOST_IPC: addon.host_ipc,
            ATTR_HOST_DBUS: addon.host_dbus,
            ATTR_PRIVILEGED: addon.privileged,
            ATTR_DEVICES: self._pretty_devices(addon),
            ATTR_ICON: addon.with_icon,
            ATTR_LOGO: addon.with_logo,
            ATTR_CHANGELOG: addon.with_changelog,
            ATTR_WEBUI: addon.webui,
            ATTR_STDIN: addon.with_stdin,
            ATTR_HASSIO_API: addon.access_hassio_api,
            ATTR_HOMEASSISTANT_API: addon.access_homeassistant_api,
            ATTR_GPIO: addon.with_gpio,
            ATTR_AUDIO: addon.with_audio,
            ATTR_AUDIO_INPUT: addon.audio_input,
            ATTR_AUDIO_OUTPUT: addon.audio_output,
        }

    @api_process
    async def options(self, request):
        """Store user options for addon."""
        addon = self._extract_addon(request)
        options_schema = self.addons.get_schema(addon)

        addon_schema = SCHEMA_OPTIONS.extend({
            vol.Optional(ATTR_OPTIONS): options_schema,
            vol.Optional(ATTR_OPTIONS): vol.Any(None, addon.schema),
        })

        body = await api_validate(addon_schema, request)

        if ATTR_OPTIONS in body:
            self.addons.set_options(addon, body[ATTR_OPTIONS])
            addon.options = body[ATTR_OPTIONS]
        if ATTR_BOOT in body:
            self.addons.set_boot(addon, body[ATTR_BOOT])
            addon.boot = body[ATTR_BOOT]
        if ATTR_AUTO_UPDATE in body:
            addon.auto_update = body[ATTR_AUTO_UPDATE]
        if ATTR_NETWORK in body:
            addon.ports = body[ATTR_NETWORK]
        if ATTR_AUDIO_INPUT in body:
            addon.audio_input = body[ATTR_AUDIO_INPUT]
        if ATTR_AUDIO_OUTPUT in body:
            addon.audio_output = body[ATTR_AUDIO_OUTPUT]

        addon.save_data()
        return True

    @api_process
    async def install(self, request):
        """Install addon."""
        body = await api_validate(SCHEMA_VERSION, request)
        addon = self._extract_addon(request, check_installed=False)
        version = body.get(
            ATTR_VERSION, self.addons.get_last_version(addon))
    async def stats(self, request):
        """Return resource information."""
        addon = self._extract_addon(request)
        stats = await addon.stats()

        return await asyncio.shield(
            self.addons.install(addon, version), loop=self.loop)
        if not stats:
            raise RuntimeError("No stats available")

        return {
            ATTR_CPU_PERCENT: stats.cpu_percent,
            ATTR_MEMORY_USAGE: stats.memory_usage,
            ATTR_MEMORY_LIMIT: stats.memory_limit,
            ATTR_NETWORK_RX: stats.network_rx,
            ATTR_NETWORK_TX: stats.network_tx,
            ATTR_BLK_READ: stats.blk_read,
            ATTR_BLK_WRITE: stats.blk_write,
        }

    @api_process
    async def uninstall(self, request):
    def install(self, request):
        """Install addon."""
        addon = self._extract_addon(request, check_installed=False)
        return asyncio.shield(addon.install(), loop=self._loop)

    @api_process
    def uninstall(self, request):
        """Uninstall addon."""
        addon = self._extract_addon(request)

        return await asyncio.shield(
            self.addons.uninstall(addon), loop=self.loop)
        return asyncio.shield(addon.uninstall(), loop=self._loop)

    @api_process
    async def start(self, request):
    def start(self, request):
        """Start addon."""
        addon = self._extract_addon(request)

        if await self.addons.state(addon) == STATE_STARTED:
            raise RuntimeError("Addon is already running")

        # validate options
        # check options
        options = addon.options
        try:
            schema = self.addons.get_schema(addon)
            options = self.addons.get_options(addon)
            schema(options)
            addon.schema(options)
        except vol.Invalid as ex:
            raise RuntimeError(humanize_error(options, ex)) from None

        return await asyncio.shield(
            self.addons.start(addon), loop=self.loop)
        return asyncio.shield(addon.start(), loop=self._loop)

    @api_process
    async def stop(self, request):
    def stop(self, request):
        """Stop addon."""
        addon = self._extract_addon(request)

        if await self.addons.state(addon) == STATE_STOPPED:
            raise RuntimeError("Addon is already stopped")

        return await asyncio.shield(
            self.addons.stop(addon), loop=self.loop)
        return asyncio.shield(addon.stop(), loop=self._loop)

    @api_process
    async def update(self, request):
    def update(self, request):
        """Update addon."""
        body = await api_validate(SCHEMA_VERSION, request)
        addon = self._extract_addon(request)
        version = body.get(
            ATTR_VERSION, self.addons.get_last_version(addon))

        if version == self.addons.version_installed(addon):
            raise RuntimeError("Version is already in use")
        if addon.last_version == addon.version_installed:
            raise RuntimeError("No update available!")

        return await asyncio.shield(
            self.addons.update(addon, version), loop=self.loop)
        return asyncio.shield(addon.update(), loop=self._loop)

    @api_process_raw
    @api_process
    def restart(self, request):
        """Restart addon."""
        addon = self._extract_addon(request)
        return asyncio.shield(addon.restart(), loop=self._loop)

    @api_process
    def rebuild(self, request):
        """Rebuild local build addon."""
        addon = self._extract_addon(request)
        if not addon.need_build:
            raise RuntimeError("Only local build addons are supported")

        return asyncio.shield(addon.rebuild(), loop=self._loop)

    @api_process_raw(CONTENT_TYPE_BINARY)
    def logs(self, request):
        """Return logs from addon."""
        addon = self._extract_addon(request)
        return self.addons.logs(addon)
        return addon.logs()

    @api_process_raw(CONTENT_TYPE_PNG)
    async def icon(self, request):
        """Return icon from addon."""
        addon = self._extract_addon(request, check_installed=False)
        if not addon.with_icon:
            raise RuntimeError("No icon found!")

        with addon.path_icon.open('rb') as png:
            return png.read()

    @api_process_raw(CONTENT_TYPE_PNG)
    async def logo(self, request):
        """Return logo from addon."""
        addon = self._extract_addon(request, check_installed=False)
        if not addon.with_logo:
            raise RuntimeError("No logo found!")

        with addon.path_logo.open('rb') as png:
            return png.read()

    @api_process_raw(CONTENT_TYPE_TEXT)
    async def changelog(self, request):
        """Return changelog from addon."""
        addon = self._extract_addon(request, check_installed=False)
        if not addon.with_changelog:
            raise RuntimeError("No changelog found!")

        with addon.path_changelog.open('r') as changelog:
            return changelog.read()

    @api_process
    async def stdin(self, request):
        """Write to stdin of addon."""
        addon = self._extract_addon(request)
        if not addon.with_stdin:
            raise RuntimeError("STDIN not supported by addon")

        data = await request.read()
        return await asyncio.shield(addon.write_stdin(data), loop=self._loop)
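A note on the pattern above: the handlers hand long-running add-on operations to asyncio.shield, so an aborted HTTP request (a client disconnect cancels the handler task) does not cancel the install, update, or stop itself. A minimal sketch with illustrative names:

import asyncio

async def slow_install():
    """Stand-in for addon.install() or addon.update()."""
    await asyncio.sleep(2)
    return True

async def handler():
    # Cancelling handler() cancels only the outer await; the shielded
    # slow_install() task keeps running to completion.
    return await asyncio.shield(slow_install())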
hassio/api/homeassistant.py:
@@ -4,54 +4,134 @@ import logging

import voluptuous as vol

from .util import api_process, api_process_raw, api_validate
from ..const import ATTR_VERSION, ATTR_LAST_VERSION
from .utils import api_process, api_process_raw, api_validate
from ..const import (
    ATTR_VERSION, ATTR_LAST_VERSION, ATTR_IMAGE, ATTR_CUSTOM, ATTR_BOOT,
    ATTR_PORT, ATTR_PASSWORD, ATTR_SSL, ATTR_WATCHDOG, ATTR_CPU_PERCENT,
    ATTR_MEMORY_USAGE, ATTR_MEMORY_LIMIT, ATTR_NETWORK_RX, ATTR_NETWORK_TX,
    ATTR_BLK_READ, ATTR_BLK_WRITE, CONTENT_TYPE_BINARY)
from ..coresys import CoreSysAttributes
from ..validate import NETWORK_PORT, DOCKER_IMAGE

_LOGGER = logging.getLogger(__name__)


# pylint: disable=no-value-for-parameter
SCHEMA_OPTIONS = vol.Schema({
    vol.Optional(ATTR_BOOT): vol.Boolean(),
    vol.Inclusive(ATTR_IMAGE, 'custom_hass'):
        vol.Any(None, vol.Coerce(str)),
    vol.Inclusive(ATTR_LAST_VERSION, 'custom_hass'):
        vol.Any(None, DOCKER_IMAGE),
    vol.Optional(ATTR_PORT): NETWORK_PORT,
    vol.Optional(ATTR_PASSWORD): vol.Any(None, vol.Coerce(str)),
    vol.Optional(ATTR_SSL): vol.Boolean(),
    vol.Optional(ATTR_WATCHDOG): vol.Boolean(),
})

SCHEMA_VERSION = vol.Schema({
    vol.Optional(ATTR_VERSION): vol.Coerce(str),
})


class APIHomeAssistant(object):
class APIHomeAssistant(CoreSysAttributes):
    """Handle rest api for homeassistant functions."""

    def __init__(self, config, loop, homeassistant):
        """Initialize homeassistant rest api part."""
        self.config = config
        self.loop = loop
        self.homeassistant = homeassistant

    @api_process
    async def info(self, request):
        """Return Home Assistant information."""
        info = {
            ATTR_VERSION: self.homeassistant.version,
            ATTR_LAST_VERSION: self.config.last_homeassistant,
        return {
            ATTR_VERSION: self._homeassistant.version,
            ATTR_LAST_VERSION: self._homeassistant.last_version,
            ATTR_IMAGE: self._homeassistant.image,
            ATTR_CUSTOM: self._homeassistant.is_custom_image,
            ATTR_BOOT: self._homeassistant.boot,
            ATTR_PORT: self._homeassistant.api_port,
            ATTR_SSL: self._homeassistant.api_ssl,
            ATTR_WATCHDOG: self._homeassistant.watchdog,
        }

        return info
    @api_process
    async def options(self, request):
        """Set homeassistant options."""
        body = await api_validate(SCHEMA_OPTIONS, request)

        if ATTR_IMAGE in body and ATTR_LAST_VERSION in body:
            self._homeassistant.image = body[ATTR_IMAGE]
            self._homeassistant.last_version = body[ATTR_LAST_VERSION]

        if ATTR_BOOT in body:
            self._homeassistant.boot = body[ATTR_BOOT]

        if ATTR_PORT in body:
            self._homeassistant.api_port = body[ATTR_PORT]

        if ATTR_PASSWORD in body:
            self._homeassistant.api_password = body[ATTR_PASSWORD]

        if ATTR_SSL in body:
            self._homeassistant.api_ssl = body[ATTR_SSL]

        if ATTR_WATCHDOG in body:
            self._homeassistant.watchdog = body[ATTR_WATCHDOG]

        self._homeassistant.save_data()
        return True

    @api_process
    async def stats(self, request):
        """Return resource information."""
        stats = await self._homeassistant.stats()
        if not stats:
            raise RuntimeError("No stats available")

        return {
            ATTR_CPU_PERCENT: stats.cpu_percent,
            ATTR_MEMORY_USAGE: stats.memory_usage,
            ATTR_MEMORY_LIMIT: stats.memory_limit,
            ATTR_NETWORK_RX: stats.network_rx,
            ATTR_NETWORK_TX: stats.network_tx,
            ATTR_BLK_READ: stats.blk_read,
            ATTR_BLK_WRITE: stats.blk_write,
        }

    @api_process
    async def update(self, request):
        """Update host OS."""
        """Update homeassistant."""
        body = await api_validate(SCHEMA_VERSION, request)
        version = body.get(ATTR_VERSION, self.config.last_homeassistant)
        version = body.get(ATTR_VERSION, self._homeassistant.last_version)

        if self.homeassistant.in_progress:
            raise RuntimeError("Other task is in progress")

        if version == self.homeassistant.version:
            raise RuntimeError("Version is already in use")
        if version == self._homeassistant.version:
            raise RuntimeError("Version {} is already in use".format(version))

        return await asyncio.shield(
            self.homeassistant.update(version), loop=self.loop)
            self._homeassistant.update(version), loop=self._loop)

    @api_process_raw
    @api_process
    def stop(self, request):
        """Stop homeassistant."""
        return asyncio.shield(self._homeassistant.stop(), loop=self._loop)

    @api_process
    def start(self, request):
        """Start homeassistant."""
        return asyncio.shield(self._homeassistant.run(), loop=self._loop)

    @api_process
    def restart(self, request):
        """Restart homeassistant."""
        return asyncio.shield(self._homeassistant.restart(), loop=self._loop)

    @api_process_raw(CONTENT_TYPE_BINARY)
    def logs(self, request):
        """Return homeassistant docker logs.
        """Return homeassistant docker logs."""
        return self._homeassistant.logs()

        Return a coroutine.
        """
        return self.homeassistant.logs()
    @api_process
    async def check(self, request):
        """Check config of homeassistant."""
        code, message = await self._homeassistant.check_config()
        if not code:
            raise RuntimeError(message)

        return True
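The two vol.Inclusive markers in SCHEMA_OPTIONS above put ATTR_IMAGE and ATTR_LAST_VERSION into one group named 'custom_hass': a payload must supply both or neither. A small sketch with plain string keys standing in for the ATTR_* constants:

import voluptuous as vol

schema = vol.Schema({
    vol.Inclusive('image', 'custom_hass'): vol.Any(None, vol.Coerce(str)),
    vol.Inclusive('last_version', 'custom_hass'): vol.Any(None, vol.Coerce(str)),
})

schema({'image': 'me/hass', 'last_version': '0.62.1'})  # both keys: ok
schema({})                                              # neither key: ok
try:
    schema({'image': 'me/hass'})  # one without the other: rejected
except vol.MultipleInvalid:
    pass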
hassio/api/host.py:
@@ -4,10 +4,13 @@ import logging

import voluptuous as vol

from .util import api_process_hostcontrol, api_process, api_validate
from .utils import api_process_hostcontrol, api_process, api_validate
from ..const import (
    ATTR_VERSION, ATTR_LAST_VERSION, ATTR_TYPE, ATTR_HOSTNAME, ATTR_FEATURES,
    ATTR_OS)
    ATTR_OS, ATTR_SERIAL, ATTR_INPUT, ATTR_DISK, ATTR_AUDIO, ATTR_AUDIO_INPUT,
    ATTR_AUDIO_OUTPUT, ATTR_GPIO)
from ..coresys import CoreSysAttributes
from ..validate import ALSA_CHANNEL

_LOGGER = logging.getLogger(__name__)

@@ -15,46 +18,75 @@ SCHEMA_VERSION = vol.Schema({
    vol.Optional(ATTR_VERSION): vol.Coerce(str),
})

SCHEMA_OPTIONS = vol.Schema({
    vol.Optional(ATTR_AUDIO_OUTPUT): ALSA_CHANNEL,
    vol.Optional(ATTR_AUDIO_INPUT): ALSA_CHANNEL,
})

class APIHost(object):

class APIHost(CoreSysAttributes):
    """Handle rest api for host functions."""

    def __init__(self, config, loop, host_control):
        """Initialize host rest api part."""
        self.config = config
        self.loop = loop
        self.host_control = host_control

    @api_process
    async def info(self, request):
        """Return host information."""
        return {
            ATTR_TYPE: self.host_control.type,
            ATTR_VERSION: self.host_control.version,
            ATTR_LAST_VERSION: self.host_control.last_version,
            ATTR_FEATURES: self.host_control.features,
            ATTR_HOSTNAME: self.host_control.hostname,
            ATTR_OS: self.host_control.os_info,
            ATTR_TYPE: self._host_control.type,
            ATTR_VERSION: self._host_control.version,
            ATTR_LAST_VERSION: self._host_control.last_version,
            ATTR_FEATURES: self._host_control.features,
            ATTR_HOSTNAME: self._host_control.hostname,
            ATTR_OS: self._host_control.os_info,
        }

    @api_process
    async def options(self, request):
        """Process host options."""
        body = await api_validate(SCHEMA_OPTIONS, request)

        if ATTR_AUDIO_OUTPUT in body:
            self._config.audio_output = body[ATTR_AUDIO_OUTPUT]
        if ATTR_AUDIO_INPUT in body:
            self._config.audio_input = body[ATTR_AUDIO_INPUT]

        self._config.save_data()
        return True

    @api_process_hostcontrol
    def reboot(self, request):
        """Reboot host."""
        return self.host_control.reboot()
        return self._host_control.reboot()

    @api_process_hostcontrol
    def shutdown(self, request):
        """Poweroff host."""
        return self.host_control.shutdown()
        return self._host_control.shutdown()

    @api_process_hostcontrol
    async def reload(self, request):
        """Reload host data."""
        await self._host_control.load()
        return True

    @api_process_hostcontrol
    async def update(self, request):
        """Update host OS."""
        body = await api_validate(SCHEMA_VERSION, request)
        version = body.get(ATTR_VERSION, self.host_control.last_version)
        version = body.get(ATTR_VERSION, self._host_control.last_version)

        if version == self.host_control.version:
            raise RuntimeError("Version is already in use")
        if version == self._host_control.version:
            raise RuntimeError(f"Version {version} is already in use")

        return await asyncio.shield(
            self.host_control.update(version=version), loop=self.loop)
            self._host_control.update(version=version), loop=self._loop)

    @api_process
    async def hardware(self, request):
        """Return local hardware info."""
        return {
            ATTR_SERIAL: list(self._hardware.serial_devices),
            ATTR_INPUT: list(self._hardware.input_devices),
            ATTR_DISK: list(self._hardware.disk_devices),
            ATTR_GPIO: list(self._hardware.gpio_devices),
            ATTR_AUDIO: self._hardware.audio_devices,
        }
hassio/api/network.py:
@@ -1,26 +1,38 @@
"""Init file for HassIO network rest api."""
import logging

from .util import api_process_hostcontrol
import voluptuous as vol

from .utils import api_process, api_process_hostcontrol, api_validate
from ..const import ATTR_HOSTNAME
from ..coresys import CoreSysAttributes

_LOGGER = logging.getLogger(__name__)


class APINetwork(object):
SCHEMA_OPTIONS = vol.Schema({
    vol.Optional(ATTR_HOSTNAME): vol.Coerce(str),
})


class APINetwork(CoreSysAttributes):
    """Handle rest api for network functions."""

    def __init__(self, config, loop, host_control):
        """Initialize network rest api part."""
        self.config = config
        self.loop = loop
        self.host_control = host_control

    @api_process_hostcontrol
    def info(self, request):
    @api_process
    async def info(self, request):
        """Show network settings."""
        pass
        return {
            ATTR_HOSTNAME: self._host_control.hostname,
        }

    @api_process_hostcontrol
    def options(self, request):
    async def options(self, request):
        """Edit network settings."""
        pass
        body = await api_validate(SCHEMA_OPTIONS, request)

        # hostname
        if ATTR_HOSTNAME in body:
            if self._host_control.hostname != body[ATTR_HOSTNAME]:
                await self._host_control.set_hostname(body[ATTR_HOSTNAME])

        return True
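A hedged example of driving the new options endpoint from a client; the supervisor base URL and the 'hostname' payload key (the assumed value of ATTR_HOSTNAME) are illustrative, not part of the diff:

import aiohttp

async def set_hostname(session: aiohttp.ClientSession, name: str):
    """POST a new hostname to the supervisor network API."""
    async with session.post('http://hassio/network/options',
                            json={'hostname': name}) as resp:
        return await resp.json()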
New files under hassio/api/panel/:
hassio-app.html (78 lines, new file; diff suppressed because one or more lines are too long)
hassio-app.html.gz (new binary file, not shown)
hassio-main-es5.html (72 lines, new file; diff suppressed because one or more lines are too long)
hassio-main-es5.html.gz (new binary file, not shown)
hassio-main-latest.html (72 lines, new file; diff suppressed because one or more lines are too long)
hassio-main-latest.html.gz (new binary file, not shown)
index.html (37 lines, new file):
@@ -0,0 +1,37 @@
<!doctype html>
<html>
<head>
    <meta charset="utf-8">
    <title>Hass.io</title>
    <meta name='viewport' content='width=device-width, user-scalable=no'>
    <style>
        body {
            height: 100vh;
            margin: 0;
            padding: 0;
        }
    </style>
</head>
<body>
    <hassio-app></hassio-app>
    <script>
        function addScript(src) {
            var e = document.createElement('script');
            e.src = src;
            document.head.appendChild(e);
        }
        if (!window.parent.HASS_DEV) {
            addScript('/frontend_es5/custom-elements-es5-adapter.js');
        }
        var webComponentsSupported = (
            'customElements' in window &&
            'import' in document.createElement('link') &&
            'content' in document.createElement('template'));
        if (!webComponentsSupported) {
            addScript('/static/webcomponents-lite.js');
        }
    </script>
    <link rel='import' href='./hassio-app.html'>
    <link rel='import' href='/static/mdi.html' async>
</body>
</html>
hassio/api/panel/index.html.gz (new binary file, not shown)

hassio/api/proxy.py (190 lines, new file):
@@ -0,0 +1,190 @@
"""Utils for HomeAssistant Proxy."""
import asyncio
import logging

import aiohttp
from aiohttp import web
from aiohttp.web_exceptions import HTTPBadGateway
from aiohttp.hdrs import CONTENT_TYPE
import async_timeout

from ..const import HEADER_HA_ACCESS
from ..coresys import CoreSysAttributes

_LOGGER = logging.getLogger(__name__)


class APIProxy(CoreSysAttributes):
    """API Proxy for Home-Assistant."""

    async def _api_client(self, request, path, timeout=300):
        """Return a client request with proxy origin for Home-Assistant."""
        url = f"{self._homeassistant.api_url}/api/{path}"

        try:
            data = None
            headers = {}
            method = getattr(self._websession_ssl, request.method.lower())
            params = request.query or None

            # read data
            with async_timeout.timeout(30, loop=self._loop):
                data = await request.read()

            if data:
                headers.update({CONTENT_TYPE: request.content_type})

            # need api password?
            if self._homeassistant.api_password:
                headers = {HEADER_HA_ACCESS: self._homeassistant.api_password}

            # reset headers
            if not headers:
                headers = None

            client = await method(
                url, data=data, headers=headers, timeout=timeout,
                params=params
            )

            return client

        except aiohttp.ClientError as err:
            _LOGGER.error("Client error on API %s request %s.", path, err)

        except asyncio.TimeoutError:
            _LOGGER.error("Client timeout error on API request %s.", path)

        raise HTTPBadGateway()

    async def stream(self, request):
        """Proxy HomeAssistant EventStream Requests."""
        _LOGGER.info("Home-Assistant EventStream start")
        client = await self._api_client(request, 'stream', timeout=None)

        response = web.StreamResponse()
        response.content_type = request.headers.get(CONTENT_TYPE)
        try:
            await response.prepare(request)
            while True:
                data = await client.content.read(10)
                if not data:
                    await response.write_eof()
                    break
                response.write(data)

        except aiohttp.ClientError:
            await response.write_eof()

        except asyncio.CancelledError:
            pass

        finally:
            client.close()
            _LOGGER.info("Home-Assistant EventStream close")

    async def api(self, request):
        """Proxy HomeAssistant API Requests."""
        path = request.match_info.get('path', '')

        # Normal request
        _LOGGER.info("Home-Assistant /api/%s request", path)
        client = await self._api_client(request, path)

        data = await client.read()
        return web.Response(
            body=data,
            status=client.status,
            content_type=client.content_type
        )

    async def _websocket_client(self):
        """Initialize a websocket api connection."""
        url = f"{self._homeassistant.api_url}/api/websocket"

        try:
            client = await self._websession_ssl.ws_connect(
                url, heartbeat=60, verify_ssl=False)

            # handle authentication
            for _ in range(2):
                data = await client.receive_json()
                if data.get('type') == 'auth_ok':
                    return client
                elif data.get('type') == 'auth_required':
                    await client.send_json({
                        'type': 'auth',
                        'api_password': self._homeassistant.api_password,
                    })

            _LOGGER.error("Authentication to Home-Assistant websocket failed")

        except (aiohttp.ClientError, RuntimeError) as err:
            _LOGGER.error("Client error on websocket API %s.", err)

        raise HTTPBadGateway()

    async def websocket(self, request):
        """Initialize a websocket api connection."""
        _LOGGER.info("Home-Assistant Websocket API request initialize")

        # init server
        server = web.WebSocketResponse(heartbeat=60)
        await server.prepare(request)

        # handle authentication
        await server.send_json({'type': 'auth_required'})
        await server.receive_json()  # get internal token
        await server.send_json({'type': 'auth_ok'})

        # init connection to hass
        client = await self._websocket_client()

        _LOGGER.info("Home-Assistant Websocket API request running")
        try:
            client_read = None
            server_read = None
            while not server.closed and not client.closed:
                if not client_read:
                    client_read = asyncio.ensure_future(
                        client.receive_str(), loop=self._loop)
                if not server_read:
                    server_read = asyncio.ensure_future(
                        server.receive_str(), loop=self._loop)

                # wait until data need to be processed
                await asyncio.wait(
                    [client_read, server_read],
                    loop=self._loop, return_when=asyncio.FIRST_COMPLETED
                )

                # server
                if server_read.done() and not client.closed:
                    server_read.exception()
                    await client.send_str(server_read.result())
                    server_read = None

                # client
                if client_read.done() and not server.closed:
                    client_read.exception()
                    await server.send_str(client_read.result())
                    client_read = None

        except asyncio.CancelledError:
            pass

        except RuntimeError as err:
            _LOGGER.info("Home-Assistant Websocket API error: %s", err)

        finally:
            if client_read:
                client_read.cancel()
            if server_read:
                server_read.cancel()

            # close connections
            await client.close()
            await server.close()

        _LOGGER.info("Home-Assistant Websocket API connection is closed")
        return server
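The websocket handler above relays in both directions by keeping at most one pending read per peer, forwarding whichever completes first, and re-arming only that side. The same loop in isolation, with abstract send/receive callables (illustrative names, not part of the diff):

import asyncio

async def relay(a_recv, a_send, b_recv, b_send):
    """Bridge two message streams until one side stops yielding."""
    a_read = b_read = None
    while True:
        # keep exactly one outstanding read per peer
        if a_read is None:
            a_read = asyncio.ensure_future(a_recv())
        if b_read is None:
            b_read = asyncio.ensure_future(b_recv())

        await asyncio.wait([a_read, b_read],
                           return_when=asyncio.FIRST_COMPLETED)

        # forward whichever side finished, then re-arm only that side
        if a_read.done():
            await b_send(a_read.result())
            a_read = None
        if b_read.done():
            await a_send(b_read.result())
            b_read = None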
hassio/api/snapshots.py (132 lines, new file):
@@ -0,0 +1,132 @@
"""Init file for HassIO snapshot rest api."""
import asyncio
import logging

import voluptuous as vol

from .utils import api_process, api_validate
from ..snapshots.validate import ALL_FOLDERS
from ..const import (
    ATTR_NAME, ATTR_SLUG, ATTR_DATE, ATTR_ADDONS, ATTR_REPOSITORIES,
    ATTR_HOMEASSISTANT, ATTR_VERSION, ATTR_SIZE, ATTR_FOLDERS, ATTR_TYPE,
    ATTR_SNAPSHOTS)
from ..coresys import CoreSysAttributes

_LOGGER = logging.getLogger(__name__)


# pylint: disable=no-value-for-parameter
SCHEMA_RESTORE_PARTIAL = vol.Schema({
    vol.Optional(ATTR_HOMEASSISTANT): vol.Boolean(),
    vol.Optional(ATTR_ADDONS):
        vol.All([vol.Coerce(str)], vol.Unique()),
    vol.Optional(ATTR_FOLDERS):
        vol.All([vol.In(ALL_FOLDERS)], vol.Unique()),
})

SCHEMA_SNAPSHOT_FULL = vol.Schema({
    vol.Optional(ATTR_NAME): vol.Coerce(str),
})

SCHEMA_SNAPSHOT_PARTIAL = SCHEMA_SNAPSHOT_FULL.extend({
    vol.Optional(ATTR_ADDONS):
        vol.All([vol.Coerce(str)], vol.Unique()),
    vol.Optional(ATTR_FOLDERS):
        vol.All([vol.In(ALL_FOLDERS)], vol.Unique()),
})


class APISnapshots(CoreSysAttributes):
    """Handle rest api for snapshot functions."""

    def _extract_snapshot(self, request):
        """Return snapshot, or raise an exception if it does not exist."""
        snapshot = self._snapshots.get(request.match_info.get('snapshot'))
        if not snapshot:
            raise RuntimeError("Snapshot not exists")
        return snapshot

    @api_process
    async def list(self, request):
        """Return snapshot list."""
        data_snapshots = []
        for snapshot in self._snapshots.list_snapshots:
            data_snapshots.append({
                ATTR_SLUG: snapshot.slug,
                ATTR_NAME: snapshot.name,
                ATTR_DATE: snapshot.date,
                ATTR_TYPE: snapshot.sys_type,
            })

        return {
            ATTR_SNAPSHOTS: data_snapshots,
        }

    @api_process
    async def reload(self, request):
        """Reload snapshot list."""
        await asyncio.shield(self._snapshots.reload(), loop=self._loop)
        return True

    @api_process
    async def info(self, request):
        """Return snapshot info."""
        snapshot = self._extract_snapshot(request)

        data_addons = []
        for addon_data in snapshot.addons:
            data_addons.append({
                ATTR_SLUG: addon_data[ATTR_SLUG],
                ATTR_NAME: addon_data[ATTR_NAME],
                ATTR_VERSION: addon_data[ATTR_VERSION],
            })

        return {
            ATTR_SLUG: snapshot.slug,
            ATTR_TYPE: snapshot.sys_type,
            ATTR_NAME: snapshot.name,
            ATTR_DATE: snapshot.date,
            ATTR_SIZE: snapshot.size,
            ATTR_HOMEASSISTANT: snapshot.homeassistant_version,
            ATTR_ADDONS: data_addons,
            ATTR_REPOSITORIES: snapshot.repositories,
            ATTR_FOLDERS: snapshot.folders,
        }

    @api_process
    async def snapshot_full(self, request):
        """Create a full snapshot."""
        body = await api_validate(SCHEMA_SNAPSHOT_FULL, request)
        return await asyncio.shield(
            self._snapshots.do_snapshot_full(**body), loop=self._loop)

    @api_process
    async def snapshot_partial(self, request):
        """Create a partial snapshot."""
        body = await api_validate(SCHEMA_SNAPSHOT_PARTIAL, request)
        return await asyncio.shield(
            self._snapshots.do_snapshot_partial(**body), loop=self._loop)

    @api_process
    def restore_full(self, request):
        """Do a full restore of a snapshot."""
        snapshot = self._extract_snapshot(request)
        return asyncio.shield(
            self._snapshots.do_restore_full(snapshot), loop=self._loop)

    @api_process
    async def restore_partial(self, request):
        """Do a partial restore of a snapshot."""
        snapshot = self._extract_snapshot(request)
        body = await api_validate(SCHEMA_SNAPSHOT_PARTIAL, request)

        return await asyncio.shield(
            self._snapshots.do_restore_partial(snapshot, **body),
            loop=self._loop
        )

    @api_process
    async def remove(self, request):
        """Remove a snapshot."""
        snapshot = self._extract_snapshot(request)
        return self._snapshots.remove(snapshot)
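What SCHEMA_SNAPSHOT_PARTIAL accepts, sketched with plain string keys: vol.Unique() rejects duplicate list entries and vol.In() restricts folders to ALL_FOLDERS. The folder names below are an assumption about ALL_FOLDERS, which is defined in snapshots/validate.py:

import voluptuous as vol

ALL_FOLDERS = ['homeassistant', 'ssl', 'share', 'addons/local']  # assumption
schema = vol.Schema({
    vol.Optional('name'): vol.Coerce(str),
    vol.Optional('addons'): vol.All([vol.Coerce(str)], vol.Unique()),
    vol.Optional('folders'): vol.All([vol.In(ALL_FOLDERS)], vol.Unique()),
})

schema({'name': 'nightly', 'addons': ['core_ssh'], 'folders': ['ssl']})  # ok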
hassio/api/supervisor.py:
@@ -4,20 +4,25 @@ import logging

import voluptuous as vol

from .util import api_process, api_process_raw, api_validate
from ..addons.util import create_hash_index_list
from .utils import api_process, api_process_raw, api_validate
from ..const import (
    ATTR_ADDONS, ATTR_VERSION, ATTR_LAST_VERSION, ATTR_BETA_CHANNEL,
    HASSIO_VERSION, ATTR_ADDONS_REPOSITORIES, ATTR_REPOSITORIES,
    ATTR_REPOSITORY, ATTR_DESCRIPTON, ATTR_NAME, ATTR_SLUG, ATTR_INSTALLED,
    ATTR_DETACHED, ATTR_SOURCE, ATTR_MAINTAINER, ATTR_URL)
    ATTR_ADDONS, ATTR_VERSION, ATTR_LAST_VERSION, ATTR_BETA_CHANNEL, ATTR_ARCH,
    HASSIO_VERSION, ATTR_ADDONS_REPOSITORIES, ATTR_LOGO, ATTR_REPOSITORY,
    ATTR_DESCRIPTON, ATTR_NAME, ATTR_SLUG, ATTR_INSTALLED, ATTR_TIMEZONE,
    ATTR_STATE, ATTR_WAIT_BOOT, ATTR_CPU_PERCENT, ATTR_MEMORY_USAGE,
    ATTR_MEMORY_LIMIT, ATTR_NETWORK_RX, ATTR_NETWORK_TX, ATTR_BLK_READ,
    ATTR_BLK_WRITE, CONTENT_TYPE_BINARY, ATTR_ICON)
from ..coresys import CoreSysAttributes
from ..validate import validate_timezone, WAIT_BOOT, REPOSITORIES

_LOGGER = logging.getLogger(__name__)

SCHEMA_OPTIONS = vol.Schema({
    # pylint: disable=no-value-for-parameter
    vol.Optional(ATTR_BETA_CHANNEL): vol.Boolean(),
    vol.Optional(ATTR_ADDONS_REPOSITORIES): [vol.Url()],
    vol.Optional(ATTR_ADDONS_REPOSITORIES): REPOSITORIES,
    vol.Optional(ATTR_TIMEZONE): validate_timezone,
    vol.Optional(ATTR_WAIT_BOOT): WAIT_BOOT,
})

SCHEMA_VERSION = vol.Schema({
@@ -25,53 +30,9 @@ SCHEMA_VERSION = vol.Schema({
})


class APISupervisor(object):
class APISupervisor(CoreSysAttributes):
    """Handle rest api for supervisor functions."""

    def __init__(self, config, loop, supervisor, addons, host_control):
        """Initialize supervisor rest api part."""
        self.config = config
        self.loop = loop
        self.supervisor = supervisor
        self.addons = addons
        self.host_control = host_control

    def _addons_list(self, only_installed):
        """Return a list of addons."""
        data = []
        detached = self.addons.list_detached

        for addon, values in self.addons.list_all.items():
            i_version = self.addons.version_installed(addon)

            data.append({
                ATTR_NAME: values[ATTR_NAME],
                ATTR_SLUG: addon,
                ATTR_DESCRIPTON: values[ATTR_DESCRIPTON],
                ATTR_VERSION: values[ATTR_VERSION],
                ATTR_INSTALLED: i_version,
                ATTR_DETACHED: addon in detached,
                ATTR_REPOSITORY: values[ATTR_REPOSITORY],
            })

        return data

    def _repositories_list(self):
        """Return a list of addons repositories."""
        data = []
        list_id = create_hash_index_list(self.config.addons_repositories)

        for repository in self.addons.list_repositories:
            data.append({
                ATTR_SLUG: repository[ATTR_SLUG],
                ATTR_NAME: repository[ATTR_NAME],
                ATTR_SOURCE: list_id.get(repository[ATTR_SLUG]),
                ATTR_URL: repository.get(ATTR_URL),
                ATTR_MAINTAINER: repository.get(ATTR_MAINTAINER),
            })

        return data

    @api_process
    async def ping(self, request):
        """Return ok for signal that the api is ready."""
@@ -80,20 +41,30 @@ class APISupervisor(object):
    @api_process
    async def info(self, request):
        """Return supervisor information."""
        list_addons = []
        for addon in self._addons.list_addons:
            if addon.is_installed:
                list_addons.append({
                    ATTR_NAME: addon.name,
                    ATTR_SLUG: addon.slug,
                    ATTR_DESCRIPTON: addon.description,
                    ATTR_STATE: await addon.state(),
                    ATTR_VERSION: addon.last_version,
                    ATTR_INSTALLED: addon.version_installed,
                    ATTR_REPOSITORY: addon.repository,
                    ATTR_ICON: addon.with_icon,
                    ATTR_LOGO: addon.with_logo,
                })

        return {
            ATTR_VERSION: HASSIO_VERSION,
            ATTR_LAST_VERSION: self.config.last_hassio,
            ATTR_BETA_CHANNEL: self.config.upstream_beta,
            ATTR_ADDONS: self._addons_list(only_installed=True),
            ATTR_ADDONS_REPOSITORIES: self.config.addons_repositories,
        }

    @api_process
    async def available_addons(self, request):
        """Return information for all available addons."""
        return {
            ATTR_ADDONS: self._addons_list(only_installed=False),
            ATTR_REPOSITORIES: self._repositories_list(),
            ATTR_LAST_VERSION: self._updater.version_hassio,
            ATTR_BETA_CHANNEL: self._updater.beta_channel,
            ATTR_ARCH: self._arch,
            ATTR_WAIT_BOOT: self._config.wait_boot,
            ATTR_TIMEZONE: self._config.timezone,
            ATTR_ADDONS: list_addons,
            ATTR_ADDONS_REPOSITORIES: self._config.addons_repositories,
        }

    @api_process
@@ -102,49 +73,59 @@ class APISupervisor(object):
        body = await api_validate(SCHEMA_OPTIONS, request)

        if ATTR_BETA_CHANNEL in body:
            self.config.upstream_beta = body[ATTR_BETA_CHANNEL]
            self._updater.beta_channel = body[ATTR_BETA_CHANNEL]

        if ATTR_TIMEZONE in body:
            self._config.timezone = body[ATTR_TIMEZONE]

        if ATTR_WAIT_BOOT in body:
            self._config.wait_boot = body[ATTR_WAIT_BOOT]

        if ATTR_ADDONS_REPOSITORIES in body:
            new = set(body[ATTR_ADDONS_REPOSITORIES])
            old = set(self.config.addons_repositories)

            # add new repositories
            tasks = [self.addons.add_git_repository(url) for url in
                     set(new - old)]
            if tasks:
                await asyncio.shield(
                    asyncio.wait(tasks, loop=self.loop), loop=self.loop)

            # remove old repositories
            for url in set(old - new):
                self.addons.drop_git_repository(url)

            # read repository
            self.addons.read_data_from_repositories()
            await asyncio.shield(self._addons.load_repositories(new))

        self._updater.save_data()
        self._config.save_data()
        return True

    @api_process
    async def stats(self, request):
        """Return resource information."""
        stats = await self._supervisor.stats()
        if not stats:
            raise RuntimeError("No stats available")

        return {
            ATTR_CPU_PERCENT: stats.cpu_percent,
            ATTR_MEMORY_USAGE: stats.memory_usage,
            ATTR_MEMORY_LIMIT: stats.memory_limit,
            ATTR_NETWORK_RX: stats.network_rx,
            ATTR_NETWORK_TX: stats.network_tx,
            ATTR_BLK_READ: stats.blk_read,
            ATTR_BLK_WRITE: stats.blk_write,
        }

    @api_process
    async def update(self, request):
        """Update supervisor OS."""
        body = await api_validate(SCHEMA_VERSION, request)
        version = body.get(ATTR_VERSION, self.config.last_hassio)
        version = body.get(ATTR_VERSION, self._updater.version_hassio)

        if version == self.supervisor.version:
            raise RuntimeError("Version is already in use")
        if version == self._supervisor.version:
            raise RuntimeError("Version {} is already in use".format(version))

        return await asyncio.shield(
            self.supervisor.update(version), loop=self.loop)
            self._supervisor.update(version), loop=self._loop)

    @api_process
    async def reload(self, request):
        """Reload addons, config etc."""
        tasks = [
            self.addons.reload(), self.config.fetch_update_infos(),
            self.host_control.load()
            self._updater.reload(),
        ]
        results, _ = await asyncio.shield(
            asyncio.wait(tasks, loop=self.loop), loop=self.loop)
            asyncio.wait(tasks, loop=self._loop), loop=self._loop)

        for result in results:
            if result.exception() is not None:
@@ -152,10 +133,7 @@ class APISupervisor(object):

        return True

    @api_process_raw
    @api_process_raw(CONTENT_TYPE_BINARY)
    def logs(self, request):
        """Return supervisor docker logs.

        Return a coroutine.
        """
        return self.supervisor.logs()
        """Return supervisor docker logs."""
        return self._supervisor.logs()
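The reload() handler above fans out several coroutines with asyncio.wait and then walks the finished tasks for exceptions, rather than letting one failure cancel the rest. Reduced to a sketch with illustrative names:

import asyncio

async def reload_all(*coros):
    """Run coroutines together; surface the first failure afterwards."""
    tasks = [asyncio.ensure_future(coro) for coro in coros]
    results, _ = await asyncio.wait(tasks)
    for result in results:
        # every task has completed; re-raise the first recorded exception
        if result.exception() is not None:
            raise result.exception()
    return True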
@@ -1,5 +1,6 @@
|
||||
"""Init file for HassIO util for rest api."""
|
||||
import json
|
||||
import hashlib
|
||||
import logging
|
||||
|
||||
from aiohttp import web
|
||||
@@ -8,17 +9,20 @@ import voluptuous as vol
|
||||
from voluptuous.humanize import humanize_error
|
||||
|
||||
from ..const import (
|
||||
JSON_RESULT, JSON_DATA, JSON_MESSAGE, RESULT_OK, RESULT_ERROR)
|
||||
JSON_RESULT, JSON_DATA, JSON_MESSAGE, RESULT_OK, RESULT_ERROR,
|
||||
CONTENT_TYPE_BINARY)
|
||||
|
||||
_LOGGER = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def json_loads(data):
|
||||
"""Extract json from string with support for '' and None."""
|
||||
if not data:
|
||||
return {}
|
||||
try:
|
||||
return json.loads(data)
|
||||
except json.JSONDecodeError:
|
||||
return {}
|
||||
raise RuntimeError("Invalid json")
|
||||
|
||||
|
||||
def api_process(method):
@@ -32,6 +36,8 @@ def api_process(method):

    if isinstance(answer, dict):
        return api_return_ok(data=answer)
+    if isinstance(answer, web.Response):
+        return answer
    elif answer:
        return api_return_ok()
    return api_return_error()
@@ -43,7 +49,8 @@ def api_process_hostcontrol(method):
    """Wrap HostControl calls to rest api."""
    async def wrap_hostcontrol(api, *args, **kwargs):
        """Return host information."""
-        if not api.host_control.active:
+        # pylint: disable=protected-access
+        if not api._host_control.active:
            raise HTTPServiceUnavailable()

        try:
@@ -62,18 +69,23 @@ def api_process_hostcontrol(method):
    return wrap_hostcontrol


-def api_process_raw(method):
-    """Wrap function with raw output to rest api."""
-    async def wrap_api(api, *args, **kwargs):
-        """Return api information."""
-        try:
-            message = await method(api, *args, **kwargs)
-        except RuntimeError as err:
-            message = str(err).encode()
-
-        return web.Response(body=message)
-
-    return wrap_api
+def api_process_raw(content):
+    """Wrap content_type into function."""
+    def wrap_method(method):
+        """Wrap function with raw output to rest api."""
+        async def wrap_api(api, *args, **kwargs):
+            """Return api information."""
+            try:
+                msg_data = await method(api, *args, **kwargs)
+                msg_type = content
+            except RuntimeError as err:
+                msg_data = str(err).encode()
+                msg_type = CONTENT_TYPE_BINARY
+
+            return web.Response(body=msg_data, content_type=msg_type)
+
+        return wrap_api
+    return wrap_method

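With the factory form, each handler declares its payload type at decoration time; the supervisor logs endpoint earlier in this diff is the canonical caller. A minimal sketch (editor's illustration):

    @api_process_raw(CONTENT_TYPE_BINARY)
    def logs(self, request):
        """Return docker logs as raw bytes."""
        return self._supervisor.logs()

If the wrapped call raises RuntimeError, the error text is encoded and returned with the binary content type instead of a JSON error envelope.
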
def api_return_error(message=None):
@@ -101,3 +113,9 @@ async def api_validate(schema, request):
        raise RuntimeError(humanize_error(data, ex)) from None

    return data


+def hash_password(password):
+    """Hash and salt our passwords."""
+    key = ")*()*SALT_HASSIO2123{}6554547485HSKA!!*JSLAfdasda$".format(password)
+    return hashlib.sha256(key.encode()).hexdigest()
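Because the salt is a fixed string baked into the format call, the digest is deterministic: hashing the same password twice yields the same value, which is presumably what allows stored hashes to be compared on login (editor's illustration):

    assert hash_password('secret') == hash_password('secret')
    assert hash_password('secret') != hash_password('other')

Note that a static salt means identical passwords collide across users; it trades per-user salting for simplicity.
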
@@ -2,18 +2,46 @@
import logging
import os
import signal
import shutil
from pathlib import Path

from colorlog import ColoredFormatter

from .addons import AddonManager
from .api import RestAPI
from .const import SOCKET_DOCKER
from .config import CoreConfig
from .coresys import CoreSys
from .supervisor import Supervisor
from .homeassistant import HomeAssistant
from .snapshots import SnapshotsManager
from .tasks import Tasks
from .updater import Updater

_LOGGER = logging.getLogger(__name__)


-def initialize_system_data(websession):
+def initialize_coresys(loop):
+    """Initialize HassIO coresys/objects."""
+    coresys = CoreSys(loop)
+
+    # Initialize core objects
+    coresys.updater = Updater(coresys)
+    coresys.api = RestAPI(coresys)
+    coresys.supervisor = Supervisor(coresys)
+    coresys.homeassistant = HomeAssistant(coresys)
+    coresys.addons = AddonManager(coresys)
+    coresys.snapshots = SnapshotsManager(coresys)
+    coresys.tasks = Tasks(coresys)
+
+    # bootstrap config
+    initialize_system_data(coresys)
+
+    return coresys
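
A plausible entry-point wiring for these helpers (editor's sketch; the actual __main__ module is outside this excerpt):

    import asyncio

    from hassio import bootstrap
    from hassio.core import HassIO

    loop = asyncio.get_event_loop()
    coresys = bootstrap.initialize_coresys(loop)
    hassio = HassIO(coresys)

    bootstrap.reg_signal(loop)
    loop.run_until_complete(hassio.setup())
    loop.create_task(hassio.start())
    loop.run_forever()
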
+def initialize_system_data(coresys):
    """Setup default config and create folders."""
-    config = CoreConfig(websession)
+    config = coresys.config

    # homeassistant config folder
    if not config.path_config.is_dir():
@@ -21,36 +49,58 @@ def initialize_system_data(websession):
            "Create Home-Assistant config folder %s", config.path_config)
        config.path_config.mkdir()

-    # homeassistant ssl folder
+    # hassio ssl folder
    if not config.path_ssl.is_dir():
-        _LOGGER.info("Create Home-Assistant ssl folder %s", config.path_ssl)
+        _LOGGER.info("Create hassio ssl folder %s", config.path_ssl)
        config.path_ssl.mkdir()

-    # homeassistant addon data folder
+    # hassio addon data folder
    if not config.path_addons_data.is_dir():
-        _LOGGER.info("Create Home-Assistant addon data folder %s",
-                     config.path_addons_data)
+        _LOGGER.info(
+            "Create hassio addon data folder %s", config.path_addons_data)
        config.path_addons_data.mkdir(parents=True)

    if not config.path_addons_local.is_dir():
-        _LOGGER.info("Create Home-Assistant addon local repository folder %s",
+        _LOGGER.info("Create hassio addon local repository folder %s",
                     config.path_addons_local)
        config.path_addons_local.mkdir(parents=True)

    if not config.path_addons_git.is_dir():
-        _LOGGER.info("Create Home-Assistant addon git repositories folder %s",
+        _LOGGER.info("Create hassio addon git repositories folder %s",
                     config.path_addons_git)
        config.path_addons_git.mkdir(parents=True)

-    # homeassistant backup folder
+    # hassio tmp folder
+    if not config.path_tmp.is_dir():
+        _LOGGER.info("Create hassio temp folder %s", config.path_tmp)
+        config.path_tmp.mkdir(parents=True)
+
+    # hassio backup folder
    if not config.path_backup.is_dir():
-        _LOGGER.info("Create Home-Assistant backup folder %s",
-                     config.path_backup)
+        _LOGGER.info("Create hassio backup folder %s", config.path_backup)
        config.path_backup.mkdir()

+    # share folder
+    if not config.path_share.is_dir():
+        _LOGGER.info("Create hassio share folder %s", config.path_share)
+        config.path_share.mkdir()

    return config


def migrate_system_env(coresys):
    """Cleanup some stuff after update."""
    config = coresys.config

    # hass.io 0.37 -> 0.38
    old_build = Path(config.path_hassio, "addons/build")
    if old_build.is_dir():
        try:
            old_build.rmdir()
        except OSError:
            _LOGGER.warning("Can't cleanup old addons build dir.")


def initialize_logging():
    """Setup the logging."""
    logging.basicConfig(level=logging.INFO)
@@ -78,6 +128,7 @@ def initialize_logging():


def check_environment():
    """Check if all environment variables exist."""
+    # check environment variables
    for key in ('SUPERVISOR_SHARE', 'SUPERVISOR_NAME',
                'HOMEASSISTANT_REPOSITORY'):
        try:
@@ -86,29 +137,35 @@ def check_environment():
        _LOGGER.fatal("Can't find %s in env!", key)
        return False

+    # check docker socket
    if not SOCKET_DOCKER.is_socket():
        _LOGGER.fatal("Can't find docker socket!")
        return False

+    # check socat exec
+    if not shutil.which('socat'):
+        _LOGGER.fatal("Can't find socat program!")
+        return False
+
    return True


-def reg_signal(loop, hassio):
+def reg_signal(loop):
    """Register SIGTERM, SIGHUP and SIGINT to stop system."""
    try:
        loop.add_signal_handler(
-            signal.SIGTERM, lambda: loop.create_task(hassio.stop()))
+            signal.SIGTERM, lambda: loop.call_soon(loop.stop))
    except (ValueError, RuntimeError):
        _LOGGER.warning("Could not bind to SIGTERM")

    try:
        loop.add_signal_handler(
-            signal.SIGHUP, lambda: loop.create_task(hassio.stop()))
+            signal.SIGHUP, lambda: loop.call_soon(loop.stop))
    except (ValueError, RuntimeError):
        _LOGGER.warning("Could not bind to SIGHUP")

    try:
        loop.add_signal_handler(
-            signal.SIGINT, lambda: loop.create_task(hassio.stop()))
+            signal.SIGINT, lambda: loop.call_soon(loop.stop))
    except (ValueError, RuntimeError):
        _LOGGER.warning("Could not bind to SIGINT")

hassio/config.py
@@ -1,153 +1,80 @@
"""Bootstrap HassIO."""
from datetime import datetime
import logging
import json
import os
from pathlib import Path, PurePath

import voluptuous as vol
from voluptuous.humanize import humanize_error

-from .const import FILE_HASSIO_CONFIG, HASSIO_SHARE
-from .tools import (
-    fetch_last_versions, write_json_file, read_json_file)
+from .const import (
+    FILE_HASSIO_CONFIG, HASSIO_DATA, ATTR_TIMEZONE, ATTR_ADDONS_CUSTOM_LIST,
+    ATTR_AUDIO_INPUT, ATTR_AUDIO_OUTPUT, ATTR_LAST_BOOT, ATTR_WAIT_BOOT)
+from .utils.dt import parse_datetime
+from .utils.json import JsonConfig
+from .validate import SCHEMA_HASSIO_CONFIG

_LOGGER = logging.getLogger(__name__)

HOMEASSISTANT_CONFIG = PurePath("homeassistant")
HOMEASSISTANT_LAST = 'homeassistant_last'

HASSIO_SSL = PurePath("ssl")
HASSIO_LAST = 'hassio_last'
HASSIO_CLEANUP = 'hassio_cleanup'

ADDONS_CORE = PurePath("addons/core")
ADDONS_LOCAL = PurePath("addons/local")
ADDONS_GIT = PurePath("addons/git")
ADDONS_DATA = PurePath("addons/data")
ADDONS_CUSTOM_LIST = 'addons_custom_list'

BACKUP_DATA = PurePath("backup")
SHARE_DATA = PurePath("share")
TMP_DATA = PurePath("tmp")

UPSTREAM_BETA = 'upstream_beta'

API_ENDPOINT = 'api_endpoint'
DEFAULT_BOOT_TIME = datetime.utcfromtimestamp(0).isoformat()


# pylint: disable=no-value-for-parameter
SCHEMA_CONFIG = vol.Schema({
    vol.Optional(UPSTREAM_BETA, default=False): vol.Boolean(),
    vol.Optional(API_ENDPOINT): vol.Coerce(str),
    vol.Optional(HOMEASSISTANT_LAST): vol.Coerce(str),
    vol.Optional(HASSIO_LAST): vol.Coerce(str),
    vol.Optional(HASSIO_CLEANUP): vol.Coerce(str),
    vol.Optional(ADDONS_CUSTOM_LIST, default=[]): [vol.Url()],
}, extra=vol.REMOVE_EXTRA)


class Config(object):
    """Hold all config data."""

    def __init__(self, config_file):
        """Initialize config object."""
        self._file = config_file
        self._data = {}

        # init or load data
        if self._file.is_file():
            try:
                self._data = read_json_file(self._file)
            except (OSError, json.JSONDecodeError):
                _LOGGER.warning("Can't read %s", self._file)
                self._data = {}

    def save(self):
        """Store data to config file."""
        if not write_json_file(self._file, self._data):
            _LOGGER.error("Can't store config in %s", self._file)
            return False
        return True


-class CoreConfig(Config):
+class CoreConfig(JsonConfig):
    """Hold all core config data."""

-    def __init__(self, websession):
+    def __init__(self):
        """Initialize config object."""
-        self.websession = websession
-
-        super().__init__(FILE_HASSIO_CONFIG)
-
-        # validate data
-        try:
-            self._data = SCHEMA_CONFIG(self._data)
-            self.save()
-        except vol.Invalid as ex:
-            _LOGGER.warning(
-                "Invalid config %s", humanize_error(self._data, ex))
-
-    async def fetch_update_infos(self):
-        """Read current versions from web."""
-        last = await fetch_last_versions(
-            self.websession, beta=self.upstream_beta)
-
-        if last:
-            self._data.update({
-                HOMEASSISTANT_LAST: last.get('homeassistant'),
-                HASSIO_LAST: last.get('hassio'),
-            })
-            self.save()
-            return True
-
-        return False
+        super().__init__(FILE_HASSIO_CONFIG, SCHEMA_HASSIO_CONFIG)

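CoreConfig now delegates loading, schema validation, and persistence to the shared JsonConfig base. JsonConfig itself is not part of this excerpt; a hypothetical minimal shape, inferred from how it is called here:

    class JsonConfig(object):
        """Hypothetical base: persist a schema-validated dict as JSON."""

        def __init__(self, json_file, schema):
            self._file = json_file
            self._schema = schema
            self._data = {}
            # load self._file if it exists, then validate with self._schema

        def save(self):
            """Validate self._data and write it back to self._file."""
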
    @property
-    def api_endpoint(self):
-        """Return IP address of api endpoint."""
-        return self._data[API_ENDPOINT]
+    def timezone(self):
+        """Return system timezone."""
+        return self._data[ATTR_TIMEZONE]

-    @api_endpoint.setter
-    def api_endpoint(self, value):
-        """Store IP address of api endpoint."""
-        self._data[API_ENDPOINT] = value
+    @timezone.setter
+    def timezone(self, value):
+        """Set system timezone."""
+        self._data[ATTR_TIMEZONE] = value

    @property
-    def upstream_beta(self):
-        """Return True if we run in beta upstream."""
-        return self._data[UPSTREAM_BETA]
+    def wait_boot(self):
+        """Return wait time for auto boot stages."""
+        return self._data[ATTR_WAIT_BOOT]

-    @upstream_beta.setter
-    def upstream_beta(self, value):
-        """Set beta upstream mode."""
-        self._data[UPSTREAM_BETA] = bool(value)
+    @wait_boot.setter
+    def wait_boot(self, value):
+        """Set wait boot time."""
+        self._data[ATTR_WAIT_BOOT] = value

    @property
-    def hassio_cleanup(self):
-        """Return version that needs cleanup."""
-        return self._data.get(HASSIO_CLEANUP)
+    def last_boot(self):
+        """Return last boot datetime."""
+        boot_str = self._data.get(ATTR_LAST_BOOT, DEFAULT_BOOT_TIME)

-    @hassio_cleanup.setter
-    def hassio_cleanup(self, version):
-        """Set or remove cleanup flag."""
-        if version is None:
-            self._data.pop(HASSIO_CLEANUP, None)
-        else:
-            self._data[HASSIO_CLEANUP] = version
-        self.save()
+        boot_time = parse_datetime(boot_str)
+        if not boot_time:
+            return datetime.utcfromtimestamp(1)
+        return boot_time

+    @last_boot.setter
+    def last_boot(self, value):
+        """Set last boot datetime."""
+        self._data[ATTR_LAST_BOOT] = value.isoformat()

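A round-trip through last_boot, assuming parse_datetime is the inverse of datetime.isoformat() (editor's illustration):

    from datetime import datetime

    config.last_boot = datetime(2018, 1, 1, 12, 0)  # stored as '2018-01-01T12:00:00'
    assert config.last_boot == datetime(2018, 1, 1, 12, 0)
    # missing or unparseable values fall back to a fixed early default
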
    @property
    def homeassistant_image(self):
        """Return docker homeassistant repository."""
        return os.environ['HOMEASSISTANT_REPOSITORY']

    @property
    def last_homeassistant(self):
        """Actual version of homeassistant."""
        return self._data.get(HOMEASSISTANT_LAST)

    @property
-    def last_hassio(self):
-        """Actual version of hassio."""
-        return self._data.get(HASSIO_LAST)
+    def path_hassio(self):
+        """Return hassio data path."""
+        return HASSIO_DATA

    @property
    def path_extern_hassio(self):
@@ -162,7 +89,7 @@ class CoreConfig(Config):
    @property
    def path_config(self):
        """Return config path inside supervisor."""
-        return Path(HASSIO_SHARE, HOMEASSISTANT_CONFIG)
+        return Path(HASSIO_DATA, HOMEASSISTANT_CONFIG)

    @property
    def path_extern_ssl(self):
@@ -172,66 +99,98 @@ class CoreConfig(Config):
    @property
    def path_ssl(self):
        """Return SSL path inside supervisor."""
-        return Path(HASSIO_SHARE, HASSIO_SSL)
+        return Path(HASSIO_DATA, HASSIO_SSL)

    @property
    def path_addons_core(self):
        """Return git path for core addons."""
-        return Path(HASSIO_SHARE, ADDONS_CORE)
+        return Path(HASSIO_DATA, ADDONS_CORE)

    @property
    def path_addons_git(self):
        """Return path for git addons."""
-        return Path(HASSIO_SHARE, ADDONS_GIT)
+        return Path(HASSIO_DATA, ADDONS_GIT)

    @property
    def path_addons_local(self):
        """Return path for custom addons."""
-        return Path(HASSIO_SHARE, ADDONS_LOCAL)
+        return Path(HASSIO_DATA, ADDONS_LOCAL)

    @property
    def path_extern_addons_local(self):
        """Return path for custom addons."""
-        return str(PurePath(self.path_extern_hassio, ADDONS_LOCAL))
+        return PurePath(self.path_extern_hassio, ADDONS_LOCAL)

    @property
    def path_addons_data(self):
        """Return root addon data folder."""
-        return Path(HASSIO_SHARE, ADDONS_DATA)
+        return Path(HASSIO_DATA, ADDONS_DATA)

    @property
    def path_extern_addons_data(self):
        """Return root addon data folder extern for docker."""
-        return str(PurePath(self.path_extern_hassio, ADDONS_DATA))
+        return PurePath(self.path_extern_hassio, ADDONS_DATA)

+    @property
+    def path_tmp(self):
+        """Return hass.io temp folder."""
+        return Path(HASSIO_DATA, TMP_DATA)
+
    @property
    def path_backup(self):
        """Return root backup data folder."""
-        return Path(HASSIO_SHARE, BACKUP_DATA)
+        return Path(HASSIO_DATA, BACKUP_DATA)

    @property
    def path_extern_backup(self):
        """Return root backup data folder extern for docker."""
-        return str(PurePath(self.path_extern_hassio, BACKUP_DATA))
+        return PurePath(self.path_extern_hassio, BACKUP_DATA)

+    @property
+    def path_share(self):
+        """Return root share data folder."""
+        return Path(HASSIO_DATA, SHARE_DATA)
+
+    @property
+    def path_extern_share(self):
+        """Return root share data folder extern for docker."""
+        return PurePath(self.path_extern_hassio, SHARE_DATA)
+
    @property
    def addons_repositories(self):
        """Return list of addons custom repositories."""
-        return self._data[ADDONS_CUSTOM_LIST]
+        return self._data[ATTR_ADDONS_CUSTOM_LIST]

-    @addons_repositories.setter
-    def addons_repositories(self, repo):
+    def add_addon_repository(self, repo):
        """Add a custom repository to list."""
-        if repo in self._data[ADDONS_CUSTOM_LIST]:
+        if repo in self._data[ATTR_ADDONS_CUSTOM_LIST]:
            return

-        self._data[ADDONS_CUSTOM_LIST].append(repo)
-        self.save()
+        self._data[ATTR_ADDONS_CUSTOM_LIST].append(repo)

    def drop_addon_repository(self, repo):
        """Remove a custom repository from list."""
-        if repo not in self._data[ADDONS_CUSTOM_LIST]:
+        if repo not in self._data[ATTR_ADDONS_CUSTOM_LIST]:
            return

-        self._data[ADDONS_CUSTOM_LIST].remove(repo)
-        self.save()
+        self._data[ATTR_ADDONS_CUSTOM_LIST].remove(repo)

+    @property
+    def audio_output(self):
+        """Return ALSA audio output card,dev."""
+        return self._data.get(ATTR_AUDIO_OUTPUT)
+
+    @audio_output.setter
+    def audio_output(self, value):
+        """Set ALSA audio output card,dev."""
+        self._data[ATTR_AUDIO_OUTPUT] = value
+
+    @property
+    def audio_input(self):
+        """Return ALSA audio input card,dev."""
+        return self._data.get(ATTR_AUDIO_INPUT)
+
+    @audio_input.setter
+    def audio_input(self, value):
+        """Set ALSA audio input card,dev."""
+        self._data[ATTR_AUDIO_INPUT] = value

hassio/const.py
@@ -1,31 +1,36 @@
"""Const file for HassIO."""
from pathlib import Path
from ipaddress import ip_network

-HASSIO_VERSION = '0.20'
+HASSIO_VERSION = '0.84'

URL_HASSIO_VERSION = ('https://raw.githubusercontent.com/home-assistant/'
-                      'hassio/master/version.json')
-URL_HASSIO_VERSION_BETA = ('https://raw.githubusercontent.com/home-assistant/'
-                           'hassio/dev/version.json')
+                      'hassio/{}/version.json')

URL_HASSIO_ADDONS = 'https://github.com/home-assistant/hassio-addons'

-DOCKER_REPO = "homeassistant"
+HASSIO_DATA = Path("/data")

-HASSIO_SHARE = Path("/data")
-
-RUN_UPDATE_INFO_TASKS = 28800
-RUN_UPDATE_SUPERVISOR_TASKS = 29100
-RUN_RELOAD_ADDONS_TASKS = 28800
-
-RESTART_EXIT_CODE = 100
-
-FILE_HASSIO_ADDONS = Path(HASSIO_SHARE, "addons.json")
-FILE_HASSIO_CONFIG = Path(HASSIO_SHARE, "config.json")
+FILE_HASSIO_ADDONS = Path(HASSIO_DATA, "addons.json")
+FILE_HASSIO_CONFIG = Path(HASSIO_DATA, "config.json")
+FILE_HASSIO_HOMEASSISTANT = Path(HASSIO_DATA, "homeassistant.json")
+FILE_HASSIO_UPDATER = Path(HASSIO_DATA, "updater.json")

SOCKET_DOCKER = Path("/var/run/docker.sock")
SOCKET_HC = Path("/var/run/hassio-hc.sock")

+DOCKER_NETWORK = 'hassio'
+DOCKER_NETWORK_MASK = ip_network('172.30.32.0/23')
+DOCKER_NETWORK_RANGE = ip_network('172.30.33.0/24')
+
LABEL_VERSION = 'io.hass.version'
LABEL_ARCH = 'io.hass.arch'
LABEL_TYPE = 'io.hass.type'

META_ADDON = 'addon'
META_SUPERVISOR = 'supervisor'
META_HOMEASSISTANT = 'homeassistant'

JSON_RESULT = 'result'
JSON_DATA = 'data'
JSON_MESSAGE = 'message'
@@ -33,13 +38,29 @@ JSON_MESSAGE = 'message'
RESULT_ERROR = 'error'
RESULT_OK = 'ok'

+CONTENT_TYPE_BINARY = 'application/octet-stream'
+CONTENT_TYPE_PNG = 'image/png'
+CONTENT_TYPE_JSON = 'application/json'
+CONTENT_TYPE_TEXT = 'text/plain'
+HEADER_HA_ACCESS = 'x-ha-access'
+
ATTR_WAIT_BOOT = 'wait_boot'
ATTR_WATCHDOG = 'watchdog'
ATTR_CHANGELOG = 'changelog'
ATTR_DATE = 'date'
ATTR_ARCH = 'arch'
ATTR_LONG_DESCRIPTION = 'long_description'
ATTR_HOSTNAME = 'hostname'
ATTR_TIMEZONE = 'timezone'
ATTR_ARGS = 'args'
ATTR_OS = 'os'
ATTR_TYPE = 'type'
ATTR_SOURCE = 'source'
ATTR_FEATURES = 'features'
ATTR_ADDONS = 'addons'
ATTR_VERSION = 'version'
ATTR_AUTO_UART = 'auto_uart'
ATTR_LAST_BOOT = 'last_boot'
ATTR_LAST_VERSION = 'last_version'
ATTR_BETA_CHANNEL = 'beta_channel'
ATTR_NAME = 'name'
@@ -48,21 +69,78 @@ ATTR_DESCRIPTON = 'description'
ATTR_STARTUP = 'startup'
ATTR_BOOT = 'boot'
ATTR_PORTS = 'ports'
ATTR_PORT = 'port'
ATTR_SSL = 'ssl'
ATTR_MAP = 'map'
ATTR_WEBUI = 'webui'
ATTR_OPTIONS = 'options'
ATTR_INSTALLED = 'installed'
ATTR_DETACHED = 'detached'
ATTR_STATE = 'state'
ATTR_SCHEMA = 'schema'
ATTR_IMAGE = 'image'
ATTR_ICON = 'icon'
ATTR_LOGO = 'logo'
ATTR_STDIN = 'stdin'
ATTR_ADDONS_REPOSITORIES = 'addons_repositories'
ATTR_REPOSITORY = 'repository'
ATTR_REPOSITORIES = 'repositories'
ATTR_URL = 'url'
ATTR_MAINTAINER = 'maintainer'
ATTR_PASSWORD = 'password'
ATTR_TOTP = 'totp'
ATTR_INITIALIZE = 'initialize'
ATTR_SESSION = 'session'
ATTR_SESSIONS = 'sessions'
ATTR_LOCATON = 'location'
ATTR_BUILD = 'build'
ATTR_DEVICES = 'devices'
ATTR_ENVIRONMENT = 'environment'
ATTR_HOST_NETWORK = 'host_network'
ATTR_HOST_IPC = 'host_ipc'
ATTR_HOST_DBUS = 'host_dbus'
ATTR_NETWORK = 'network'
ATTR_TMPFS = 'tmpfs'
ATTR_PRIVILEGED = 'privileged'
ATTR_USER = 'user'
ATTR_SYSTEM = 'system'
ATTR_SNAPSHOTS = 'snapshots'
ATTR_HOMEASSISTANT = 'homeassistant'
ATTR_HASSIO = 'hassio'
ATTR_HASSIO_API = 'hassio_api'
ATTR_HOMEASSISTANT_API = 'homeassistant_api'
ATTR_UUID = 'uuid'
ATTR_FOLDERS = 'folders'
ATTR_SIZE = 'size'
ATTR_TYPE = 'type'
ATTR_TIMEOUT = 'timeout'
ATTR_AUTO_UPDATE = 'auto_update'
ATTR_CUSTOM = 'custom'
ATTR_AUDIO = 'audio'
ATTR_AUDIO_INPUT = 'audio_input'
ATTR_AUDIO_OUTPUT = 'audio_output'
ATTR_INPUT = 'input'
ATTR_OUTPUT = 'output'
ATTR_DISK = 'disk'
ATTR_SERIAL = 'serial'
ATTR_SECURITY = 'security'
ATTR_BUILD_FROM = 'build_from'
ATTR_SQUASH = 'squash'
ATTR_GPIO = 'gpio'
ATTR_LEGACY = 'legacy'
ATTR_ADDONS_CUSTOM_LIST = 'addons_custom_list'
ATTR_CPU_PERCENT = 'cpu_percent'
ATTR_NETWORK_RX = 'network_rx'
ATTR_NETWORK_TX = 'network_tx'
ATTR_MEMORY_LIMIT = 'memory_limit'
ATTR_MEMORY_USAGE = 'memory_usage'
ATTR_BLK_READ = 'blk_read'
ATTR_BLK_WRITE = 'blk_write'

-STARTUP_BEFORE = 'before'
-STARTUP_AFTER = 'after'
STARTUP_INITIALIZE = 'initialize'
+STARTUP_SYSTEM = 'system'
+STARTUP_SERVICES = 'services'
+STARTUP_APPLICATION = 'application'
STARTUP_ONCE = 'once'

BOOT_AUTO = 'auto'
@@ -70,8 +148,26 @@ BOOT_MANUAL = 'manual'

STATE_STARTED = 'started'
STATE_STOPPED = 'stopped'
+STATE_NONE = 'none'

MAP_CONFIG = 'config'
MAP_SSL = 'ssl'
MAP_ADDONS = 'addons'
MAP_BACKUP = 'backup'
MAP_SHARE = 'share'

ARCH_ARMHF = 'armhf'
ARCH_AARCH64 = 'aarch64'
ARCH_AMD64 = 'amd64'
ARCH_I386 = 'i386'

REPOSITORY_CORE = 'core'
REPOSITORY_LOCAL = 'local'

FOLDER_HOMEASSISTANT = 'homeassistant'
FOLDER_SHARE = 'share'
FOLDER_ADDONS = 'addons/local'
FOLDER_SSL = 'ssl'

SNAPSHOT_FULL = 'full'
SNAPSHOT_PARTIAL = 'partial'

hassio/core.py
@@ -2,149 +2,109 @@
import asyncio
import logging

-import aiohttp
-import docker
-
-from . import bootstrap
-from .addons import AddonManager
-from .api import RestAPI
-from .host_control import HostControl
+from .coresys import CoreSysAttributes
from .const import (
-    SOCKET_DOCKER, RUN_UPDATE_INFO_TASKS, RUN_RELOAD_ADDONS_TASKS,
-    RUN_UPDATE_SUPERVISOR_TASKS, STARTUP_AFTER, STARTUP_BEFORE)
-from .scheduler import Scheduler
-from .dock.homeassistant import DockerHomeAssistant
-from .dock.supervisor import DockerSupervisor
-from .tools import get_arch_from_image, get_local_ip
+    STARTUP_SYSTEM, STARTUP_SERVICES, STARTUP_APPLICATION, STARTUP_INITIALIZE)
+from .utils.dt import fetch_timezone

_LOGGER = logging.getLogger(__name__)


-class HassIO(object):
+class HassIO(CoreSysAttributes):
    """Main object of hassio."""

-    def __init__(self, loop):
+    def __init__(self, coresys):
        """Initialize hassio object."""
-        self.exit_code = 0
-        self.loop = loop
-        self.websession = aiohttp.ClientSession(loop=self.loop)
-        self.config = bootstrap.initialize_system_data(self.websession)
-        self.scheduler = Scheduler(self.loop)
-        self.api = RestAPI(self.config, self.loop)
-        self.dock = docker.DockerClient(
-            base_url="unix:/{}".format(str(SOCKET_DOCKER)), version='auto')
-
-        # init basic docker container
-        self.supervisor = DockerSupervisor(
-            self.config, self.loop, self.dock, self)
-        self.homeassistant = DockerHomeAssistant(
-            self.config, self.loop, self.dock)
-
-        # init HostControl
-        self.host_control = HostControl(self.loop)
-
-        # init addon system
-        self.addons = AddonManager(self.config, self.loop, self.dock)
+        self.coresys = coresys

    async def setup(self):
        """Setup HassIO orchestration."""
-        # supervisor
-        await self.supervisor.attach()
-        await self.supervisor.cleanup()
+        # update timezone
+        if self._config.timezone == 'UTC':
+            self._config.timezone = await fetch_timezone(self._websession)

-        # set api endpoint
-        self.config.api_endpoint = await get_local_ip(self.loop)
+        # supervisor
+        await self._supervisor.load()

        # hostcontrol
-        await self.host_control.load()
+        await self._host_control.load()

-        # schedule update info tasks
-        self.scheduler.register_task(
-            self.host_control.load, RUN_UPDATE_INFO_TASKS)
-
-        # rest api views
-        self.api.register_host(self.host_control)
-        self.api.register_network(self.host_control)
-        self.api.register_supervisor(
-            self.supervisor, self.addons, self.host_control)
-        self.api.register_homeassistant(self.homeassistant)
-        self.api.register_addons(self.addons)
-
-        # schedule update info tasks
-        self.scheduler.register_task(
-            self.config.fetch_update_infos, RUN_UPDATE_INFO_TASKS,
-            now=True)
-
-        # first start of supervisor?
-        if not await self.homeassistant.exists():
-            _LOGGER.info("No HomeAssistant docker found.")
-            await self._setup_homeassistant()
+        # Load homeassistant
+        await self._homeassistant.load()

        # Load addons
-        arch = get_arch_from_image(self.supervisor.image)
-        await self.addons.prepare(arch)
+        await self._addons.load()

-        # schedule addon update task
-        self.scheduler.register_task(
-            self.addons.reload, RUN_RELOAD_ADDONS_TASKS, now=True)
+        # rest api views
+        await self._api.load()

-        # schedule self update task
-        self.scheduler.register_task(
-            self._hassio_update, RUN_UPDATE_SUPERVISOR_TASKS)
+        # load last available data
+        await self._updater.load()

+        # load last available data
+        await self._snapshots.load()
+
+        # start dns forwarding
+        self._loop.create_task(self._dns.start())
+
+        # start addon mark as initialize
+        await self._addons.auto_boot(STARTUP_INITIALIZE)

    async def start(self):
        """Start HassIO orchestration."""
+        # on release channel, try update itself
+        # on beta channel, only read new versions
+        if not self._updater.beta_channel and self._supervisor.need_update:
+            if await self._supervisor.update():
+                return
+        else:
+            _LOGGER.info("Ignore Hass.io auto updates on beta mode")
+
        # start api
-        await self.api.start()
-        _LOGGER.info("Start hassio api on %s", self.config.api_endpoint)
+        await self._api.start()
+        _LOGGER.info("Start API on %s", self._docker.network.supervisor)

-        # HomeAssistant is already running / supervisor has only rebooted
-        if await self.homeassistant.is_running():
-            _LOGGER.info("HassIO reboot detected")
-            return
+        try:
+            # HomeAssistant is already running / supervisor has only rebooted
+            if self._hardware.last_boot == self._config.last_boot:
+                _LOGGER.info("Hass.io reboot detected")
+                return

-        # start addon mark as before
-        await self.addons.auto_boot(STARTUP_BEFORE)
+            # start addon mark as system
+            await self._addons.auto_boot(STARTUP_SYSTEM)

-        # run HomeAssistant
-        await self.homeassistant.run()
+            # start addon mark as services
+            await self._addons.auto_boot(STARTUP_SERVICES)

-        # start addon mark as after
-        await self.addons.auto_boot(STARTUP_AFTER)
+            # run HomeAssistant
+            if self._homeassistant.boot:
+                await self._homeassistant.run()

-    async def stop(self, exit_code=0):
+            # start addon mark as application
+            await self._addons.auto_boot(STARTUP_APPLICATION)
+
+            # store new last boot
+            self._config.last_boot = self._hardware.last_boot
+
+        finally:
+            # Add core tasks into scheduler
+            await self._tasks.load()
+
+            # If landingpage / run upgrade in background
+            if self._homeassistant.version == 'landingpage':
+                self._loop.create_task(self._homeassistant.install())
+
+            _LOGGER.info("Hass.io is up and running")

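Startup now fans add-ons out across explicit stages: INITIALIZE during setup, SYSTEM and SERVICES before Home Assistant, and APPLICATION after it. AddonManager.auto_boot is outside this excerpt; a hypothetical sketch of what each call does:

    async def auto_boot(self, stage):
        """Start every installed add-on that declares this startup stage."""
        tasks = [addon.start() for addon in self.list_addons
                 if addon.boot == BOOT_AUTO and addon.startup == stage]
        if tasks:
            await asyncio.wait(tasks)
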
    async def stop(self):
        """Stop a running orchestration."""
        # don't process scheduler anymore
-        self.scheduler.stop()
+        self._scheduler.suspend = True

-        # process stop tasks in parallel
-        tasks = [self.websession.close(), self.api.stop()]
-        await asyncio.wait(tasks, loop=self.loop)
+        # process stop tasks
+        self._websession.close()
+        self._websession_ssl.close()

-        self.exit_code = exit_code
-        self.loop.stop()
-
-    async def _setup_homeassistant(self):
-        """Install a homeassistant docker container."""
-        while True:
-            # read homeassistant tag and install it
-            if not self.config.last_homeassistant:
-                await self.config.fetch_update_infos()
-
-            tag = self.config.last_homeassistant
-            if tag and await self.homeassistant.install(tag):
-                break
-            _LOGGER.warning("Error on setup HomeAssistant. Retry in 60.")
-            await asyncio.sleep(60, loop=self.loop)
-
-        # store version
-        _LOGGER.info("HomeAssistant docker now installed.")
-
-    async def _hassio_update(self):
-        """Check and run update of supervisor hassio."""
-        if self.config.last_hassio == self.supervisor.version:
-            return
-
-        _LOGGER.info(
-            "Found new HassIO version %s.", self.config.last_hassio)
-        await self.supervisor.update(self.config.last_hassio)
+        # process async stop tasks
+        await asyncio.wait(
+            [self._api.stop(), self._dns.stop()], loop=self._loop)

hassio/coresys.py (new file)
@@ -0,0 +1,190 @@
"""Handle core shared data."""

import aiohttp

from .config import CoreConfig
from .docker import DockerAPI
from .misc.dns import DNSForward
from .misc.hardware import Hardware
from .misc.host_control import HostControl
from .misc.scheduler import Scheduler


class CoreSys(object):
    """Class that handles all shared data."""

    def __init__(self, loop):
        """Initialize coresys."""
        # Static attributes
        self.exit_code = 0

        # External objects
        self._loop = loop
        self._websession = aiohttp.ClientSession(loop=loop)
        self._websession_ssl = aiohttp.ClientSession(
            connector=aiohttp.TCPConnector(verify_ssl=False), loop=loop)

        # Global objects
        self._config = CoreConfig()
        self._hardware = Hardware()
        self._docker = DockerAPI()
        self._scheduler = Scheduler(loop=loop)
        self._dns = DNSForward(loop=loop)
        self._host_control = HostControl(loop=loop)

        # Internal objects pointers
        self._homeassistant = None
        self._supervisor = None
        self._addons = None
        self._api = None
        self._updater = None
        self._snapshots = None
        self._tasks = None

    @property
    def arch(self):
        """Return running arch of hass.io system."""
        if self._supervisor:
            return self._supervisor.arch
        return None

    @property
    def loop(self):
        """Return loop object."""
        return self._loop

    @property
    def websession(self):
        """Return websession object."""
        return self._websession

    @property
    def websession_ssl(self):
        """Return websession object with disabled SSL."""
        return self._websession_ssl

    @property
    def config(self):
        """Return CoreConfig object."""
        return self._config

    @property
    def hardware(self):
        """Return Hardware object."""
        return self._hardware

    @property
    def docker(self):
        """Return DockerAPI object."""
        return self._docker

    @property
    def scheduler(self):
        """Return Scheduler object."""
        return self._scheduler

    @property
    def dns(self):
        """Return DNSForward object."""
        return self._dns

    @property
    def host_control(self):
        """Return HostControl object."""
        return self._host_control

    @property
    def homeassistant(self):
        """Return HomeAssistant object."""
        return self._homeassistant

    @homeassistant.setter
    def homeassistant(self, value):
        """Set a HomeAssistant object."""
        if self._homeassistant:
            raise RuntimeError("HomeAssistant already set!")
        self._homeassistant = value

    @property
    def supervisor(self):
        """Return Supervisor object."""
        return self._supervisor

    @supervisor.setter
    def supervisor(self, value):
        """Set a Supervisor object."""
        if self._supervisor:
            raise RuntimeError("Supervisor already set!")
        self._supervisor = value

    @property
    def api(self):
        """Return API object."""
        return self._api

    @api.setter
    def api(self, value):
        """Set an API object."""
        if self._api:
            raise RuntimeError("API already set!")
        self._api = value

    @property
    def updater(self):
        """Return Updater object."""
        return self._updater

    @updater.setter
    def updater(self, value):
        """Set an Updater object."""
        if self._updater:
            raise RuntimeError("Updater already set!")
        self._updater = value

    @property
    def addons(self):
        """Return AddonManager object."""
        return self._addons

    @addons.setter
    def addons(self, value):
        """Set an AddonManager object."""
        if self._addons:
            raise RuntimeError("AddonManager already set!")
        self._addons = value

    @property
    def snapshots(self):
        """Return SnapshotsManager object."""
        return self._snapshots

    @snapshots.setter
    def snapshots(self, value):
        """Set a SnapshotsManager object."""
        if self._snapshots:
            raise RuntimeError("SnapshotsManager already set!")
        self._snapshots = value

    @property
    def tasks(self):
        """Return Tasks object."""
        return self._tasks

    @tasks.setter
    def tasks(self, value):
        """Set a Tasks object."""
        if self._tasks:
            raise RuntimeError("Tasks already set!")
        self._tasks = value


class CoreSysAttributes(object):
    """Inherit basic CoreSysAttributes."""

    coresys = None

    def __getattr__(self, name):
        """Mapping to coresys."""
        if hasattr(self.coresys, name[1:]):
            return getattr(self.coresys, name[1:])
        raise AttributeError(f"Can't find {name} on {self.__class__}")
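
The underscore convention in __getattr__ is what lets every subsystem reach shared services without threading each one through constructors: a lookup of self._config strips the leading underscore and resolves to coresys.config. For example (editor's illustration):

    class Example(CoreSysAttributes):
        """Any helper that needs shared state."""

        def __init__(self, coresys):
            self.coresys = coresys

        def show_timezone(self):
            # resolved by CoreSysAttributes.__getattr__ -> coresys.config
            return self._config.timezone
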
@@ -1,266 +0,0 @@
"""Init file for HassIO docker object."""
import asyncio
from contextlib import suppress
import logging

import docker

from ..tools import get_version_from_env

_LOGGER = logging.getLogger(__name__)


class DockerBase(object):
    """Docker hassio wrapper."""

    def __init__(self, config, loop, dock, image=None):
        """Initialize docker base wrapper."""
        self.config = config
        self.loop = loop
        self.dock = dock
        self.image = image
        self.container = None
        self.version = None
        self._lock = asyncio.Lock(loop=loop)

    @property
    def docker_name(self):
        """Return name of docker container."""
        return None

    @property
    def in_progress(self):
        """Return True if a task is in progress."""
        return self._lock.locked()

    async def install(self, tag):
        """Pull docker image."""
        if self._lock.locked():
            _LOGGER.error("Can't execute install while a task is in progress")
            return False

        async with self._lock:
            return await self.loop.run_in_executor(None, self._install, tag)

    def _install(self, tag):
        """Pull docker image.

        Need run inside executor.
        """
        try:
            _LOGGER.info("Pull image %s tag %s.", self.image, tag)
            image = self.dock.images.pull("{}:{}".format(self.image, tag))

            image.tag(self.image, tag='latest')
            self.version = get_version_from_env(image.attrs['Config']['Env'])
            _LOGGER.info("Tag image %s with version %s as latest",
                         self.image, self.version)
        except docker.errors.APIError as err:
            _LOGGER.error("Can't install %s:%s -> %s.", self.image, tag, err)
            return False
        return True

    def exists(self):
        """Return True if docker image exists in local repo.

        Return a Future.
        """
        return self.loop.run_in_executor(None, self._exists)

    def _exists(self):
        """Return True if docker image exists in local repo.

        Need run inside executor.
        """
        try:
            image = self.dock.images.get(self.image)
            self.version = get_version_from_env(image.attrs['Config']['Env'])
        except docker.errors.DockerException:
            return False

        return True

    def is_running(self):
        """Return True if docker is running.

        Return a Future.
        """
        return self.loop.run_in_executor(None, self._is_running)

    def _is_running(self):
        """Return True if docker is running.

        Need run inside executor.
        """
        if not self.container:
            try:
                self.container = self.dock.containers.get(self.docker_name)
                self.version = get_version_from_env(
                    self.container.attrs['Config']['Env'])
            except docker.errors.DockerException:
                return False
        else:
            self.container.reload()

        return self.container.status == 'running'

    async def attach(self):
        """Attach to running docker container."""
        if self._lock.locked():
            _LOGGER.error("Can't execute attach while a task is in progress")
            return False

        async with self._lock:
            return await self.loop.run_in_executor(None, self._attach)

    def _attach(self):
        """Attach to running docker container.

        Need run inside executor.
        """
        try:
            self.container = self.dock.containers.get(self.docker_name)
            self.image = self.container.attrs['Config']['Image']
            self.version = get_version_from_env(
                self.container.attrs['Config']['Env'])
            _LOGGER.info("Attach to image %s with version %s",
                         self.image, self.version)
        except (docker.errors.DockerException, KeyError):
            _LOGGER.fatal(
                "Can't attach to %s docker container!", self.docker_name)
            return False

        return True

    async def run(self):
        """Run docker image."""
        if self._lock.locked():
            _LOGGER.error("Can't execute run while a task is in progress")
            return False

        async with self._lock:
            return await self.loop.run_in_executor(None, self._run)

    def _run(self):
        """Run docker image.

        Need run inside executor.
        """
        raise NotImplementedError()

    async def stop(self):
        """Stop/remove docker container."""
        if self._lock.locked():
            _LOGGER.error("Can't execute stop while a task is in progress")
            return False

        async with self._lock:
            await self.loop.run_in_executor(None, self._stop)
        return True

    def _stop(self):
        """Stop and remove docker container.

        Need run inside executor.
        """
        if not self.container:
            return

        _LOGGER.info("Stop %s docker application", self.image)

        self.container.reload()
        if self.container.status == 'running':
            with suppress(docker.errors.DockerException):
                self.container.stop()

        with suppress(docker.errors.DockerException):
            self.container.remove(force=True)

        self.container = None

    async def remove(self):
        """Remove docker container."""
        if self._lock.locked():
            _LOGGER.error("Can't execute remove while a task is in progress")
            return False

        async with self._lock:
            return await self.loop.run_in_executor(None, self._remove)

    def _remove(self):
        """Remove docker container.

        Need run inside executor.
        """
        if self._is_running():
            self._stop()

        _LOGGER.info("Remove docker %s with latest and %s",
                     self.image, self.version)

        try:
            self.dock.images.remove(
                image="{}:latest".format(self.image), force=True)
            self.dock.images.remove(
                image="{}:{}".format(self.image, self.version), force=True)
        except docker.errors.ImageNotFound:
            return True
        except docker.errors.DockerException as err:
            _LOGGER.warning("Can't remove image %s -> %s", self.image, err)
            return False

        return True

    async def update(self, tag):
        """Update a docker image."""
        if self._lock.locked():
            _LOGGER.error("Can't execute update while a task is in progress")
            return False

        async with self._lock:
            return await self.loop.run_in_executor(None, self._update, tag)

    def _update(self, tag):
        """Update a docker image.

        Need run inside executor.
        """
        old_image = "{}:{}".format(self.image, self.version)

        _LOGGER.info("Update docker %s with %s:%s",
                     old_image, self.image, tag)

        # update docker image
        if self._install(tag):
            _LOGGER.info("Cleanup old %s docker", old_image)
            self._stop()
            try:
                self.dock.images.remove(image=old_image, force=True)
            except docker.errors.DockerException as err:
                _LOGGER.warning(
                    "Can't remove old image %s -> %s", old_image, err)
            return True

        return False

    async def logs(self):
        """Return docker logs of container."""
        if self._lock.locked():
            _LOGGER.error("Can't execute logs while a task is in progress")
            return False

        async with self._lock:
            return await self.loop.run_in_executor(None, self._logs)

    def _logs(self):
        """Return docker logs of container.

        Need run inside executor.
        """
        if not self.container:
            return

        try:
            return self.container.logs(tail=100, stdout=True, stderr=True)
        except docker.errors.DockerException as err:
            _LOGGER.warning("Can't grab logs from %s -> %s", self.image, err)
@@ -1,108 +0,0 @@
"""Init file for HassIO addon docker object."""
import logging

import docker

from . import DockerBase
from ..tools import get_version_from_env

_LOGGER = logging.getLogger(__name__)

HASS_DOCKER_NAME = 'homeassistant'


class DockerAddon(DockerBase):
    """Docker hassio wrapper for add-ons."""

    def __init__(self, config, loop, dock, addons_data, addon):
        """Initialize docker addon wrapper."""
        super().__init__(
            config, loop, dock, image=addons_data.get_image(addon))
        self.addon = addon
        self.addons_data = addons_data

    @property
    def docker_name(self):
        """Return name of docker container."""
        return "addon_{}".format(self.addon)

    @property
    def volumes(self):
        """Generate volumes for mappings."""
        volumes = {
            self.addons_data.path_extern_data(self.addon): {
                'bind': '/data', 'mode': 'rw'
            }}

        if self.addons_data.map_config(self.addon):
            volumes.update({
                self.config.path_extern_config: {
                    'bind': '/config', 'mode': 'rw'
                }})

        if self.addons_data.map_ssl(self.addon):
            volumes.update({
                self.config.path_extern_ssl: {
                    'bind': '/ssl', 'mode': 'rw'
                }})

        if self.addons_data.map_addons(self.addon):
            volumes.update({
                self.config.path_extern_addons_local: {
                    'bind': '/addons', 'mode': 'rw'
                }})

        if self.addons_data.map_backup(self.addon):
            volumes.update({
                self.config.path_extern_backup: {
                    'bind': '/backup', 'mode': 'rw'
                }})

        return volumes

    def _run(self):
        """Run docker image.

        Need run inside executor.
        """
        if self._is_running():
            return

        # cleanup old container
        self._stop()

        try:
            self.container = self.dock.containers.run(
                self.image,
                name=self.docker_name,
                detach=True,
                network_mode='bridge',
                ports=self.addons_data.get_ports(self.addon),
                volumes=self.volumes,
            )

            self.version = get_version_from_env(
                self.container.attrs['Config']['Env'])

            _LOGGER.info("Start docker addon %s with version %s",
                         self.image, self.version)

        except docker.errors.DockerException as err:
            _LOGGER.error("Can't run %s -> %s", self.image, err)
            return False

        return True

    def _attach(self):
        """Attach to running docker container.

        Need run inside executor.
        """
        try:
            self.container = self.dock.containers.get(self.docker_name)
            self.version = get_version_from_env(
                self.container.attrs['Config']['Env'])
            _LOGGER.info("Attach to image %s with version %s",
                         self.image, self.version)
        except (docker.errors.DockerException, KeyError):
            pass
@@ -1,77 +0,0 @@
"""Init file for HassIO docker object."""
import logging

import docker

from . import DockerBase
from ..tools import get_version_from_env

_LOGGER = logging.getLogger(__name__)

HASS_DOCKER_NAME = 'homeassistant'


class DockerHomeAssistant(DockerBase):
    """Docker hassio wrapper for HomeAssistant."""

    def __init__(self, config, loop, dock):
        """Initialize docker homeassistant wrapper."""
        super().__init__(config, loop, dock, image=config.homeassistant_image)

    @property
    def docker_name(self):
        """Return name of docker container."""
        return HASS_DOCKER_NAME

    def _run(self):
        """Run docker image.

        Need run inside executor.
        """
        if self._is_running():
            return

        # cleanup old container
        self._stop()

        try:
            self.container = self.dock.containers.run(
                self.image,
                name=self.docker_name,
                detach=True,
                privileged=True,
                network_mode='host',
                environment={
                    'HASSIO': self.config.api_endpoint,
                },
                volumes={
                    self.config.path_extern_config:
                        {'bind': '/config', 'mode': 'rw'},
                    self.config.path_extern_ssl:
                        {'bind': '/ssl', 'mode': 'rw'},
                })

            self.version = get_version_from_env(
                self.container.attrs['Config']['Env'])

            _LOGGER.info("Start docker addon %s with version %s",
                         self.image, self.version)

        except docker.errors.DockerException as err:
            _LOGGER.error("Can't run %s -> %s", self.image, err)
            return False

        return True

    async def update(self, tag):
        """Update homeassistant docker image."""
        if self._lock.locked():
            _LOGGER.error("Can't execute update while a task is in progress")
            return False

        async with self._lock:
            if await self.loop.run_in_executor(None, self._update, tag):
                await self.loop.run_in_executor(None, self._run)
                return True

        return False
@@ -1,83 +0,0 @@
"""Init file for HassIO docker object."""
import logging
import os

import docker

from . import DockerBase
from ..const import RESTART_EXIT_CODE

_LOGGER = logging.getLogger(__name__)


class DockerSupervisor(DockerBase):
    """Docker hassio wrapper for the Supervisor."""

    def __init__(self, config, loop, dock, hassio, image=None):
        """Initialize docker base wrapper."""
        super().__init__(config, loop, dock, image=image)

        self.hassio = hassio

    @property
    def docker_name(self):
        """Return name of docker container."""
        return os.environ['SUPERVISOR_NAME']

    async def update(self, tag):
        """Update a supervisor docker image."""
        if self._lock.locked():
            _LOGGER.error("Can't execute update while a task is in progress")
            return False

        _LOGGER.info("Update supervisor docker to %s:%s", self.image, tag)
        old_version = self.version

        async with self._lock:
            if await self.loop.run_in_executor(None, self._install, tag):
                self.config.hassio_cleanup = old_version
                self.loop.create_task(self.hassio.stop(RESTART_EXIT_CODE))
                return True

        return False

    async def cleanup(self):
        """Check if old supervisor version exists and cleanup."""
        if not self.config.hassio_cleanup:
            return

        async with self._lock:
            if await self.loop.run_in_executor(None, self._cleanup):
                self.config.hassio_cleanup = None

    def _cleanup(self):
        """Remove old image.

        Need run inside executor.
        """
        old_image = "{}:{}".format(self.image, self.config.hassio_cleanup)

        _LOGGER.info("Old supervisor docker found %s", old_image)
        try:
            self.dock.images.remove(image=old_image, force=True)
        except docker.errors.DockerException as err:
            _LOGGER.warning("Can't remove old image %s -> %s", old_image, err)
            return False

        return True

    async def run(self):
        """Run docker image."""
        raise RuntimeError("Not supported on supervisor docker container!")

    async def install(self, tag):
        """Pull docker image."""
        raise RuntimeError("Not supported on supervisor docker container!")

    async def stop(self):
        """Stop/remove docker container."""
        raise RuntimeError("Not supported on supervisor docker container!")

    async def remove(self):
        """Remove docker image."""
        raise RuntimeError("Not supported on supervisor docker container!")
hassio/docker/__init__.py (new file)
@@ -0,0 +1,110 @@
"""Init file for HassIO docker object."""
from contextlib import suppress
import logging

import docker

from .network import DockerNetwork
from ..const import SOCKET_DOCKER

_LOGGER = logging.getLogger(__name__)


class DockerAPI(object):
    """Docker hassio wrapper.

    This class is not AsyncIO safe!
    """

    def __init__(self):
        """Initialize docker base wrapper."""
        self.docker = docker.DockerClient(
            base_url="unix:/{}".format(str(SOCKET_DOCKER)), version='auto')
        self.network = DockerNetwork(self.docker)

    @property
    def images(self):
        """Return api images."""
        return self.docker.images

    @property
    def containers(self):
        """Return api containers."""
        return self.docker.containers

    @property
    def api(self):
        """Return low-level api client."""
        return self.docker.api

    def run(self, image, **kwargs):
        """Create a docker container and run it.

        Need run inside executor.
        """
        name = kwargs.get('name', image)
        network_mode = kwargs.get('network_mode')
        hostname = kwargs.get('hostname')

        # setup network
        kwargs['dns_search'] = ["."]
        if network_mode:
            kwargs['dns'] = [str(self.network.supervisor)]
            kwargs['dns_opt'] = ["ndots:0"]
        else:
            kwargs['network'] = None

        # create container
        try:
            container = self.docker.containers.create(image, **kwargs)
        except docker.errors.DockerException as err:
            _LOGGER.error("Can't create container from %s: %s", name, err)
            return False

        # attach network
        if not network_mode:
            alias = [hostname] if hostname else None
            if self.network.attach_container(container, alias=alias):
                self.network.detach_default_bridge(container)
            else:
                _LOGGER.warning("Can't attach %s to hassio-net!", name)

        # run container
        try:
            container.start()
        except docker.errors.DockerException as err:
            _LOGGER.error("Can't start %s: %s", name, err)
            return False

        return True

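Callers on the event loop are expected to push this synchronous helper into an executor, per the "Need run inside executor" note (editor's illustration; the image name is made up):

    ok = await coresys.loop.run_in_executor(
        None,
        lambda: coresys.docker.run(
            'homeassistant/amd64-addon-example',
            name='addon_example',
            hostname='example'))
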
    def run_command(self, image, command=None, **kwargs):
        """Create a temporary container and run command.

        Need run inside executor.
        """
        stdout = kwargs.get('stdout', True)
        stderr = kwargs.get('stderr', True)

        _LOGGER.info("Run command '%s' on %s", command, image)
        try:
            container = self.docker.containers.run(
                image,
                command=command,
                network=self.network.name,
                **kwargs
            )

            # wait until command is done
            exit_code = container.wait()
            output = container.logs(stdout=stdout, stderr=stderr)

        except docker.errors.DockerException as err:
            _LOGGER.error("Can't execute command: %s", err)
            return (None, b"")

        # cleanup container
        with suppress(docker.errors.DockerException):
            container.remove(force=True)

        return (exit_code, output)
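
Typical use is a one-shot helper container whose exit code and captured logs are the result (editor's illustration; image and command are made up):

    exit_code, output = coresys.docker.run_command(
        'alpine:3.7', command='nslookup homeassistant', detach=True)
    if exit_code == 0:
        print(output.decode())

detach=True is forwarded to containers.run so the call returns a container handle that can be wait()ed on and then removed.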
hassio/docker/addon.py (new file)
@@ -0,0 +1,379 @@
"""Init file for HassIO addon docker object."""
import logging
import os

import docker
import requests

from .interface import DockerInterface
from .utils import docker_process
from ..addons.build import AddonBuild
from ..const import (
    MAP_CONFIG, MAP_SSL, MAP_ADDONS, MAP_BACKUP, MAP_SHARE)

_LOGGER = logging.getLogger(__name__)

AUDIO_DEVICE = "/dev/snd:/dev/snd:rwm"


class DockerAddon(DockerInterface):
    """Docker hassio wrapper for add-ons."""

    def __init__(self, coresys, slug):
        """Initialize docker addon wrapper."""
        super().__init__(coresys)
        self._id = slug

    @property
    def addon(self):
        """Return addon of docker image."""
        return self._addons.get(self._id)

    @property
    def image(self):
        """Return name of docker image."""
        return self.addon.image

    @property
    def timeout(self):
        """Return timeout for docker actions."""
        return self.addon.timeout

    @property
    def version(self):
        """Return version of docker image."""
        if not self.addon.legacy:
            return super().version
        return self.addon.version_installed

    @property
    def arch(self):
        """Return arch of docker image."""
        if not self.addon.legacy:
            return super().arch
        return self._arch

    @property
    def name(self):
        """Return name of docker container."""
        return "addon_{}".format(self.addon.slug)

    @property
    def ipc(self):
        """Return the IPC namespace."""
        if self.addon.host_ipc:
            return 'host'
        return None

    @property
    def hostname(self):
        """Return slug/id of addon."""
        return self.addon.slug.replace('_', '-')

    @property
    def environment(self):
        """Return environment for docker add-on."""
        addon_env = self.addon.environment or {}
        if self.addon.with_audio:
            addon_env.update({
                'ALSA_OUTPUT': self.addon.audio_output,
                'ALSA_INPUT': self.addon.audio_input,
            })

        # Set api token if any API access is needed
        if self.addon.access_hassio_api or self.addon.access_homeassistant_api:
            addon_env['HASSIO_TOKEN'] = self.addon.uuid

        return {
            **addon_env,
            'TZ': self._config.timezone,
        }

    @property
    def devices(self):
        """Return needed devices."""
        devices = self.addon.devices or []

        # Use audio devices
        if self.addon.with_audio and AUDIO_DEVICE not in devices:
            devices.append(AUDIO_DEVICE)

        # Auto mapping UART devices
        if self.addon.auto_uart:
            for device in self._hardware.serial_devices:
                devices.append(f"{device}:{device}:rwm")

        # Return None if no devices are present
        return devices or None

    @property
    def ports(self):
        """Filter None from addon ports."""
        if not self.addon.ports:
            return None

        return {
            container_port: host_port
            for container_port, host_port in self.addon.ports.items()
            if host_port
        }

@property
|
||||
def security_opt(self):
|
||||
"""Controlling security opt."""
|
||||
privileged = self.addon.privileged or []
|
||||
|
||||
# Disable AppArmor sinse it make troubles wit SYS_ADMIN
|
||||
if 'SYS_ADMIN' in privileged:
|
||||
return [
|
||||
"apparmor:unconfined",
|
||||
]
|
||||
return None
|
||||
|
||||
@property
|
||||
def tmpfs(self):
|
||||
"""Return tmpfs for docker add-on."""
|
||||
options = self.addon.tmpfs
|
||||
if options:
|
||||
return {"/tmpfs": f"{options}"}
|
||||
return None
|
||||
|
||||
@property
|
||||
def network_mapping(self):
|
||||
"""Return hosts mapping."""
|
||||
return {
|
||||
'homeassistant': self._docker.network.gateway,
|
||||
'hassio': self._docker.network.supervisor,
|
||||
}
|
||||
|
||||
@property
|
||||
def network_mode(self):
|
||||
"""Return network mode for addon."""
|
||||
if self.addon.host_network:
|
||||
return 'host'
|
||||
return None
|
||||
|
||||
@property
|
||||
def volumes(self):
|
||||
"""Generate volumes for mappings."""
|
||||
volumes = {
|
||||
str(self.addon.path_extern_data): {
|
||||
'bind': "/data", 'mode': 'rw'
|
||||
}}
|
||||
|
||||
addon_mapping = self.addon.map_volumes
|
||||
|
||||
# setup config mappings
|
||||
if MAP_CONFIG in addon_mapping:
|
||||
volumes.update({
|
||||
str(self._config.path_extern_config): {
|
||||
'bind': "/config", 'mode': addon_mapping[MAP_CONFIG]
|
||||
}})
|
||||
|
||||
if MAP_SSL in addon_mapping:
|
||||
volumes.update({
|
||||
str(self._config.path_extern_ssl): {
|
||||
'bind': "/ssl", 'mode': addon_mapping[MAP_SSL]
|
||||
}})
|
||||
|
||||
if MAP_ADDONS in addon_mapping:
|
||||
volumes.update({
|
||||
str(self._config.path_extern_addons_local): {
|
||||
'bind': "/addons", 'mode': addon_mapping[MAP_ADDONS]
|
||||
}})
|
||||
|
||||
if MAP_BACKUP in addon_mapping:
|
||||
volumes.update({
|
||||
str(self._config.path_extern_backup): {
|
||||
'bind': "/backup", 'mode': addon_mapping[MAP_BACKUP]
|
||||
}})
|
||||
|
||||
if MAP_SHARE in addon_mapping:
|
||||
volumes.update({
|
||||
str(self._config.path_extern_share): {
|
||||
'bind': "/share", 'mode': addon_mapping[MAP_SHARE]
|
||||
}})
|
||||
|
||||
# init other hardware mappings
|
||||
if self.addon.with_gpio:
|
||||
volumes.update({
|
||||
"/sys/class/gpio": {
|
||||
'bind': "/sys/class/gpio", 'mode': 'rw'
|
||||
},
|
||||
"/sys/devices/platform/soc": {
|
||||
'bind': "/sys/devices/platform/soc", 'mode': 'rw'
|
||||
},
|
||||
})
|
||||
|
||||
# host dbus system
|
||||
if self.addon.host_dbus:
|
||||
volumes.update({
|
||||
"/var/run/dbus": {
|
||||
'bind': "/var/run/dbus", 'mode': 'rw'
|
||||
}})
|
||||
|
||||
return volumes
|
||||
|
||||
def _run(self):
|
||||
"""Run docker image.
|
||||
|
||||
Need run inside executor.
|
||||
"""
|
||||
if self._is_running():
|
||||
return True
|
||||
|
||||
# cleanup
|
||||
self._stop()
|
||||
|
||||
# write config
|
||||
if not self.addon.write_options():
|
||||
return False
|
||||
|
||||
ret = self._docker.run(
|
||||
self.image,
|
||||
name=self.name,
|
||||
hostname=self.hostname,
|
||||
detach=True,
|
||||
init=True,
|
||||
ipc_mode=self.ipc,
|
||||
stdin_open=self.addon.with_stdin,
|
||||
network_mode=self.network_mode,
|
||||
ports=self.ports,
|
||||
extra_hosts=self.network_mapping,
|
||||
devices=self.devices,
|
||||
cap_add=self.addon.privileged,
|
||||
security_opt=self.security_opt,
|
||||
environment=self.environment,
|
||||
volumes=self.volumes,
|
||||
tmpfs=self.tmpfs
|
||||
)
|
||||
|
||||
if ret:
|
||||
_LOGGER.info("Start docker addon %s with version %s",
|
||||
self.image, self.version)
|
||||
|
||||
return ret
|
||||
|
||||
def _install(self, tag):
|
||||
"""Pull docker image or build it.
|
||||
|
||||
Need run inside executor.
|
||||
"""
|
||||
if self.addon.need_build:
|
||||
return self._build(tag)
|
||||
|
||||
return super()._install(tag)
|
||||
|
||||
def _build(self, tag):
|
||||
"""Build a docker container.
|
||||
|
||||
Need run inside executor.
|
||||
"""
|
||||
build_env = AddonBuild(self.coresys, self._id)
|
||||
|
||||
_LOGGER.info("Start build %s:%s", self.image, tag)
|
||||
try:
|
||||
image = self._docker.images.build(**build_env.get_docker_args(tag))
|
||||
|
||||
image.tag(self.image, tag='latest')
|
||||
self._meta = image.attrs
|
||||
|
||||
except (docker.errors.DockerException) as err:
|
||||
_LOGGER.error("Can't build %s:%s: %s", self.image, tag, err)
|
||||
return False
|
||||
|
||||
_LOGGER.info("Build %s:%s done", self.image, tag)
|
||||
return True
|
||||
|
||||
@docker_process
|
||||
def export_image(self, path):
|
||||
"""Export current images into a tar file."""
|
||||
return self._loop.run_in_executor(None, self._export_image, path)
|
||||
|
||||
def _export_image(self, tar_file):
|
||||
"""Export current images into a tar file.
|
||||
|
||||
Need run inside executor.
|
||||
"""
|
||||
try:
|
||||
image = self._docker.api.get_image(self.image)
|
||||
except docker.errors.DockerException as err:
|
||||
_LOGGER.error("Can't fetch image %s: %s", self.image, err)
|
||||
return False
|
||||
|
||||
try:
|
||||
with tar_file.open("wb") as write_tar:
|
||||
for chunk in image.stream():
|
||||
write_tar.write(chunk)
|
||||
except (OSError, requests.exceptions.ReadTimeout) as err:
|
||||
_LOGGER.error("Can't write tar file %s: %s", tar_file, err)
|
||||
return False
|
||||
|
||||
_LOGGER.info("Export image %s to %s", self.image, tar_file)
|
||||
return True
|
||||
|
||||
@docker_process
|
||||
def import_image(self, path, tag):
|
||||
"""Import a tar file as image."""
|
||||
return self._loop.run_in_executor(None, self._import_image, path, tag)
|
||||
|
||||
def _import_image(self, tar_file, tag):
|
||||
"""Import a tar file as image.
|
||||
|
||||
Need run inside executor.
|
||||
"""
|
||||
try:
|
||||
with tar_file.open("rb") as read_tar:
|
||||
self._docker.api.load_image(read_tar, quiet=True)
|
||||
|
||||
image = self._docker.images.get(self.image)
|
||||
image.tag(self.image, tag=tag)
|
||||
except (docker.errors.DockerException, OSError) as err:
|
||||
_LOGGER.error("Can't import image %s: %s", self.image, err)
|
||||
return False
|
||||
|
||||
_LOGGER.info("Import image %s and tag %s", tar_file, tag)
|
||||
self._meta = image.attrs
|
||||
self._cleanup()
|
||||
return True
|
||||
|
||||
def _restart(self):
|
||||
"""Restart docker container.
|
||||
|
||||
Addons prepare some thing on start and that is normaly not repeatable.
|
||||
Need run inside executor.
|
||||
"""
|
||||
self._stop()
|
||||
return self._run()
|
||||
|
||||
@docker_process
|
||||
def write_stdin(self, data):
|
||||
"""Write to add-on stdin."""
|
||||
return self._loop.run_in_executor(None, self._write_stdin, data)
|
||||
|
||||
def _write_stdin(self, data):
|
||||
"""Write to add-on stdin.
|
||||
|
||||
Need run inside executor.
|
||||
"""
|
||||
if not self._is_running():
|
||||
return False
|
||||
|
||||
try:
|
||||
# load needed docker objects
|
||||
container = self._docker.containers.get(self.name)
|
||||
socket = container.attach_socket(params={'stdin': 1, 'stream': 1})
|
||||
except docker.errors.DockerException as err:
|
||||
_LOGGER.error("Can't attach to %s stdin: %s", self.name, err)
|
||||
return False
|
||||
|
||||
try:
|
||||
# write to stdin
|
||||
data += b"\n"
|
||||
os.write(socket.fileno(), data)
|
||||
socket.close()
|
||||
except OSError as err:
|
||||
_LOGGER.error("Can't write to %s stdin: %s", self.name, err)
|
||||
return False
|
||||
|
||||
return True
|
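For illustration, the mapping that DockerAddon.volumes produces has exactly the shape docker-py expects for the volumes argument; a sketch with made-up host paths (not part of the commit):

# Illustrative only: host paths and the add-on slug are invented.
volumes = {
    "/usr/share/hassio/addons/data/local_example": {
        'bind': "/data", 'mode': 'rw'},    # always present
    "/usr/share/hassio/homeassistant": {
        'bind': "/config", 'mode': 'ro'},  # only when MAP_CONFIG is requested
}
# Handed straight to docker via DockerAPI.run(..., volumes=volumes).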
111 hassio/docker/homeassistant.py Normal file
@@ -0,0 +1,111 @@
"""Init file for HassIO docker object."""
import logging

import docker

from .interface import DockerInterface

_LOGGER = logging.getLogger(__name__)

HASS_DOCKER_NAME = 'homeassistant'


class DockerHomeAssistant(DockerInterface):
    """Docker hassio wrapper for HomeAssistant."""

    @property
    def image(self):
        """Return name of docker image."""
        return self._homeassistant.image

    @property
    def name(self):
        """Return name of docker container."""
        return HASS_DOCKER_NAME

    @property
    def devices(self):
        """Create list of special devices to map into docker."""
        devices = []
        for device in self._hardware.serial_devices:
            devices.append(f"{device}:{device}:rwm")
        return devices or None

    def _run(self):
        """Run docker image.

        Needs to run inside an executor.
        """
        if self._is_running():
            return False

        # cleanup
        self._stop()

        ret = self._docker.run(
            self.image,
            name=self.name,
            hostname=self.name,
            detach=True,
            privileged=True,
            init=True,
            devices=self.devices,
            network_mode='host',
            environment={
                'HASSIO': self._docker.network.supervisor,
                'TZ': self._config.timezone,
                'HASSIO_TOKEN': self._homeassistant.uuid,
            },
            volumes={
                str(self._config.path_extern_config):
                    {'bind': '/config', 'mode': 'rw'},
                str(self._config.path_extern_ssl):
                    {'bind': '/ssl', 'mode': 'ro'},
                str(self._config.path_extern_share):
                    {'bind': '/share', 'mode': 'rw'},
            }
        )

        if ret:
            _LOGGER.info("Start homeassistant %s with version %s",
                         self.image, self.version)

        return ret

    def _execute_command(self, command):
        """Create a temporary container and run command.

        Needs to run inside an executor.
        """
        return self._docker.run_command(
            self.image,
            command,
            detach=True,
            stdout=True,
            stderr=True,
            environment={
                'TZ': self._config.timezone,
            },
            volumes={
                str(self._config.path_extern_config):
                    {'bind': '/config', 'mode': 'ro'},
                str(self._config.path_extern_ssl):
                    {'bind': '/ssl', 'mode': 'ro'},
            }
        )

    def is_initialize(self):
        """Return True if the docker container exists."""
        return self._loop.run_in_executor(None, self._is_initialize)

    def _is_initialize(self):
        """Return True if the docker container exists.

        Needs to run inside an executor.
        """
        try:
            self._docker.containers.get(self.name)
        except docker.errors.DockerException:
            return False

        return True
349 hassio/docker/interface.py Normal file
@@ -0,0 +1,349 @@
"""Interface class for HassIO docker object."""
import asyncio
from contextlib import suppress
import logging

import docker

from .utils import docker_process
from .stats import DockerStats
from ..const import LABEL_VERSION, LABEL_ARCH
from ..coresys import CoreSysAttributes

_LOGGER = logging.getLogger(__name__)


class DockerInterface(CoreSysAttributes):
    """Docker hassio interface."""

    def __init__(self, coresys):
        """Initialize docker base wrapper."""
        self.coresys = coresys
        self._meta = None
        self.lock = asyncio.Lock(loop=self._loop)

    @property
    def timeout(self):
        """Return timeout for docker actions."""
        return 30

    @property
    def name(self):
        """Return name of docker container."""
        return None

    @property
    def image(self):
        """Return name of docker image."""
        if not self._meta:
            return None
        return self._meta['Config']['Image']

    @property
    def version(self):
        """Return version of docker image."""
        if self._meta and LABEL_VERSION in self._meta['Config']['Labels']:
            return self._meta['Config']['Labels'][LABEL_VERSION]
        return None

    @property
    def arch(self):
        """Return arch of docker image."""
        if self._meta and LABEL_ARCH in self._meta['Config']['Labels']:
            return self._meta['Config']['Labels'][LABEL_ARCH]
        return None

    @property
    def in_progress(self):
        """Return True if a task is in progress."""
        return self.lock.locked()

    @docker_process
    def install(self, tag):
        """Pull docker image."""
        return self._loop.run_in_executor(None, self._install, tag)

    def _install(self, tag):
        """Pull docker image.

        Needs to run inside an executor.
        """
        try:
            _LOGGER.info("Pull image %s tag %s.", self.image, tag)
            image = self._docker.images.pull(f"{self.image}:{tag}")

            image.tag(self.image, tag='latest')
            self._meta = image.attrs
        except docker.errors.APIError as err:
            _LOGGER.error("Can't install %s:%s -> %s.", self.image, tag, err)
            return False

        _LOGGER.info("Tag image %s with version %s as latest", self.image, tag)
        return True

    def exists(self):
        """Return True if the docker image exists in the local repo."""
        return self._loop.run_in_executor(None, self._exists)

    def _exists(self):
        """Return True if the docker image exists in the local repo.

        Needs to run inside an executor.
        """
        try:
            image = self._docker.images.get(self.image)
            assert f"{self.image}:{self.version}" in image.tags
        except (docker.errors.DockerException, AssertionError):
            return False

        return True

    def is_running(self):
        """Return True if the docker container is running.

        Return a Future.
        """
        return self._loop.run_in_executor(None, self._is_running)

    def _is_running(self):
        """Return True if the docker container is running.

        Needs to run inside an executor.
        """
        try:
            container = self._docker.containers.get(self.name)
            image = self._docker.images.get(self.image)
        except docker.errors.DockerException:
            return False

        # container is not running
        if container.status != 'running':
            return False

        # we run on an old image, stop and start it
        if container.image.id != image.id:
            return False

        return True

    @docker_process
    def attach(self):
        """Attach to a running docker container."""
        return self._loop.run_in_executor(None, self._attach)

    def _attach(self):
        """Attach to a running docker container.

        Needs to run inside an executor.
        """
        try:
            if self.image:
                self._meta = self._docker.images.get(self.image).attrs
            else:
                self._meta = self._docker.containers.get(self.name).attrs
        except docker.errors.DockerException:
            return False

        _LOGGER.info(
            "Attach to image %s with version %s", self.image, self.version)

        return True

    @docker_process
    def run(self):
        """Run docker image."""
        return self._loop.run_in_executor(None, self._run)

    def _run(self):
        """Run docker image.

        Needs to run inside an executor.
        """
        raise NotImplementedError()

    @docker_process
    def stop(self):
        """Stop/remove docker container."""
        return self._loop.run_in_executor(None, self._stop)

    def _stop(self):
        """Stop and remove docker container.

        Needs to run inside an executor.
        """
        try:
            container = self._docker.containers.get(self.name)
        except docker.errors.DockerException:
            return False

        if container.status == 'running':
            _LOGGER.info("Stop %s docker application", self.image)
            with suppress(docker.errors.DockerException):
                container.stop(timeout=self.timeout)

        with suppress(docker.errors.DockerException):
            _LOGGER.info("Clean %s docker application", self.image)
            container.remove(force=True)

        return True

    @docker_process
    def remove(self):
        """Remove docker images."""
        return self._loop.run_in_executor(None, self._remove)

    def _remove(self):
        """Remove docker images.

        Needs to run inside an executor.
        """
        # cleanup container
        self._stop()

        _LOGGER.info(
            "Remove docker %s with latest and %s", self.image, self.version)

        try:
            with suppress(docker.errors.ImageNotFound):
                self._docker.images.remove(
                    image=f"{self.image}:latest", force=True)

            with suppress(docker.errors.ImageNotFound):
                self._docker.images.remove(
                    image=f"{self.image}:{self.version}", force=True)

        except docker.errors.DockerException as err:
            _LOGGER.warning("Can't remove image %s: %s", self.image, err)
            return False

        self._meta = None
        return True

    @docker_process
    def update(self, tag):
        """Update a docker image."""
        return self._loop.run_in_executor(None, self._update, tag)

    def _update(self, tag):
        """Update a docker image.

        Needs to run inside an executor.
        """
        _LOGGER.info(
            "Update docker %s with %s:%s", self.version, self.image, tag)

        # update docker image
        if not self._install(tag):
            return False

        # stop container & cleanup
        self._stop()
        self._cleanup()

        return True

    def logs(self):
        """Return docker logs of container.

        Return a Future.
        """
        return self._loop.run_in_executor(None, self._logs)

    def _logs(self):
        """Return docker logs of container.

        Needs to run inside an executor.
        """
        try:
            container = self._docker.containers.get(self.name)
        except docker.errors.DockerException:
            return b""

        try:
            return container.logs(tail=100, stdout=True, stderr=True)
        except docker.errors.DockerException as err:
            _LOGGER.warning("Can't grab logs from %s: %s", self.image, err)

    @docker_process
    def restart(self):
        """Restart docker container."""
        return self._loop.run_in_executor(None, self._restart)

    def _restart(self):
        """Restart docker container.

        Needs to run inside an executor.
        """
        try:
            container = self._docker.containers.get(self.name)
        except docker.errors.DockerException:
            return False

        _LOGGER.info("Restart %s", self.image)

        try:
            container.restart(timeout=self.timeout)
        except docker.errors.DockerException as err:
            _LOGGER.warning("Can't restart %s: %s", self.image, err)
            return False

        return True

    @docker_process
    def cleanup(self):
        """Check if an old version exists and clean it up."""
        return self._loop.run_in_executor(None, self._cleanup)

    def _cleanup(self):
        """Check if an old version exists and clean it up.

        Needs to run inside an executor.
        """
        try:
            latest = self._docker.images.get(self.image)
        except docker.errors.DockerException:
            _LOGGER.warning("Can't find %s for cleanup", self.image)
            return False

        for image in self._docker.images.list(name=self.image):
            if latest.id == image.id:
                continue

            with suppress(docker.errors.DockerException):
                _LOGGER.info("Cleanup docker images: %s", image.tags)
                self._docker.images.remove(image.id, force=True)

        return True

    @docker_process
    def execute_command(self, command):
        """Create a temporary container and run command."""
        return self._loop.run_in_executor(None, self._execute_command, command)

    def _execute_command(self, command):
        """Create a temporary container and run command.

        Needs to run inside an executor.
        """
        raise NotImplementedError()

    def stats(self):
        """Read and return stats from container."""
        return self._loop.run_in_executor(None, self._stats)

    def _stats(self):
        """Read and return stats from container.

        Needs to run inside an executor.
        """
        try:
            container = self._docker.containers.get(self.name)
        except docker.errors.DockerException:
            return None

        try:
            stats = container.stats(stream=False)
            return DockerStats(stats)
        except docker.errors.DockerException as err:
            _LOGGER.error("Can't read stats from %s: %s", self.name, err)
            return None
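For illustration, the image metadata shape that DockerInterface.version and arch read from image.attrs; the label keys shown are assumed stand-ins for LABEL_VERSION/LABEL_ARCH (not part of the commit):

# Illustrative only: label keys and values are assumptions.
meta = {
    'Config': {
        'Image': 'homeassistant/amd64-addon-example',
        'Labels': {
            'io.hass.version': '1.2.3',   # assumed value of LABEL_VERSION
            'io.hass.arch': 'amd64',      # assumed value of LABEL_ARCH
        },
    },
}
# Mirrors the version property above:
version = meta['Config']['Labels'].get('io.hass.version')  # '1.2.3'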
93 hassio/docker/network.py Normal file
@@ -0,0 +1,93 @@
"""Internal network manager for HassIO."""
import logging

import docker

from ..const import DOCKER_NETWORK_MASK, DOCKER_NETWORK, DOCKER_NETWORK_RANGE

_LOGGER = logging.getLogger(__name__)


class DockerNetwork(object):
    """Internal HassIO Network.

    This class is not AsyncIO safe!
    """

    def __init__(self, dock):
        """Initialize internal hassio network."""
        self.docker = dock
        self.network = self._get_network()

    @property
    def name(self):
        """Return name of the network."""
        return DOCKER_NETWORK

    @property
    def containers(self):
        """Return connected containers of the network."""
        return self.network.containers

    @property
    def gateway(self):
        """Return gateway of the network."""
        return DOCKER_NETWORK_MASK[1]

    @property
    def supervisor(self):
        """Return supervisor address of the network."""
        return DOCKER_NETWORK_MASK[2]

    def _get_network(self):
        """Get HassIO network."""
        try:
            return self.docker.networks.get(DOCKER_NETWORK)
        except docker.errors.NotFound:
            _LOGGER.info("Can't find HassIO network, creating a new network")

        ipam_pool = docker.types.IPAMPool(
            subnet=str(DOCKER_NETWORK_MASK),
            gateway=str(self.gateway),
            iprange=str(DOCKER_NETWORK_RANGE)
        )

        ipam_config = docker.types.IPAMConfig(pool_configs=[ipam_pool])

        return self.docker.networks.create(
            DOCKER_NETWORK, driver='bridge', ipam=ipam_config,
            enable_ipv6=False, options={
                "com.docker.network.bridge.name": DOCKER_NETWORK,
            })

    def attach_container(self, container, alias=None, ipv4=None):
        """Attach container to hassio network.

        Needs to run inside an executor.
        """
        ipv4 = str(ipv4) if ipv4 else None

        try:
            self.network.connect(container, aliases=alias, ipv4_address=ipv4)
        except docker.errors.APIError as err:
            _LOGGER.error("Can't link container to hassio-net: %s", err)
            return False

        self.network.reload()
        return True

    def detach_default_bridge(self, container):
        """Detach default docker bridge.

        Needs to run inside an executor.
        """
        try:
            default_network = self.docker.networks.get('bridge')
            default_network.disconnect(container)

        except docker.errors.NotFound:
            return

        except docker.errors.APIError as err:
            _LOGGER.warning(
                "Can't disconnect container from default bridge: %s", err)
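For illustration, the same IPAM setup as _get_network, standalone and with hard-coded example addresses in place of the hassio constants (not part of the commit):

import docker

# Illustrative only: addresses stand in for the hassio network constants.
client = docker.from_env()
ipam_pool = docker.types.IPAMPool(
    subnet="172.30.32.0/23",      # stand-in for DOCKER_NETWORK_MASK
    gateway="172.30.32.1",        # DOCKER_NETWORK_MASK[1]
    iprange="172.30.33.0/24",     # stand-in for DOCKER_NETWORK_RANGE
)
network = client.networks.create(
    "hassio", driver='bridge',
    ipam=docker.types.IPAMConfig(pool_configs=[ipam_pool]),
    enable_ipv6=False)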
90 hassio/docker/stats.py Normal file
@@ -0,0 +1,90 @@
"""Calc & represent docker stats data."""
from contextlib import suppress


class DockerStats(object):
    """Hold stats data from container inside."""

    def __init__(self, stats):
        """Initialize docker stats."""
        self._cpu = 0.0
        self._network_rx = 0
        self._network_tx = 0
        self._blk_read = 0
        self._blk_write = 0

        try:
            self._memory_usage = stats['memory_stats']['usage']
            self._memory_limit = stats['memory_stats']['limit']
        except KeyError:
            self._memory_usage = 0
            self._memory_limit = 0

        with suppress(KeyError):
            self._calc_cpu_percent(stats)

        with suppress(KeyError):
            self._calc_network(stats['networks'])

        with suppress(KeyError):
            self._calc_block_io(stats['blkio_stats'])

    def _calc_cpu_percent(self, stats):
        """Calculate CPU percent."""
        cpu_delta = stats['cpu_stats']['cpu_usage']['total_usage'] - \
            stats['precpu_stats']['cpu_usage']['total_usage']
        system_delta = stats['cpu_stats']['system_cpu_usage'] - \
            stats['precpu_stats']['system_cpu_usage']

        if system_delta > 0.0 and cpu_delta > 0.0:
            self._cpu = (cpu_delta / system_delta) * \
                len(stats['cpu_stats']['cpu_usage']['percpu_usage']) * 100.0

    def _calc_network(self, networks):
        """Calculate network IO stats."""
        for _, stats in networks.items():
            self._network_rx += stats['rx_bytes']
            self._network_tx += stats['tx_bytes']

    def _calc_block_io(self, blkio):
        """Calculate block IO stats."""
        for stats in blkio['io_service_bytes_recursive']:
            if stats['op'] == 'Read':
                self._blk_read += stats['value']
            elif stats['op'] == 'Write':
                self._blk_write += stats['value']

    @property
    def cpu_percent(self):
        """Return CPU percent."""
        return self._cpu

    @property
    def memory_usage(self):
        """Return memory usage."""
        return self._memory_usage

    @property
    def memory_limit(self):
        """Return memory limit."""
        return self._memory_limit

    @property
    def network_rx(self):
        """Return network rx stats."""
        return self._network_rx

    @property
    def network_tx(self):
        """Return network tx stats."""
        return self._network_tx

    @property
    def blk_read(self):
        """Return block IO read stats."""
        return self._blk_read

    @property
    def blk_write(self):
        """Return block IO write stats."""
        return self._blk_write
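For illustration, the CPU formula from _calc_cpu_percent worked on a hand-made two-CPU payload (numbers invented; not part of the commit):

# Illustrative only: a minimal stats payload in the Docker stats API shape.
stats = {
    'cpu_stats': {
        'cpu_usage': {'total_usage': 400, 'percpu_usage': [0, 0]},
        'system_cpu_usage': 1400,
    },
    'precpu_stats': {
        'cpu_usage': {'total_usage': 200},
        'system_cpu_usage': 400,
    },
}

cpu_delta = 400 - 200        # container CPU time consumed since last sample
system_delta = 1400 - 400    # total system CPU time over the same window
cpus = 2                     # len(percpu_usage)
print(cpu_delta / system_delta * cpus * 100.0)   # -> 40.0 percent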
41 hassio/docker/supervisor.py Normal file
@@ -0,0 +1,41 @@
"""Init file for HassIO docker object."""
import logging
import os

import docker

from .interface import DockerInterface
from ..coresys import CoreSysAttributes

_LOGGER = logging.getLogger(__name__)


class DockerSupervisor(DockerInterface, CoreSysAttributes):
    """Docker hassio wrapper for the supervisor."""

    @property
    def name(self):
        """Return name of docker container."""
        return os.environ['SUPERVISOR_NAME']

    def _attach(self):
        """Attach to a running docker container.

        Needs to run inside an executor.
        """
        try:
            container = self._docker.containers.get(self.name)
        except docker.errors.DockerException:
            return False

        self._meta = container.attrs
        _LOGGER.info("Attach to supervisor %s with version %s",
                     self.image, self.version)

        # if already attached
        if container in self._docker.network.containers:
            return True

        # attach to network
        return self._docker.network.attach_container(
            container, alias=['hassio'], ipv4=self._docker.network.supervisor)
20 hassio/docker/utils.py Normal file
@@ -0,0 +1,20 @@
"""HassIO docker utilities."""
import logging

_LOGGER = logging.getLogger(__name__)


# pylint: disable=protected-access
def docker_process(method):
    """Wrap a method so that only one call runs at a time."""
    async def wrap_api(api, *args, **kwargs):
        """Return api wrapper."""
        if api.lock.locked():
            _LOGGER.error(
                "Can't execute %s while a task is in progress", method.__name__)
            return False

        async with api.lock:
            return await method(api, *args, **kwargs)

    return wrap_api
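For illustration, a self-contained demo of the serialization docker_process provides: while one wrapped call holds the lock, a concurrent call bails out with False. FakeInterface is invented for the demo and the decorator body mirrors the one above (not part of the commit):

import asyncio


def docker_process(method):
    """Same shape as the decorator above, copied for the demo."""
    async def wrap_api(api, *args, **kwargs):
        if api.lock.locked():
            return False
        async with api.lock:
            return await method(api, *args, **kwargs)
    return wrap_api


class FakeInterface:
    def __init__(self):
        self.lock = asyncio.Lock()

    @docker_process
    async def install(self, tag):
        await asyncio.sleep(0.1)  # simulate a slow docker pull
        return tag


async def main():
    api = FakeInterface()
    print(await asyncio.gather(api.install('1.0'), api.install('2.0')))
    # -> ['1.0', False]: the second call saw the lock held and was rejected


asyncio.run(main())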
287 hassio/homeassistant.py Normal file
@@ -0,0 +1,287 @@
"""HomeAssistant control object."""
import asyncio
import logging
import os
import re

import aiohttp
from aiohttp.hdrs import CONTENT_TYPE

from .const import (
    FILE_HASSIO_HOMEASSISTANT, ATTR_IMAGE, ATTR_LAST_VERSION, ATTR_UUID,
    ATTR_BOOT, ATTR_PASSWORD, ATTR_PORT, ATTR_SSL, ATTR_WATCHDOG,
    HEADER_HA_ACCESS, CONTENT_TYPE_JSON)
from .coresys import CoreSysAttributes
from .docker.homeassistant import DockerHomeAssistant
from .utils import convert_to_ascii
from .utils.json import JsonConfig
from .validate import SCHEMA_HASS_CONFIG

_LOGGER = logging.getLogger(__name__)

RE_YAML_ERROR = re.compile(r"homeassistant\.util\.yaml")


class HomeAssistant(JsonConfig, CoreSysAttributes):
    """Home Assistant core object for handling the instance."""

    def __init__(self, coresys):
        """Initialize hass object."""
        super().__init__(FILE_HASSIO_HOMEASSISTANT, SCHEMA_HASS_CONFIG)
        self.coresys = coresys
        self.instance = DockerHomeAssistant(coresys)

    async def load(self):
        """Prepare HomeAssistant object."""
        if await self.instance.attach():
            return

        _LOGGER.info("No HomeAssistant docker %s found.", self.image)
        await self.install_landingpage()

    @property
    def api_ip(self):
        """Return IP of HomeAssistant instance."""
        return self._docker.network.gateway

    @property
    def api_port(self):
        """Return network port of the home-assistant instance."""
        return self._data[ATTR_PORT]

    @api_port.setter
    def api_port(self, value):
        """Set network port for home-assistant instance."""
        self._data[ATTR_PORT] = value

    @property
    def api_password(self):
        """Return password for home-assistant instance."""
        return self._data.get(ATTR_PASSWORD)

    @api_password.setter
    def api_password(self, value):
        """Set password for home-assistant instance."""
        self._data[ATTR_PASSWORD] = value

    @property
    def api_ssl(self):
        """Return True if SSL is needed for the home-assistant instance."""
        return self._data[ATTR_SSL]

    @api_ssl.setter
    def api_ssl(self, value):
        """Set SSL for home-assistant instance."""
        self._data[ATTR_SSL] = value

    @property
    def api_url(self):
        """Return API url of Home-Assistant."""
        return "{}://{}:{}".format(
            'https' if self.api_ssl else 'http', self.api_ip, self.api_port
        )

    @property
    def watchdog(self):
        """Return True if the watchdog should protect Home-Assistant."""
        return self._data[ATTR_WATCHDOG]

    @watchdog.setter
    def watchdog(self, value):
        """Set whether the watchdog should protect Home-Assistant."""
        self._data[ATTR_WATCHDOG] = value

    @property
    def version(self):
        """Return version of running homeassistant."""
        return self.instance.version

    @property
    def last_version(self):
        """Return last available version of homeassistant."""
        if self.is_custom_image:
            return self._data.get(ATTR_LAST_VERSION)
        return self._updater.version_homeassistant

    @last_version.setter
    def last_version(self, value):
        """Set last available version of homeassistant."""
        if value:
            self._data[ATTR_LAST_VERSION] = value
        else:
            self._data.pop(ATTR_LAST_VERSION, None)

    @property
    def image(self):
        """Return image name of the hass container."""
        if self._data.get(ATTR_IMAGE):
            return self._data[ATTR_IMAGE]
        return os.environ['HOMEASSISTANT_REPOSITORY']

    @image.setter
    def image(self, value):
        """Set image name of the hass container."""
        if value:
            self._data[ATTR_IMAGE] = value
        else:
            self._data.pop(ATTR_IMAGE, None)

    @property
    def is_custom_image(self):
        """Return True if a custom image is used."""
        return all(attr in self._data for attr in
                   (ATTR_IMAGE, ATTR_LAST_VERSION))

    @property
    def boot(self):
        """Return True if home-assistant boot is enabled."""
        return self._data[ATTR_BOOT]

    @boot.setter
    def boot(self, value):
        """Set home-assistant boot options."""
        self._data[ATTR_BOOT] = value

    @property
    def uuid(self):
        """Return a UUID of this HomeAssistant instance."""
        return self._data[ATTR_UUID]

    async def install_landingpage(self):
        """Install a landingpage."""
        _LOGGER.info("Setup HomeAssistant landingpage")
        while True:
            if await self.instance.install('landingpage'):
                break
            _LOGGER.warning(
                "Failed to install landingpage, retry after 60sec")
            await asyncio.sleep(60, loop=self._loop)

        # run landingpage after installation
        await self.instance.run()

    async def install(self):
        """Install HomeAssistant."""
        _LOGGER.info("Setup HomeAssistant")
        while True:
            # read homeassistant tag and install it
            if not self.last_version:
                await self._updater.reload()

            tag = self.last_version
            if tag and await self.instance.install(tag):
                break
            _LOGGER.warning("Error on install HomeAssistant. Retry in 60sec")
            await asyncio.sleep(60, loop=self._loop)

        # finishing
        _LOGGER.info("HomeAssistant docker now installed")
        if self.boot:
            await self.instance.run()
        await self.instance.cleanup()

    async def update(self, version=None):
        """Update HomeAssistant version."""
        version = version or self.last_version
        running = await self.instance.is_running()
        exists = await self.instance.exists()

        if exists and version == self.instance.version:
            _LOGGER.info("Version %s is already installed", version)
            return False

        try:
            return await self.instance.update(version)
        finally:
            if running:
                await self.instance.run()

    def run(self):
        """Run HomeAssistant docker.

        Return a coroutine.
        """
        return self.instance.run()

    def stop(self):
        """Stop HomeAssistant docker.

        Return a coroutine.
        """
        return self.instance.stop()

    def restart(self):
        """Restart HomeAssistant docker.

        Return a coroutine.
        """
        return self.instance.restart()

    def logs(self):
        """Get HomeAssistant docker logs.

        Return a coroutine.
        """
        return self.instance.logs()

    def stats(self):
        """Return stats of HomeAssistant.

        Return a coroutine.
        """
        return self.instance.stats()

    def is_running(self):
        """Return True if the docker container is running.

        Return a coroutine.
        """
        return self.instance.is_running()

    def is_initialize(self):
        """Return True if the docker container exists.

        Return a coroutine.
        """
        return self.instance.is_initialize()

    @property
    def in_progress(self):
        """Return True if a task is in progress."""
        return self.instance.in_progress

    async def check_config(self):
        """Run homeassistant config check."""
        exit_code, log = await self.instance.execute_command(
            "python3 -m homeassistant -c /config --script check_config"
        )

        # if not valid
        if exit_code is None:
            return (False, "")

        # parse output
        log = convert_to_ascii(log)
        if exit_code != 0 or RE_YAML_ERROR.search(log):
            return (False, log)
        return (True, log)

    async def check_api_state(self):
        """Check if Home-Assistant is up and running."""
        url = f"{self.api_url}/api/"
        header = {CONTENT_TYPE: CONTENT_TYPE_JSON}

        if self.api_password:
            header.update({HEADER_HA_ACCESS: self.api_password})

        try:
            # pylint: disable=bad-continuation
            async with self._websession_ssl.get(
                    url, headers=header, timeout=30) as request:
                status = request.status

        except (asyncio.TimeoutError, aiohttp.ClientError):
            return False

        if status not in (200, 201):
            _LOGGER.warning("Home-Assistant API config mismatch")
        return True
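For illustration, a standalone version of the liveness probe in check_api_state, with a hard-coded URL in place of the configured api_url and Supervisor session, and an aiohttp 3 style timeout (not part of the commit):

import asyncio

import aiohttp


async def api_alive(url="http://172.30.32.1:8123/api/"):
    """Return True if the Home Assistant API answers with 200/201."""
    # Illustrative only: URL and timeout style are assumptions.
    try:
        async with aiohttp.ClientSession() as session:
            async with session.get(
                    url, timeout=aiohttp.ClientTimeout(total=30)) as request:
                return request.status in (200, 201)
    except (asyncio.TimeoutError, aiohttp.ClientError):
        return False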
1 hassio/misc/__init__.py Normal file
@@ -0,0 +1 @@
"""Special object and tools for Hass.io."""
42 hassio/misc/dns.py Normal file
@@ -0,0 +1,42 @@
"""Setup the internal DNS service for host applications."""
import asyncio
import logging
import shlex

_LOGGER = logging.getLogger(__name__)

COMMAND = "socat UDP-RECVFROM:53,fork UDP-SENDTO:127.0.0.11:53"


class DNSForward(object):
    """Manage DNS forwarding to the internal DNS."""

    def __init__(self, loop):
        """Initialize DNS forwarding."""
        self.loop = loop
        self.proc = None

    async def start(self):
        """Start DNS forwarding."""
        try:
            self.proc = await asyncio.create_subprocess_exec(
                *shlex.split(COMMAND),
                stdin=asyncio.subprocess.DEVNULL,
                stdout=asyncio.subprocess.DEVNULL,
                stderr=asyncio.subprocess.DEVNULL,
                loop=self.loop
            )
        except OSError as err:
            _LOGGER.error("Can't start DNS forwarding: %s", err)
        else:
            _LOGGER.info("Start DNS port forwarding for host add-ons")

    async def stop(self):
        """Stop DNS forwarding."""
        if not self.proc:
            _LOGGER.warning("DNS forwarding is not running!")
            return

        self.proc.kill()
        await self.proc.wait()
        _LOGGER.info("Stop DNS forwarding")
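For illustration, a possible way to drive DNSForward (it requires the socat binary and root privileges for port 53; the import path is assumed from this commit; not part of the commit):

import asyncio

from hassio.misc.dns import DNSForward  # path assumed from this commit


async def main():
    loop = asyncio.get_event_loop()
    forwarder = DNSForward(loop)
    await forwarder.start()   # spawns the socat UDP relay on port 53
    await asyncio.sleep(60)   # ...serve DNS for host add-ons for a while...
    await forwarder.stop()    # kill socat and reap the process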
121 hassio/misc/hardware.py Normal file
@@ -0,0 +1,121 @@
"""Read hardware info from system."""
from datetime import datetime
import logging
from pathlib import Path
import re

import pyudev

from ..const import ATTR_NAME, ATTR_TYPE, ATTR_DEVICES

_LOGGER = logging.getLogger(__name__)

ASOUND_CARDS = Path("/proc/asound/cards")
RE_CARDS = re.compile(r"(\d+) \[(\w*) *\]: (.*\w)")

ASOUND_DEVICES = Path("/proc/asound/devices")
RE_DEVICES = re.compile(r"\[.*(\d+)- (\d+).*\]: ([\w ]*)")

PROC_STAT = Path("/proc/stat")
RE_BOOT_TIME = re.compile(r"btime (\d+)")

GPIO_DEVICES = Path("/sys/class/gpio")
RE_TTY = re.compile(r"tty[A-Z]+")


class Hardware(object):
    """Represent an interface to procfs, sysfs and udev."""

    def __init__(self):
        """Init hardware object."""
        self.context = pyudev.Context()

    @property
    def serial_devices(self):
        """Return all serial and connected devices."""
        dev_list = set()
        for device in self.context.list_devices(subsystem='tty'):
            if 'ID_VENDOR' in device or RE_TTY.search(device.device_node):
                dev_list.add(device.device_node)

        return dev_list

    @property
    def input_devices(self):
        """Return all input devices."""
        dev_list = set()
        for device in self.context.list_devices(subsystem='input'):
            if 'NAME' in device:
                dev_list.add(device['NAME'].replace('"', ''))

        return dev_list

    @property
    def disk_devices(self):
        """Return all disk devices."""
        dev_list = set()
        for device in self.context.list_devices(subsystem='block'):
            if device.device_node.startswith('/dev/sd'):
                dev_list.add(device.device_node)

        return dev_list

    @property
    def audio_devices(self):
        """Return all available audio interfaces."""
        try:
            with ASOUND_CARDS.open('r') as cards_file:
                cards = cards_file.read()
            with ASOUND_DEVICES.open('r') as devices_file:
                devices = devices_file.read()
        except OSError as err:
            _LOGGER.error("Can't read asound data: %s", err)
            return {}

        audio_list = {}

        # parse cards
        for match in RE_CARDS.finditer(cards):
            audio_list[match.group(1)] = {
                ATTR_NAME: match.group(3),
                ATTR_TYPE: match.group(2),
                ATTR_DEVICES: {},
            }

        # parse devices
        for match in RE_DEVICES.finditer(devices):
            try:
                audio_list[match.group(1)][ATTR_DEVICES][match.group(2)] = \
                    match.group(3)
            except KeyError:
                _LOGGER.warning("Wrong audio device found %s", match.group(0))
                continue

        return audio_list

    @property
    def gpio_devices(self):
        """Return list of GPIO interfaces on device."""
        dev_list = set()
        for interface in GPIO_DEVICES.glob("gpio*"):
            dev_list.add(interface.name)

        return dev_list

    @property
    def last_boot(self):
        """Return last boot time."""
        try:
            with PROC_STAT.open("r") as stat_file:
                stats = stat_file.read()
        except OSError as err:
            _LOGGER.error("Can't read stat data: %s", err)
            return None

        # parse stat file
        found = RE_BOOT_TIME.search(stats)
        if not found:
            _LOGGER.error("Can't find last boot time!")
            return None

        return datetime.utcfromtimestamp(int(found.group(1)))
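For illustration, RE_CARDS applied to one sample line in the /proc/asound/cards format (sample text invented; not part of the commit):

import re

RE_CARDS = re.compile(r"(\d+) \[(\w*) *\]: (.*\w)")

sample = " 0 [PCH            ]: HDA-Intel - HDA Intel PCH"
match = RE_CARDS.search(sample)
print(match.group(1), match.group(2), match.group(3))
# -> 0 PCH HDA-Intel - HDA Intel PCH
# group(1) is the card index, group(2) the type, group(3) the display name.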
@@ -5,7 +5,7 @@ import logging
 
 import async_timeout
 
-from .const import (
+from ..const import (
     SOCKET_HC, ATTR_LAST_VERSION, ATTR_VERSION, ATTR_TYPE, ATTR_FEATURES,
     ATTR_HOSTNAME, ATTR_OS)
 
@@ -17,6 +17,7 @@ UNKNOWN = 'unknown'
 FEATURES_SHUTDOWN = 'shutdown'
 FEATURES_REBOOT = 'reboot'
 FEATURES_UPDATE = 'update'
+FEATURES_HOSTNAME = 'hostname'
 FEATURES_NETWORK_INFO = 'network_info'
 FEATURES_NETWORK_CONTROL = 'network_control'
 
@@ -117,3 +118,7 @@ class HostControl(object):
         if version:
             return self._send_command("update {}".format(version))
         return self._send_command("update")
+
+    def set_hostname(self, hostname):
+        """Update hostname on host."""
+        return self._send_command("hostname {}".format(hostname))
75 hassio/misc/scheduler.py Normal file
@@ -0,0 +1,75 @@
"""Schedule for HassIO."""
import logging
from datetime import date, datetime, time, timedelta

_LOGGER = logging.getLogger(__name__)

INTERVAL = 'interval'
REPEAT = 'repeat'
CALL = 'callback'
TASK = 'task'


class Scheduler(object):
    """Schedule tasks inside HassIO."""

    def __init__(self, loop):
        """Initialize task schedule."""
        self.loop = loop
        self._data = {}
        self.suspend = False

    def register_task(self, coro_callback, interval, repeat=True):
        """Schedule a coroutine.

        The coroutine needs to be a callback without arguments.
        """
        task_id = hash(coro_callback)

        # generate data
        opts = {
            CALL: coro_callback,
            INTERVAL: interval,
            REPEAT: repeat,
        }

        # schedule task
        self._data[task_id] = opts
        self._schedule_task(interval, task_id)

        return task_id

    def _run_task(self, task_id):
        """Run a scheduled task."""
        data = self._data[task_id]

        if not self.suspend:
            self.loop.create_task(data[CALL]())

        if data[REPEAT]:
            self._schedule_task(data[INTERVAL], task_id)
        else:
            self._data.pop(task_id)

    def _schedule_task(self, interval, task_id):
        """Schedule a task on the loop."""
        if isinstance(interval, (int, float)):
            job = self.loop.call_later(interval, self._run_task, task_id)
        elif isinstance(interval, time):
            today = datetime.combine(date.today(), interval)
            tomorrow = datetime.combine(
                date.today() + timedelta(days=1), interval)

            # check if we run it today or the next day
            if today > datetime.today():
                calc = today
            else:
                calc = tomorrow

            job = self.loop.call_at(calc.timestamp(), self._run_task, task_id)
        else:
            _LOGGER.fatal("Unknown interval %s (type: %s) for scheduler %s",
                          interval, type(interval), task_id)

        # Store job
        self._data[task_id][TASK] = job
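For illustration, a minimal way to drive the new Scheduler outside the Supervisor wiring (the callback and the 5-second interval are invented; the import path is assumed from this commit; not part of the commit):

import asyncio

from hassio.misc.scheduler import Scheduler  # path assumed from this commit


async def heartbeat():
    print("tick")


loop = asyncio.get_event_loop()
scheduler = Scheduler(loop)
scheduler.register_task(heartbeat, 5, repeat=True)  # run every 5 seconds
loop.run_forever()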
@@ -1,63 +0,0 @@
-"""Schedule for HassIO."""
-import logging
-
-_LOGGER = logging.getLogger(__name__)
-
-SEC = 'seconds'
-REPEAT = 'repeat'
-CALL = 'callback'
-TASK = 'task'
-
-
-class Scheduler(object):
-    """Schedule task inside HassIO."""
-
-    def __init__(self, loop):
-        """Initialize task schedule."""
-        self.loop = loop
-        self._data = {}
-        self._stop = False
-
-    def stop(self):
-        """Stop to execute tasks in scheduler."""
-        self._stop = True
-
-    def register_task(self, coro_callback, seconds, repeat=True,
-                      now=False):
-        """Schedule a coroutine.
-
-        The coroutien need to be a callback without arguments.
-        """
-        idx = hash(coro_callback)
-
-        # generate data
-        opts = {
-            CALL: coro_callback,
-            SEC: seconds,
-            REPEAT: repeat,
-        }
-        self._data[idx] = opts
-
-        # schedule task
-        if now:
-            self._run_task(idx)
-        else:
-            task = self.loop.call_later(seconds, self._run_task, idx)
-            self._data[idx][TASK] = task
-
-        return idx
-
-    def _run_task(self, idx):
-        """Run a scheduled task."""
-        data = self._data.pop(idx)
-
-        # stop execute tasks
-        if self._stop:
-            return
-
-        self.loop.create_task(data[CALL]())
-
-        if data[REPEAT]:
-            task = self.loop.call_later(data[SEC], self._run_task, idx)
-            data[TASK] = task
-            self._data[idx] = data
325 hassio/snapshots/__init__.py Normal file
@@ -0,0 +1,325 @@
"""Snapshot system control."""
import asyncio
from datetime import datetime
import logging
from pathlib import Path
import tarfile

from .snapshot import Snapshot
from .utils import create_slug
from ..const import (
    ATTR_SLUG, FOLDER_HOMEASSISTANT, SNAPSHOT_FULL, SNAPSHOT_PARTIAL)
from ..coresys import CoreSysAttributes

_LOGGER = logging.getLogger(__name__)


class SnapshotsManager(CoreSysAttributes):
    """Manage snapshots."""

    def __init__(self, coresys):
        """Initialize a snapshot manager."""
        self.coresys = coresys
        self.snapshots_obj = {}
        self.lock = asyncio.Lock(loop=coresys.loop)

    @property
    def list_snapshots(self):
        """Return a list of all snapshot objects."""
        return set(self.snapshots_obj.values())

    def get(self, slug):
        """Return snapshot object."""
        return self.snapshots_obj.get(slug)

    def _create_snapshot(self, name, sys_type):
        """Initialize a new snapshot object from name."""
        date_str = datetime.utcnow().isoformat()
        slug = create_slug(name, date_str)
        tar_file = Path(self._config.path_backup, "{}.tar".format(slug))

        # init object
        snapshot = Snapshot(self.coresys, tar_file)
        snapshot.create(slug, name, date_str, sys_type)

        # set general data
        snapshot.store_homeassistant()
        snapshot.store_repositories()

        return snapshot

    def load(self):
        """Load existing snapshots data.

        Return a coroutine.
        """
        return self.reload()

    async def reload(self):
        """Load existing backups."""
        self.snapshots_obj = {}

        async def _load_snapshot(tar_file):
            """Internal function to load snapshot."""
            snapshot = Snapshot(self.coresys, tar_file)
            if await snapshot.load():
                self.snapshots_obj[snapshot.slug] = snapshot

        tasks = [_load_snapshot(tar_file) for tar_file in
                 self._config.path_backup.glob("*.tar")]

        _LOGGER.info("Found %d snapshot files", len(tasks))
        if tasks:
            await asyncio.wait(tasks, loop=self._loop)

    def remove(self, snapshot):
        """Remove a snapshot."""
        try:
            snapshot.tar_file.unlink()
            self.snapshots_obj.pop(snapshot.slug, None)
        except OSError as err:
            _LOGGER.error("Can't remove snapshot %s: %s", snapshot.slug, err)
            return False

        return True

    async def do_snapshot_full(self, name=""):
        """Create a full snapshot."""
        if self.lock.locked():
            _LOGGER.error("A snapshot/restore process is already running")
            return False

        snapshot = self._create_snapshot(name, SNAPSHOT_FULL)
        _LOGGER.info("Full-Snapshot %s start", snapshot.slug)
        try:
            self._scheduler.suspend = True
            await self.lock.acquire()

            async with snapshot:
                # snapshot addons
                tasks = []
                for addon in self._addons.list_addons:
                    if not addon.is_installed:
                        continue
                    tasks.append(snapshot.import_addon(addon))

                if tasks:
                    _LOGGER.info("Full-Snapshot %s run %d addons",
                                 snapshot.slug, len(tasks))
                    await asyncio.wait(tasks, loop=self._loop)

                # snapshot folders
                _LOGGER.info("Full-Snapshot %s store folders", snapshot.slug)
                await snapshot.store_folders()

        except (OSError, ValueError, tarfile.TarError) as err:
            _LOGGER.info("Full-Snapshot %s error: %s", snapshot.slug, err)
            return False

        else:
            _LOGGER.info("Full-Snapshot %s done", snapshot.slug)
            self.snapshots_obj[snapshot.slug] = snapshot
            return True

        finally:
            self._scheduler.suspend = False
            self.lock.release()

    async def do_snapshot_partial(self, name="", addons=None, folders=None):
        """Create a partial snapshot."""
        if self.lock.locked():
            _LOGGER.error("A snapshot/restore process is already running")
            return False

        addons = addons or []
        folders = folders or []
        snapshot = self._create_snapshot(name, SNAPSHOT_PARTIAL)

        _LOGGER.info("Partial-Snapshot %s start", snapshot.slug)
        try:
            self._scheduler.suspend = True
            await self.lock.acquire()

            async with snapshot:
                # snapshot addons
                tasks = []
                for slug in addons:
                    addon = self._addons.get(slug)
                    if addon.is_installed:
                        tasks.append(snapshot.import_addon(addon))

                if tasks:
                    _LOGGER.info("Partial-Snapshot %s run %d addons",
                                 snapshot.slug, len(tasks))
                    await asyncio.wait(tasks, loop=self._loop)

                # snapshot folders
                _LOGGER.info("Partial-Snapshot %s store folders %s",
                             snapshot.slug, folders)
                await snapshot.store_folders(folders)

        except (OSError, ValueError, tarfile.TarError) as err:
            _LOGGER.info("Partial-Snapshot %s error: %s", snapshot.slug, err)
            return False

        else:
            _LOGGER.info("Partial-Snapshot %s done", snapshot.slug)
            self.snapshots_obj[snapshot.slug] = snapshot
            return True

        finally:
            self._scheduler.suspend = False
            self.lock.release()

    async def do_restore_full(self, snapshot):
        """Restore a full snapshot."""
        if self.lock.locked():
            _LOGGER.error("A snapshot/restore process is already running")
            return False

        if snapshot.sys_type != SNAPSHOT_FULL:
            _LOGGER.error(
                "Full-Restore %s is only a partial snapshot!", snapshot.slug)
            return False

        _LOGGER.info("Full-Restore %s start", snapshot.slug)
        try:
            self._scheduler.suspend = True
            await self.lock.acquire()

            async with snapshot:
                # stop system
                tasks = []
                tasks.append(self._homeassistant.stop())

                for addon in self._addons.list_addons:
                    if addon.is_installed:
                        tasks.append(addon.stop())

                await asyncio.wait(tasks, loop=self._loop)

                # restore folders
                _LOGGER.info("Full-Restore %s restore folders", snapshot.slug)
                await snapshot.restore_folders()

                # start homeassistant restore
                _LOGGER.info("Full-Restore %s restore Home-Assistant",
                             snapshot.slug)
                snapshot.restore_homeassistant()
                task_hass = self._loop.create_task(
                    self._homeassistant.update(snapshot.homeassistant_version))

                # restore repositories
                _LOGGER.info("Full-Restore %s restore Repositories",
                             snapshot.slug)
                await snapshot.restore_repositories()

                # restore addons
                tasks = []
                actual_addons = \
                    set(addon.slug for addon in self._addons.list_addons
                        if addon.is_installed)
                restore_addons = \
                    set(data[ATTR_SLUG] for data in snapshot.addons)
                remove_addons = actual_addons - restore_addons

                _LOGGER.info("Full-Restore %s restore addons %s, remove %s",
                             snapshot.slug, restore_addons, remove_addons)

                for slug in remove_addons:
                    addon = self._addons.get(slug)
                    if addon:
                        tasks.append(addon.uninstall())
                    else:
                        _LOGGER.warning("Can't remove addon %s", slug)

                for slug in restore_addons:
                    addon = self._addons.get(slug)
                    if addon:
                        tasks.append(snapshot.export_addon(addon))
                    else:
                        _LOGGER.warning("Can't restore addon %s", slug)

                if tasks:
                    _LOGGER.info("Full-Restore %s restore addons tasks %d",
                                 snapshot.slug, len(tasks))
                    await asyncio.wait(tasks, loop=self._loop)

                # finish homeassistant task
                _LOGGER.info("Full-Restore %s wait until homeassistant ready",
                             snapshot.slug)
                await task_hass
                await self._homeassistant.run()

        except (OSError, ValueError, tarfile.TarError) as err:
            _LOGGER.info("Full-Restore %s error: %s", snapshot.slug, err)
            return False

        else:
            _LOGGER.info("Full-Restore %s done", snapshot.slug)
            return True

        finally:
            self._scheduler.suspend = False
            self.lock.release()

    async def do_restore_partial(self, snapshot, homeassistant=False,
                                 addons=None, folders=None):
        """Restore a partial snapshot."""
        if self.lock.locked():
            _LOGGER.error("A snapshot/restore process is already running")
            return False

        addons = addons or []
        folders = folders or []

        _LOGGER.info("Partial-Restore %s start", snapshot.slug)
        try:
            self._scheduler.suspend = True
            await self.lock.acquire()

            async with snapshot:
                tasks = []

                if FOLDER_HOMEASSISTANT in folders:
                    await self._homeassistant.stop()

                if folders:
                    _LOGGER.info("Partial-Restore %s restore folders %s",
                                 snapshot.slug, folders)
                    await snapshot.restore_folders(folders)

                if homeassistant:
                    _LOGGER.info("Partial-Restore %s restore Home-Assistant",
                                 snapshot.slug)
                    snapshot.restore_homeassistant()
                    tasks.append(self._homeassistant.update(
                        snapshot.homeassistant_version))

                for slug in addons:
                    addon = self._addons.get(slug)
                    if addon:
                        tasks.append(snapshot.export_addon(addon))
                    else:
                        _LOGGER.warning("Can't restore addon %s", slug)

                if tasks:
                    _LOGGER.info("Partial-Restore %s run %d tasks",
                                 snapshot.slug, len(tasks))
                    await asyncio.wait(tasks, loop=self._loop)

                # make sure homeassistant runs again
                await self._homeassistant.run()

        except (OSError, ValueError, tarfile.TarError) as err:
            _LOGGER.info("Partial-Restore %s error: %s", snapshot.slug, err)
            return False

        else:
            _LOGGER.info("Partial-Restore %s done", snapshot.slug)
|
||||
return True
|
||||
|
||||
finally:
|
||||
self._scheduler.suspend = False
|
||||
self.lock.release()
|
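All four methods above repeat the same guard: refuse to start when `self.lock` is already held, suspend the scheduler, acquire the lock, and undo both on the way out. As a hedged sketch, that shared pattern could be factored into an async context manager in the same style the `Snapshot` object itself uses; `ExclusiveJob` below is a hypothetical helper, not part of this commit.

class ExclusiveJob:
    """Sketch: shared guard for the do_snapshot_*/do_restore_* methods."""

    def __init__(self, scheduler, lock):
        self.scheduler = scheduler
        self.lock = lock

    async def __aenter__(self):
        # pause periodic tasks while a snapshot/restore runs
        self.scheduler.suspend = True
        await self.lock.acquire()

    async def __aexit__(self, exc_type, exc_value, traceback):
        # always resume the scheduler and free the lock, even on error
        self.scheduler.suspend = False
        self.lock.release()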
hassio/snapshots/snapshot.py (Normal file, 380 lines)
@@ -0,0 +1,380 @@
"""Represent a snapshot file."""
import asyncio
import json
import logging
from pathlib import Path
import tarfile
from tempfile import TemporaryDirectory

import voluptuous as vol
from voluptuous.humanize import humanize_error

from .validate import SCHEMA_SNAPSHOT, ALL_FOLDERS
from .utils import remove_folder
from ..const import (
    ATTR_SLUG, ATTR_NAME, ATTR_DATE, ATTR_ADDONS, ATTR_REPOSITORIES,
    ATTR_HOMEASSISTANT, ATTR_FOLDERS, ATTR_VERSION, ATTR_TYPE, ATTR_IMAGE,
    ATTR_PORT, ATTR_SSL, ATTR_PASSWORD, ATTR_WATCHDOG, ATTR_BOOT,
    ATTR_LAST_VERSION)
from ..coresys import CoreSysAttributes
from ..utils.json import write_json_file

_LOGGER = logging.getLogger(__name__)


class Snapshot(CoreSysAttributes):
    """A single Hass.io snapshot."""

    def __init__(self, coresys, tar_file):
        """Initialize a snapshot."""
        self.coresys = coresys
        self.tar_file = tar_file
        self._data = {}
        self._tmp = None

    @property
    def slug(self):
        """Return snapshot slug."""
        return self._data.get(ATTR_SLUG)

    @property
    def sys_type(self):
        """Return snapshot type."""
        return self._data.get(ATTR_TYPE)

    @property
    def name(self):
        """Return snapshot name."""
        return self._data[ATTR_NAME]

    @property
    def date(self):
        """Return snapshot date."""
        return self._data[ATTR_DATE]

    @property
    def addons(self):
        """Return snapshot add-ons."""
        return self._data[ATTR_ADDONS]

    @property
    def folders(self):
        """Return list of saved folders."""
        return self._data[ATTR_FOLDERS]

    @property
    def repositories(self):
        """Return snapshot repositories."""
        return self._data[ATTR_REPOSITORIES]

    @repositories.setter
    def repositories(self, value):
        """Set snapshot repositories."""
        self._data[ATTR_REPOSITORIES] = value

    @property
    def homeassistant_version(self):
        """Return snapshot Home Assistant version."""
        return self._data[ATTR_HOMEASSISTANT].get(ATTR_VERSION)

    @homeassistant_version.setter
    def homeassistant_version(self, value):
        """Set snapshot Home Assistant version."""
        self._data[ATTR_HOMEASSISTANT][ATTR_VERSION] = value

    @property
    def homeassistant_last_version(self):
        """Return snapshot Home Assistant last version (custom)."""
        return self._data[ATTR_HOMEASSISTANT].get(ATTR_LAST_VERSION)

    @homeassistant_last_version.setter
    def homeassistant_last_version(self, value):
        """Set snapshot Home Assistant last version (custom)."""
        self._data[ATTR_HOMEASSISTANT][ATTR_LAST_VERSION] = value

    @property
    def homeassistant_image(self):
        """Return snapshot Home Assistant custom image."""
        return self._data[ATTR_HOMEASSISTANT].get(ATTR_IMAGE)

    @homeassistant_image.setter
    def homeassistant_image(self, value):
        """Set snapshot Home Assistant custom image."""
        self._data[ATTR_HOMEASSISTANT][ATTR_IMAGE] = value

    @property
    def homeassistant_ssl(self):
        """Return snapshot Home Assistant API SSL setting."""
        return self._data[ATTR_HOMEASSISTANT].get(ATTR_SSL)

    @homeassistant_ssl.setter
    def homeassistant_ssl(self, value):
        """Set snapshot Home Assistant API SSL setting."""
        self._data[ATTR_HOMEASSISTANT][ATTR_SSL] = value

    @property
    def homeassistant_port(self):
        """Return snapshot Home Assistant API port."""
        return self._data[ATTR_HOMEASSISTANT].get(ATTR_PORT)

    @homeassistant_port.setter
    def homeassistant_port(self, value):
        """Set snapshot Home Assistant API port."""
        self._data[ATTR_HOMEASSISTANT][ATTR_PORT] = value

    @property
    def homeassistant_password(self):
        """Return snapshot Home Assistant API password."""
        return self._data[ATTR_HOMEASSISTANT].get(ATTR_PASSWORD)

    @homeassistant_password.setter
    def homeassistant_password(self, value):
        """Set snapshot Home Assistant API password."""
        self._data[ATTR_HOMEASSISTANT][ATTR_PASSWORD] = value

    @property
    def homeassistant_watchdog(self):
        """Return snapshot Home Assistant watchdog options."""
        return self._data[ATTR_HOMEASSISTANT].get(ATTR_WATCHDOG)

    @homeassistant_watchdog.setter
    def homeassistant_watchdog(self, value):
        """Set snapshot Home Assistant watchdog options."""
        self._data[ATTR_HOMEASSISTANT][ATTR_WATCHDOG] = value

    @property
    def homeassistant_boot(self):
        """Return snapshot Home Assistant boot options."""
        return self._data[ATTR_HOMEASSISTANT].get(ATTR_BOOT)

    @homeassistant_boot.setter
    def homeassistant_boot(self, value):
        """Set snapshot Home Assistant boot options."""
        self._data[ATTR_HOMEASSISTANT][ATTR_BOOT] = value

    @property
    def size(self):
        """Return snapshot size in MiB."""
        if not self.tar_file.is_file():
            return 0
        return self.tar_file.stat().st_size / 1048576  # bytes -> MiB

    def create(self, slug, name, date, sys_type):
        """Initialize a new snapshot."""
        # init metadata
        self._data[ATTR_SLUG] = slug
        self._data[ATTR_NAME] = name
        self._data[ATTR_DATE] = date
        self._data[ATTR_TYPE] = sys_type

        # Add defaults
        self._data = SCHEMA_SNAPSHOT(self._data)

    async def load(self):
        """Read snapshot.json from tar file."""
        if not self.tar_file.is_file():
            _LOGGER.error("No tarfile %s", self.tar_file)
            return False

        def _load_file():
            """Read snapshot.json."""
            with tarfile.open(self.tar_file, "r:") as snapshot:
                json_file = snapshot.extractfile("./snapshot.json")
                return json_file.read()

        # read snapshot.json
        try:
            raw = await self._loop.run_in_executor(None, _load_file)
        except (tarfile.TarError, KeyError) as err:
            _LOGGER.error(
                "Can't read snapshot tarfile %s: %s", self.tar_file, err)
            return False

        # parse data
        try:
            raw_dict = json.loads(raw)
        except json.JSONDecodeError as err:
            _LOGGER.error("Can't read data for %s: %s", self.tar_file, err)
            return False

        # validate
        try:
            self._data = SCHEMA_SNAPSHOT(raw_dict)
        except vol.Invalid as err:
            _LOGGER.error("Can't validate data for %s: %s", self.tar_file,
                          humanize_error(raw_dict, err))
            return False

        return True

    async def __aenter__(self):
        """Async context to open a snapshot."""
        self._tmp = TemporaryDirectory(dir=str(self._config.path_tmp))

        # create a new snapshot
        if not self.tar_file.is_file():
            return self

        # extract an existing snapshot
        def _extract_snapshot():
            """Extract a snapshot."""
            with tarfile.open(self.tar_file, "r:") as tar:
                tar.extractall(path=self._tmp.name)

        await self._loop.run_in_executor(None, _extract_snapshot)

    async def __aexit__(self, exception_type, exception_value, traceback):
        """Async context to close a snapshot."""
        # existing snapshot or exception during build
        if self.tar_file.is_file() or exception_type is not None:
            self._tmp.cleanup()
            return

        # validate data
        try:
            self._data = SCHEMA_SNAPSHOT(self._data)
        except vol.Invalid as err:
            _LOGGER.error("Invalid data for %s: %s", self.tar_file,
                          humanize_error(self._data, err))
            raise ValueError("Invalid config") from None

        # new snapshot, build it
        def _create_snapshot():
            """Create a new snapshot."""
            with tarfile.open(self.tar_file, "w:") as tar:
                tar.add(self._tmp.name, arcname=".")

        try:
            write_json_file(Path(self._tmp.name, "snapshot.json"), self._data)
            await self._loop.run_in_executor(None, _create_snapshot)
        except (OSError, json.JSONDecodeError) as err:
            _LOGGER.error("Can't write snapshot: %s", err)
        finally:
            self._tmp.cleanup()

    async def import_addon(self, addon):
        """Add an add-on to the snapshot."""
        snapshot_file = Path(self._tmp.name, "{}.tar.gz".format(addon.slug))

        if not await addon.snapshot(snapshot_file):
            _LOGGER.error("Can't make snapshot from %s", addon.slug)
            return False

        # store to config
        self._data[ATTR_ADDONS].append({
            ATTR_SLUG: addon.slug,
            ATTR_NAME: addon.name,
            ATTR_VERSION: addon.version_installed,
        })

        return True

    async def export_addon(self, addon):
        """Restore an add-on from the snapshot."""
        snapshot_file = Path(self._tmp.name, "{}.tar.gz".format(addon.slug))

        if not await addon.restore(snapshot_file):
            _LOGGER.error("Can't restore snapshot for %s", addon.slug)
            return False

        return True

    async def store_folders(self, folder_list=None):
        """Backup Hass.io data into the snapshot."""
        folder_list = set(folder_list or ALL_FOLDERS)

        def _folder_save(name):
            """Internal function to snapshot a folder."""
            slug_name = name.replace("/", "_")
            snapshot_tar = Path(self._tmp.name, "{}.tar.gz".format(slug_name))
            origin_dir = Path(self._config.path_hassio, name)

            try:
                _LOGGER.info("Snapshot folder %s", name)
                with tarfile.open(snapshot_tar, "w:gz",
                                  compresslevel=1) as tar_file:
                    tar_file.add(origin_dir, arcname=".")

                _LOGGER.info("Snapshot folder %s done", name)
                self._data[ATTR_FOLDERS].append(name)
            except (tarfile.TarError, OSError) as err:
                _LOGGER.warning("Can't snapshot folder %s: %s", name, err)

        # run tasks
        tasks = [self._loop.run_in_executor(None, _folder_save, folder)
                 for folder in folder_list]
        if tasks:
            await asyncio.wait(tasks, loop=self._loop)

    async def restore_folders(self, folder_list=None):
        """Restore Hass.io data from the snapshot."""
        folder_list = set(folder_list or self.folders)

        def _folder_restore(name):
            """Internal function to restore a folder."""
            slug_name = name.replace("/", "_")
            snapshot_tar = Path(self._tmp.name, "{}.tar.gz".format(slug_name))
            origin_dir = Path(self._config.path_hassio, name)

            # clean old stuff
            if origin_dir.is_dir():
                remove_folder(origin_dir)

            try:
                _LOGGER.info("Restore folder %s", name)
                with tarfile.open(snapshot_tar, "r:gz") as tar_file:
                    tar_file.extractall(path=origin_dir)
                _LOGGER.info("Restore folder %s done", name)
            except (tarfile.TarError, OSError) as err:
                _LOGGER.warning("Can't restore folder %s: %s", name, err)

        # run tasks
        tasks = [self._loop.run_in_executor(None, _folder_restore, folder)
                 for folder in folder_list]
        if tasks:
            await asyncio.wait(tasks, loop=self._loop)

    def store_homeassistant(self):
        """Read all data from the Home Assistant object."""
        self.homeassistant_version = self._homeassistant.version
        self.homeassistant_watchdog = self._homeassistant.watchdog
        self.homeassistant_boot = self._homeassistant.boot

        # custom image
        if self._homeassistant.is_custom_image:
            self.homeassistant_image = self._homeassistant.image
            self.homeassistant_last_version = self._homeassistant.last_version

        # api
        self.homeassistant_port = self._homeassistant.api_port
        self.homeassistant_ssl = self._homeassistant.api_ssl
        self.homeassistant_password = self._homeassistant.api_password

    def restore_homeassistant(self):
        """Write all data to the Home Assistant object."""
        self._homeassistant.watchdog = self.homeassistant_watchdog
        self._homeassistant.boot = self.homeassistant_boot

        # custom image
        if self.homeassistant_image:
            self._homeassistant.image = self.homeassistant_image
            self._homeassistant.last_version = self.homeassistant_last_version

        # api
        self._homeassistant.api_port = self.homeassistant_port
        self._homeassistant.api_ssl = self.homeassistant_ssl
        self._homeassistant.api_password = self.homeassistant_password

        # save
        self._homeassistant.save_data()

    def store_repositories(self):
        """Store the repository list in the snapshot."""
        self.repositories = self._config.addons_repositories

    def restore_repositories(self):
        """Restore repositories from the snapshot.

        Return a coroutine.
        """
        return self._addons.load_repositories(self.repositories)
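For orientation, this is how the context manager above is meant to be driven: `create()` seeds the metadata, `__aenter__` opens the temporary build directory, and `snapshot.json` plus the tar are only written on a clean `__aexit__`. A minimal usage sketch, assuming a configured `coresys` and using 'full' as the plain string value behind SNAPSHOT_FULL:

from pathlib import Path


async def build_demo_snapshot(coresys):
    """Sketch: build a snapshot containing only the stored folders."""
    snapshot = Snapshot(coresys, Path("/backup/demo1234.tar"))
    snapshot.create("demo1234", "My snapshot", "2018-01-25T00:00:00", "full")

    async with snapshot:
        # everything staged into the tmp dir here ends up in the tar
        await snapshot.store_folders()
    # on exit: data validated, snapshot.json written, tar packed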
hassio/snapshots/utils.py (Normal file, 21 lines)
@@ -0,0 +1,21 @@
"""Util snapshot functions."""
import hashlib
import shutil


def create_slug(name, date_str):
    """Generate a hash-based slug from the snapshot name and date."""
    key = "{} - {}".format(date_str, name).lower().encode()
    return hashlib.sha1(key).hexdigest()[:8]


def remove_folder(folder):
    """Remove folder data but not the folder itself."""
    for obj in folder.iterdir():
        try:
            if obj.is_dir():
                shutil.rmtree(str(obj), ignore_errors=True)
            else:
                obj.unlink()
        except (OSError, shutil.Error):
            pass
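`create_slug` keys the hash on the date first, so the same snapshot name taken at two different times yields two distinct slugs. A quick illustration:

slug = create_slug("My snapshot", "2018-01-25T00:00:00")
# first 8 hex chars of sha1("2018-01-25t00:00:00 - my snapshot")
assert len(slug) == 8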
hassio/snapshots/validate.py (Normal file, 50 lines)
@@ -0,0 +1,50 @@
"""Validate some things around restore."""
import voluptuous as vol

from ..const import (
    ATTR_REPOSITORIES, ATTR_ADDONS, ATTR_NAME, ATTR_SLUG, ATTR_DATE,
    ATTR_VERSION, ATTR_HOMEASSISTANT, ATTR_FOLDERS, ATTR_TYPE, ATTR_IMAGE,
    ATTR_PASSWORD, ATTR_PORT, ATTR_SSL, ATTR_WATCHDOG, ATTR_BOOT,
    ATTR_LAST_VERSION,
    FOLDER_SHARE, FOLDER_HOMEASSISTANT, FOLDER_ADDONS, FOLDER_SSL,
    SNAPSHOT_FULL, SNAPSHOT_PARTIAL)
from ..validate import NETWORK_PORT, REPOSITORIES, DOCKER_IMAGE

ALL_FOLDERS = [FOLDER_HOMEASSISTANT, FOLDER_SHARE, FOLDER_ADDONS, FOLDER_SSL]


def unique_addons(addons_list):
    """Validate that each add-on slug in the list is unique."""
    single = {addon[ATTR_SLUG] for addon in addons_list}

    if len(single) != len(addons_list):
        raise vol.Invalid("Invalid addon list on snapshot!")
    return addons_list


# pylint: disable=no-value-for-parameter
SCHEMA_SNAPSHOT = vol.Schema({
    vol.Required(ATTR_SLUG): vol.Coerce(str),
    vol.Required(ATTR_TYPE): vol.In([SNAPSHOT_FULL, SNAPSHOT_PARTIAL]),
    vol.Required(ATTR_NAME): vol.Coerce(str),
    vol.Required(ATTR_DATE): vol.Coerce(str),
    vol.Optional(ATTR_HOMEASSISTANT, default=dict): vol.Schema({
        vol.Required(ATTR_VERSION): vol.Coerce(str),
        vol.Inclusive(ATTR_IMAGE, 'custom_hass'): DOCKER_IMAGE,
        vol.Inclusive(ATTR_LAST_VERSION, 'custom_hass'): vol.Coerce(str),
        vol.Optional(ATTR_BOOT, default=True): vol.Boolean(),
        vol.Optional(ATTR_SSL, default=False): vol.Boolean(),
        vol.Optional(ATTR_PORT, default=8123): NETWORK_PORT,
        vol.Optional(ATTR_PASSWORD): vol.Any(None, vol.Coerce(str)),
        vol.Optional(ATTR_WATCHDOG, default=True): vol.Boolean(),
    }, extra=vol.REMOVE_EXTRA),
    vol.Optional(ATTR_FOLDERS, default=list):
        vol.All([vol.In(ALL_FOLDERS)], vol.Unique()),
    vol.Optional(ATTR_ADDONS, default=list): vol.All([vol.Schema({
        vol.Required(ATTR_SLUG): vol.Coerce(str),
        vol.Required(ATTR_NAME): vol.Coerce(str),
        vol.Required(ATTR_VERSION): vol.Coerce(str),
    }, extra=vol.REMOVE_EXTRA)], unique_addons),
    vol.Optional(ATTR_REPOSITORIES, default=list): REPOSITORIES,
}, extra=vol.ALLOW_EXTRA)
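Since most keys carry defaults, `SCHEMA_SNAPSHOT` both validates a loaded `snapshot.json` and fills in a freshly created one, which is how `Snapshot.create()` uses it. A small sketch, assuming the usual string values of the `ATTR_*` constants ('slug', 'type', and so on):

data = SCHEMA_SNAPSHOT({
    'slug': 'demo1234',
    'type': 'full',
    'name': 'My snapshot',
    'date': '2018-01-25T00:00:00',
    'homeassistant': {'version': '0.61.1'},
})

# defaults were filled in by voluptuous
assert data['folders'] == []
assert data['homeassistant']['port'] == 8123
assert data['homeassistant']['boot'] is True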
hassio/supervisor.py (Normal file, 82 lines)
@@ -0,0 +1,82 @@
"""Supervisor control object."""
import logging

from .coresys import CoreSysAttributes
from .docker.supervisor import DockerSupervisor

_LOGGER = logging.getLogger(__name__)


class Supervisor(CoreSysAttributes):
    """Core object to handle the Supervisor instance."""

    def __init__(self, coresys):
        """Initialize the Supervisor object."""
        self.coresys = coresys
        self.instance = DockerSupervisor(coresys)

    async def load(self):
        """Prepare the Supervisor object."""
        if not await self.instance.attach():
            _LOGGER.fatal("Can't set up the Supervisor Docker container!")
        await self.instance.cleanup()

    @property
    def need_update(self):
        """Return True if an update is available."""
        return self.version != self.last_version

    @property
    def version(self):
        """Return version of the running Supervisor."""
        return self.instance.version

    @property
    def last_version(self):
        """Return last available version of the Supervisor."""
        return self._updater.version_hassio

    @property
    def image(self):
        """Return image name of the Supervisor container."""
        return self.instance.image

    @property
    def arch(self):
        """Return arch of the Hass.io container."""
        return self.instance.arch

    async def update(self, version=None):
        """Update the Supervisor version."""
        version = version or self.last_version

        if version == self.version:
            _LOGGER.warning("Version %s is already installed", version)
            return

        _LOGGER.info("Update Supervisor to version %s", version)
        if await self.instance.install(version):
            # stop the core loop shortly after install so the freshly
            # installed Supervisor container can take over
            self._loop.call_later(1, self._loop.stop)
            return True

        _LOGGER.error("Update of Hass.io failed!")
        return False

    @property
    def in_progress(self):
        """Return True if a task is in progress."""
        return self.instance.in_progress

    def logs(self):
        """Get Supervisor docker logs.

        Return a coroutine.
        """
        return self.instance.logs()

    def stats(self):
        """Return stats of Supervisor.

        Return a coroutine.
        """
        return self.instance.stats()
hassio/tasks.py (Normal file, 130 lines)
@@ -0,0 +1,130 @@
"""Multiple tasks."""
import asyncio
import logging

from .coresys import CoreSysAttributes

_LOGGER = logging.getLogger(__name__)


class Tasks(CoreSysAttributes):
    """Handle Tasks inside HassIO."""

    RUN_UPDATE_SUPERVISOR = 29100
    RUN_UPDATE_ADDONS = 57600

    RUN_RELOAD_ADDONS = 21600
    RUN_RELOAD_SNAPSHOTS = 72000
    RUN_RELOAD_HOST_CONTROL = 72000
    RUN_RELOAD_UPDATER = 21600

    RUN_WATCHDOG_HOMEASSISTANT_DOCKER = 15
    RUN_WATCHDOG_HOMEASSISTANT_API = 300

    def __init__(self, coresys):
        """Initialize Tasks."""
        self.coresys = coresys
        self.jobs = set()
        self._data = {}

    async def load(self):
        """Add Tasks to the scheduler."""
        self.jobs.add(self._scheduler.register_task(
            self._update_addons, self.RUN_UPDATE_ADDONS))
        self.jobs.add(self._scheduler.register_task(
            self._update_supervisor, self.RUN_UPDATE_SUPERVISOR))

        self.jobs.add(self._scheduler.register_task(
            self._addons.reload, self.RUN_RELOAD_ADDONS))
        self.jobs.add(self._scheduler.register_task(
            self._updater.reload, self.RUN_RELOAD_UPDATER))
        self.jobs.add(self._scheduler.register_task(
            self._snapshots.reload, self.RUN_RELOAD_SNAPSHOTS))
        self.jobs.add(self._scheduler.register_task(
            self._host_control.load, self.RUN_RELOAD_HOST_CONTROL))

        self.jobs.add(self._scheduler.register_task(
            self._watchdog_homeassistant_docker,
            self.RUN_WATCHDOG_HOMEASSISTANT_DOCKER))
        self.jobs.add(self._scheduler.register_task(
            self._watchdog_homeassistant_api,
            self.RUN_WATCHDOG_HOMEASSISTANT_API))

        _LOGGER.info("All core tasks are scheduled")

    async def _update_addons(self):
        """Check if add-on updates are available and install them."""
        tasks = []
        for addon in self._addons.list_addons:
            if not addon.is_installed or not addon.auto_update:
                continue

            if addon.version_installed == addon.last_version:
                continue

            if addon.test_udpate_schema():
                tasks.append(addon.update())
            else:
                _LOGGER.warning(
                    "Add-on %s will be ignored, schema tests failed",
                    addon.slug)

        if tasks:
            _LOGGER.info("Add-on auto update process %d tasks", len(tasks))
            await asyncio.wait(tasks, loop=self._loop)

    async def _update_supervisor(self):
        """Check for and run an update of the Hass.io Supervisor."""
        if not self._supervisor.need_update:
            return

        # don't perform an update on the beta/dev channel
        if self._updater.beta_channel:
            _LOGGER.warning("Ignore Hass.io update on beta upstream!")
            return

        _LOGGER.info("Found new Hass.io version")
        await self._supervisor.update()

    async def _watchdog_homeassistant_docker(self):
        """Check the running state of Docker and start it if it is down."""
        # if Home-Assistant is active
        if not await self._homeassistant.is_initialize() or \
                not self._homeassistant.watchdog:
            return

        # if Home-Assistant is running
        if self._homeassistant.in_progress or \
                await self._homeassistant.is_running():
            return

        _LOGGER.warning("Watchdog found a problem with Home-Assistant docker!")
        await self._homeassistant.run()

    async def _watchdog_homeassistant_api(self):
        """Scheduler task that monitors the running state of the API.

        Try twice to call the API before we restart Home-Assistant. Maybe we
        just hit a delay in our system.
        """
        retry_scan = self._data.get('HASS_WATCHDOG_API', 0)

        # If Home-Assistant is active
        if not await self._homeassistant.is_initialize() or \
                not self._homeassistant.watchdog:
            return

        # If the Home-Assistant API is up
        if self._homeassistant.in_progress or \
                await self._homeassistant.check_api_state():
            return

        # Looks like we ran into a problem
        retry_scan += 1
        if retry_scan == 1:
            self._data['HASS_WATCHDOG_API'] = retry_scan
            _LOGGER.warning("Watchdog missed an API response from Home-Assistant")
            return

        _LOGGER.error("Watchdog found a problem with the Home-Assistant API!")
        await self._homeassistant.restart()
        self._data['HASS_WATCHDOG_API'] = 0
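The API watchdog above implements a two-strike rule: a first missed check only records a retry counter in `self._data` and warns, and the restart fires on the next consecutive miss. The same decision logic in isolation, as a hedged sketch with illustrative names:

def watchdog_should_restart(state, api_ok):
    """Return True when a Home-Assistant restart is due.

    `state` stands in for the Tasks._data dict; note that, matching the
    code above, the counter is not cleared by a successful check.
    """
    if api_ok:
        return False

    retry_scan = state.get('HASS_WATCHDOG_API', 0) + 1
    if retry_scan == 1:
        state['HASS_WATCHDOG_API'] = retry_scan
        return False  # first miss: warn only

    state['HASS_WATCHDOG_API'] = 0
    return True  # second consecutive miss: restart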
@@ -1,92 +0,0 @@
"""Tools file for HassIO."""
import asyncio
import json
import logging
import re
import socket

import aiohttp
import async_timeout

from .const import URL_HASSIO_VERSION, URL_HASSIO_VERSION_BETA

_LOGGER = logging.getLogger(__name__)

_RE_VERSION = re.compile(r"VERSION=(.*)")
_IMAGE_ARCH = re.compile(r".*/([a-z0-9]*)-hassio-supervisor")


async def fetch_last_versions(websession, beta=False):
    """Fetch current versions from github.

    Is a coroutine.
    """
    url = URL_HASSIO_VERSION_BETA if beta else URL_HASSIO_VERSION
    try:
        with async_timeout.timeout(10, loop=websession.loop):
            async with websession.get(url) as request:
                return await request.json(content_type=None)

    except (aiohttp.ClientError, asyncio.TimeoutError, KeyError) as err:
        _LOGGER.warning("Can't fetch versions from %s! %s", url, err)

    except json.JSONDecodeError as err:
        _LOGGER.warning("Can't parse versions from %s! %s", url, err)


def get_arch_from_image(image):
    """Return arch from hassio image name."""
    found = _IMAGE_ARCH.match(image)
    if found:
        return found.group(1)


def get_version_from_env(env_list):
    """Extract Version from ENV list."""
    for env in env_list:
        found = _RE_VERSION.match(env)
        if found:
            return found.group(1)

    _LOGGER.error("Can't find VERSION in env")
    return None


def get_local_ip(loop):
    """Retrieve local IP address.

    Return a future.
    """
    def local_ip():
        """Return local ip."""
        try:
            sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)

            # Use Google Public DNS server to determine own IP
            sock.connect(('8.8.8.8', 80))

            return sock.getsockname()[0]
        except socket.error:
            return socket.gethostbyname(socket.gethostname())
        finally:
            sock.close()

    return loop.run_in_executor(None, local_ip)


def write_json_file(jsonfile, data):
    """Write a json file."""
    try:
        json_str = json.dumps(data, indent=2)
        with jsonfile.open('w') as conf_file:
            conf_file.write(json_str)
    except (OSError, json.JSONDecodeError):
        return False

    return True


def read_json_file(jsonfile):
    """Read a json file and return a dict."""
    with jsonfile.open('r') as cfile:
        return json.loads(cfile.read())
hassio/updater.py (Normal file, 92 lines)
@@ -0,0 +1,92 @@
"""Fetch last versions from webserver."""
import asyncio
from datetime import timedelta
import json
import logging

import aiohttp
import async_timeout

from .const import (
    URL_HASSIO_VERSION, FILE_HASSIO_UPDATER, ATTR_HOMEASSISTANT, ATTR_HASSIO,
    ATTR_BETA_CHANNEL)
from .coresys import CoreSysAttributes
from .utils import AsyncThrottle
from .utils.json import JsonConfig
from .validate import SCHEMA_UPDATER_CONFIG

_LOGGER = logging.getLogger(__name__)


class Updater(JsonConfig, CoreSysAttributes):
    """Fetch last versions from version.json."""

    def __init__(self, coresys):
        """Initialize updater."""
        super().__init__(FILE_HASSIO_UPDATER, SCHEMA_UPDATER_CONFIG)
        self.coresys = coresys

    def load(self):
        """Update internal data.

        Return a coroutine.
        """
        return self.reload()

    @property
    def version_homeassistant(self):
        """Return the last version of Home Assistant."""
        return self._data.get(ATTR_HOMEASSISTANT)

    @property
    def version_hassio(self):
        """Return the last version of Hass.io."""
        return self._data.get(ATTR_HASSIO)

    @property
    def upstream(self):
        """Return the upstream branch for versions."""
        if self.beta_channel:
            return 'dev'
        return 'master'

    @property
    def beta_channel(self):
        """Return True if we run on the beta upstream."""
        return self._data[ATTR_BETA_CHANNEL]

    @beta_channel.setter
    def beta_channel(self, value):
        """Set beta upstream mode."""
        self._data[ATTR_BETA_CHANNEL] = bool(value)

    @AsyncThrottle(timedelta(seconds=60))
    async def reload(self):
        """Fetch current versions from github.

        Is a coroutine.
        """
        url = URL_HASSIO_VERSION.format(self.upstream)
        try:
            _LOGGER.info("Fetch update data from %s", url)
            with async_timeout.timeout(10, loop=self._loop):
                async with self._websession.get(url) as request:
                    data = await request.json(content_type=None)

        except (aiohttp.ClientError, asyncio.TimeoutError, KeyError) as err:
            _LOGGER.warning("Can't fetch versions from %s: %s", url, err)
            return

        except json.JSONDecodeError as err:
            _LOGGER.warning("Can't parse versions from %s: %s", url, err)
            return

        # data valid?
        if not data:
            _LOGGER.warning("Invalid data from %s", url)
            return

        # update versions
        self._data[ATTR_HOMEASSISTANT] = data.get('homeassistant')
        self._data[ATTR_HASSIO] = data.get('hassio')
        self.save_data()
hassio/utils/__init__.py (Normal file, 34 lines)
@@ -0,0 +1,34 @@
"""Tools file for HassIO."""
from datetime import datetime
import re

RE_STRING = re.compile(r"\x1b(\[.*?[@-~]|\].*?(\x07|\x1b\\))")


def convert_to_ascii(raw):
    """Convert binary to ascii and remove colors."""
    return RE_STRING.sub("", raw.decode())


class AsyncThrottle(object):
    """Decorator that prevents a function from being called more than
    once every time period.
    """

    def __init__(self, delta):
        """Initialize async throttle."""
        self.throttle_period = delta
        self.time_of_last_call = datetime.min

    def __call__(self, method):
        """Throttle the decorated coroutine function."""
        async def wrapper(*args, **kwargs):
            """Throttled function wrapper."""
            now = datetime.now()
            time_since_last_call = now - self.time_of_last_call

            if time_since_last_call > self.throttle_period:
                self.time_of_last_call = now
                return await method(*args, **kwargs)

        return wrapper
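Note that `AsyncThrottle` drops calls that arrive inside the throttle window rather than queueing them; a throttled call simply returns `None`. A small usage sketch:

import asyncio
from datetime import timedelta


@AsyncThrottle(timedelta(seconds=60))
async def refresh():
    """Pretend to hit a remote endpoint."""
    return "fetched"


async def main():
    assert await refresh() == "fetched"  # first call runs
    assert await refresh() is None       # second call inside 60s is dropped

asyncio.get_event_loop().run_until_complete(main())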
hassio/utils/dt.py (Normal file, 76 lines)
@@ -0,0 +1,76 @@
"""Tools file for HassIO."""
import asyncio
from datetime import datetime, timedelta, timezone
import logging
import re

import aiohttp
import async_timeout
import pytz

_LOGGER = logging.getLogger(__name__)

FREEGEOIP_URL = "https://freegeoip.io/json/"

# Copyright (c) Django Software Foundation and individual contributors.
# All rights reserved.
# https://github.com/django/django/blob/master/LICENSE
DATETIME_RE = re.compile(
    r'(?P<year>\d{4})-(?P<month>\d{1,2})-(?P<day>\d{1,2})'
    r'[T ](?P<hour>\d{1,2}):(?P<minute>\d{1,2})'
    r'(?::(?P<second>\d{1,2})(?:\.(?P<microsecond>\d{1,6})\d{0,6})?)?'
    r'(?P<tzinfo>Z|[+-]\d{2}(?::?\d{2})?)?$'
)


async def fetch_timezone(websession):
    """Read timezone from freegeoip."""
    data = {}
    try:
        with async_timeout.timeout(10, loop=websession.loop):
            async with websession.get(FREEGEOIP_URL) as request:
                data = await request.json()

    except (aiohttp.ClientError, asyncio.TimeoutError, KeyError) as err:
        _LOGGER.warning("Can't fetch freegeoip data: %s", err)

    except ValueError as err:
        _LOGGER.warning("Error on parse freegeoip data: %s", err)

    return data.get('time_zone', 'UTC')


# Copyright (c) Django Software Foundation and individual contributors.
# All rights reserved.
# https://github.com/django/django/blob/master/LICENSE
def parse_datetime(dt_str):
    """Parse a string and return a datetime.datetime.

    This function supports time zone offsets. When the input contains one,
    the output uses a timezone with a fixed offset from UTC.
    Raises ValueError if the input is well formatted but not a valid datetime.
    Returns None if the input isn't well formatted.
    """
    match = DATETIME_RE.match(dt_str)
    if not match:
        return None
    kws = match.groupdict()  # type: Dict[str, Any]
    if kws['microsecond']:
        kws['microsecond'] = kws['microsecond'].ljust(6, '0')
    tzinfo_str = kws.pop('tzinfo')

    tzinfo = None  # type: Optional[dt.tzinfo]
    if tzinfo_str == 'Z':
        tzinfo = pytz.utc
    elif tzinfo_str is not None:
        offset_mins = int(tzinfo_str[-2:]) if len(tzinfo_str) > 3 else 0
        offset_hours = int(tzinfo_str[1:3])
        offset = timedelta(hours=offset_hours, minutes=offset_mins)
        if tzinfo_str[0] == '-':
            offset = -offset
        tzinfo = timezone(offset)
    else:
        tzinfo = None
    kws = {k: int(v) for k, v in kws.items() if v is not None}
    kws['tzinfo'] = tzinfo
    return datetime(**kws)
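`parse_datetime` accepts the subset of ISO 8601 matched by `DATETIME_RE`, including a `Z` suffix or a numeric UTC offset, and returns `None` for anything it cannot match. For example:

from datetime import timedelta

import pytz

assert parse_datetime('2018-01-25T12:30:00.123Z').tzinfo is pytz.utc
assert parse_datetime('2018-01-25T12:30:00.123Z').microsecond == 123000

offset = parse_datetime('2018-01-25 12:30:00+01:30').utcoffset()
assert offset == timedelta(hours=1, minutes=30)

assert parse_datetime('not a date') is None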
hassio/utils/json.py (Normal file, 73 lines)
@@ -0,0 +1,73 @@
"""Tools file for HassIO."""
import json
import logging

import voluptuous as vol
from voluptuous.humanize import humanize_error

_LOGGER = logging.getLogger(__name__)


def write_json_file(jsonfile, data):
    """Write a json file."""
    json_str = json.dumps(data, indent=2)
    with jsonfile.open('w') as conf_file:
        conf_file.write(json_str)


def read_json_file(jsonfile):
    """Read a json file and return a dict."""
    with jsonfile.open('r') as cfile:
        return json.loads(cfile.read())


class JsonConfig(object):
    """Base class for a schema-validated JSON config file."""

    def __init__(self, json_file, schema):
        """Initialize the config object."""
        self._file = json_file
        self._schema = schema
        self._data = {}

        self.read_data()

    def read_data(self):
        """Read the json file & validate it."""
        if self._file.is_file():
            try:
                self._data = read_json_file(self._file)
            except (OSError, json.JSONDecodeError):
                _LOGGER.warning("Can't read %s", self._file)
                self._data = {}

        # Validate
        try:
            self._data = self._schema(self._data)
        except vol.Invalid as ex:
            _LOGGER.error("Can't parse %s: %s",
                          self._file, humanize_error(self._data, ex))

            # Reset data to default
            _LOGGER.warning("Reset %s to default", self._file)
            self._data = self._schema({})

    def save_data(self):
        """Store data to the config file."""
        # Validate
        try:
            self._data = self._schema(self._data)
        except vol.Invalid as ex:
            _LOGGER.error("Can't parse data: %s",
                          humanize_error(self._data, ex))

            # Load last valid data from file
            _LOGGER.warning("Reset %s to last version", self._file)
            self.read_data()
            return

        # write
        try:
            write_json_file(self._file, self._data)
        except (OSError, json.JSONDecodeError) as err:
            _LOGGER.error("Can't store config in %s: %s", self._file, err)
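`JsonConfig` is the persistence base used by `Updater` above: subclasses pass a file path and a voluptuous schema, get validated data loaded on construction, and call `save_data()` after mutating `self._data`. A minimal sketch, with a hypothetical schema and path:

from pathlib import Path

import voluptuous as vol

# hypothetical example schema, not part of this commit
SCHEMA_DEMO = vol.Schema({
    vol.Optional('counter', default=0): vol.Coerce(int),
}, extra=vol.REMOVE_EXTRA)


class DemoConfig(JsonConfig):
    """Tiny JSON-backed config using the machinery above."""

    def __init__(self):
        super().__init__(Path('/tmp/demo.json'), SCHEMA_DEMO)

    @property
    def counter(self):
        return self._data['counter']

    def bump(self):
        self._data['counter'] += 1
        self.save_data()  # validate, then write back to disk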
hassio/validate.py (Normal file, 96 lines)
@@ -0,0 +1,96 @@
"""Validate functions."""
import uuid

import voluptuous as vol
import pytz

from .const import (
    ATTR_IMAGE, ATTR_LAST_VERSION, ATTR_BETA_CHANNEL, ATTR_TIMEZONE,
    ATTR_ADDONS_CUSTOM_LIST, ATTR_AUDIO_OUTPUT, ATTR_AUDIO_INPUT,
    ATTR_PASSWORD, ATTR_HOMEASSISTANT, ATTR_HASSIO, ATTR_BOOT, ATTR_LAST_BOOT,
    ATTR_SSL, ATTR_PORT, ATTR_WATCHDOG, ATTR_WAIT_BOOT, ATTR_UUID)


NETWORK_PORT = vol.All(vol.Coerce(int), vol.Range(min=1, max=65535))
ALSA_CHANNEL = vol.Match(r"\d+,\d+")
WAIT_BOOT = vol.All(vol.Coerce(int), vol.Range(min=1, max=60))
DOCKER_IMAGE = vol.Match(r"^[\w{}]+/[\-\w{}]+$")

# pylint: disable=no-value-for-parameter
REPOSITORIES = vol.All([vol.Url()], vol.Unique())


def validate_timezone(timezone):
    """Validate voluptuous timezone."""
    try:
        pytz.timezone(timezone)
    except pytz.exceptions.UnknownTimeZoneError:
        raise vol.Invalid(
            "Invalid time zone passed in. Valid options can be found here: "
            "http://en.wikipedia.org/wiki/List_of_tz_database_time_zones") \
            from None

    return timezone


# pylint: disable=inconsistent-return-statements
def convert_to_docker_ports(data):
    """Convert data into a docker port list."""
    # dynamic ports
    if data is None:
        return None

    # single port
    if isinstance(data, int):
        return NETWORK_PORT(data)

    # port list
    if isinstance(data, list) and len(data) > 2:
        return vol.Schema([NETWORK_PORT])(data)

    # ip port mapping
    if isinstance(data, list) and len(data) == 2:
        return (vol.Coerce(str)(data[0]), NETWORK_PORT(data[1]))

    raise vol.Invalid("Can't validate docker host settings")


DOCKER_PORTS = vol.Schema({
    vol.All(vol.Coerce(str), vol.Match(r"^\d+(?:/tcp|/udp)?$")):
        convert_to_docker_ports,
})


# pylint: disable=no-value-for-parameter
SCHEMA_HASS_CONFIG = vol.Schema({
    vol.Optional(ATTR_UUID, default=lambda: uuid.uuid4().hex):
        vol.Match(r"^[0-9a-f]{32}$"),
    vol.Optional(ATTR_BOOT, default=True): vol.Boolean(),
    vol.Inclusive(ATTR_IMAGE, 'custom_hass'): DOCKER_IMAGE,
    vol.Inclusive(ATTR_LAST_VERSION, 'custom_hass'): vol.Coerce(str),
    vol.Optional(ATTR_PORT, default=8123): NETWORK_PORT,
    vol.Optional(ATTR_PASSWORD): vol.Any(None, vol.Coerce(str)),
    vol.Optional(ATTR_SSL, default=False): vol.Boolean(),
    vol.Optional(ATTR_WATCHDOG, default=True): vol.Boolean(),
}, extra=vol.REMOVE_EXTRA)


# pylint: disable=no-value-for-parameter
SCHEMA_UPDATER_CONFIG = vol.Schema({
    vol.Optional(ATTR_BETA_CHANNEL, default=False): vol.Boolean(),
    vol.Optional(ATTR_HOMEASSISTANT): vol.Coerce(str),
    vol.Optional(ATTR_HASSIO): vol.Coerce(str),
}, extra=vol.REMOVE_EXTRA)


# pylint: disable=no-value-for-parameter
SCHEMA_HASSIO_CONFIG = vol.Schema({
    vol.Optional(ATTR_TIMEZONE, default='UTC'): validate_timezone,
    vol.Optional(ATTR_LAST_BOOT): vol.Coerce(str),
    vol.Optional(ATTR_ADDONS_CUSTOM_LIST, default=[
        "https://github.com/hassio-addons/repository",
    ]): REPOSITORIES,
    vol.Optional(ATTR_AUDIO_OUTPUT): ALSA_CHANNEL,
    vol.Optional(ATTR_AUDIO_INPUT): ALSA_CHANNEL,
    vol.Optional(ATTR_WAIT_BOOT, default=5): WAIT_BOOT,
}, extra=vol.REMOVE_EXTRA)
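`convert_to_docker_ports` normalizes each value of an add-on's port map: `None` keeps the port dynamic, an int maps it directly, a two-item list becomes an (ip, port) binding, and longer lists are validated as plain port lists. For example:

ports = DOCKER_PORTS({
    '8123/tcp': 8123,             # plain host port
    '80/tcp': None,               # dynamic port
    '53/udp': ['127.0.0.1', 53],  # bind to one interface
})
assert ports['53/udp'] == ('127.0.0.1', 53)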
home-assistant-polymer (Submodule)
Submodule home-assistant-polymer added at c3e35a27ba

misc/hassio.png (BIN)
Binary file not shown. Size before: 42 KiB, after: 37 KiB.
@@ -1 +1 @@
<mxfile userAgent="Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.81 Safari/537.36" version="6.5.6" editor="www.draw.io" type="device"><diagram name="Page-1">5Vptc6M2EP41/ng3gHj9mPiSy820c5n6Q3sfsVBsNTJyhYid/voKkABZkOBY+KYtmYnR6pVn99ld1l6A5e74laX77a80Q2ThOdlxAb4sPC8OY/G/Erw2At9xG8GG4awR9QQr/DeSQkdKS5yhQhvIKSUc73UhpHmOINdkKWP0oA97okTfdZ9ukCFYwZSY0t9xxrdS6oZJ1/GA8GYrt469sOlYp/B5w2iZy/0WHniqr6Z7l6q15IMW2zSjh54I3C3AklHKm7vdcYlIBa2CrZl3P9LbnpuhnE+Z4DUTXlJSInXikIipt09UrCAOyF8lKOFfJVUdn4paZTdigNjtKD5ERw206DtIYKrenLJdSrrJ4m5TfX5fqX3E2Zqtmg4JS7urd9hijlb7FFbtg7A2MWjLd0S03Oo0mJAlJZTVowXYKIRQyAvO6DPq9Tj1Jc+/kutLvF4Q4+g4CqHbKkbYO6I7xNmrGKImJKCZIm09SKRuD53l+Arobc9oQjkulca6aZfuFCZupM6G9QcM/X3LcaW31WvB0e5CNGGG1vF6CE0QggRkrb7sAhhNBNCzAKBvAPiFwmfELkUOokCQ/trI+SZy3hBywAJyoYHcw9JArXaFqJpRUe9MLscQDXN5HQd+4NjB0A8DHcPQxDBwTAgDCxAmBl4oE3FINinjW7qheUruOumtjmgPPXTE/I9K/DkKZPOH6srFwZq+QDV/yBX+RJy/ygiclpwKUbfxL5Tu5RrNUavzvQ20eBxaMihHRTJ4p2yDeM9uTHUwRFKOX/TVLwFX5RK20fXeQDcB3im+deMRMSweALGfBbp/JdCj0Xxi3UX48xIMN6wSjNMEYlXuEXvBhXAJagOm+h7Sovj2fTTBaMXr0aSjMwP3fbdluKflMgybVEN3aFmA4sy347ZAoLstMJB1uPGA33JtRE3Xm4Nbbo9Yyou13NJ4VbuxeUnkqveOHouiK7EIzOO6NHh1dE/iQtc89VyFwIPfVK9YQgCJYBqGSnyPidpzqm5QnpmLCWFvqcFMfrm0qlgvvlZQUm8cvaxJrPLpRjy6wLByU9dxRSmKn6CtLFR3Rd5A/t56HS1/9224ovDKXHE/O3qQ/+zG8aWBfiKtPmjxwLR4d0Sn1i3enyVUSJ30srCJCPYcTk5zpHmb8xQ2Vl+AJXtp+WpPYdeKPa5ZUrjJMpoXhhqLbbqvbveMQlQU73sn3ZVN9lX34qr9fZMTCt07XhiBxANhEHtx7PhgpqRqyJN5bmB6ssSCI1O1nDmJ0rVOHdWlqYAkU59uc7zoXEAAOfWR4vq9Q5WqneE0Wq3Q0FJO6hdSz1ynobKxTm0U7dNMs5PYJCjk1KxYKX6WO9IMALcVOzAUyKdrRB5pgTmmuRiyppzTnRhAqo7btoitVVbrMna3xg3Bm2oup+fRvCvEnpZu5QYWiHxS0wEDNR0wkJBYqciaNJ5AUifSWOq/x1LX5OgUOk5Ity8PgO97LQshEng/L0SqvXsMPBwOpvcmBO+LWg2SiZDQMrs4Tl6FQInuz3xnIKeP5iovgLcLo9K4P5DEn8mRmTLEXqzt3hyaQ3qj0faDNPFNmjTmaz+S+icmc+pN7YVAMP6tjfNQrkcjIUzZ5fQL62uAfkH1Z4d+CThJJ4boN1TdsxLBopnY17f7yGaWOT9lP8i+YAb2TVZjYJDkK+bbuekxFp2QmwUomocevnppvQo94v9LcEpCnaOR5dgU/idjk/m9+G9oX71qUYbReBXl30s+Vf6dgXyi2f0WqlFG93szcPcP</diagram></mxfile>
<mxfile userAgent="Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36" version="7.9.5" editor="www.draw.io" type="device"><diagram name="Page-1" id="535f6c39-9b73-04c2-941c-82630de90f1a">5VrLcqM4FP0aLzsFiOcycefRVTPVqfFippdYKLYmMvII4cd8/QiQDEKQ4Bicnmp7Yevqybk691zJnoH55vDI4u36d5ogMnOs5DADX2eOY1vAER+F5VhZ3DCoDCuGE9moNizwv0j1lNYcJyjTGnJKCcdb3QhpmiLINVvMGN3rzV4o0WfdxitkGBYwJqb1T5zwtbTaflRXPCG8WsupQ8evKpYxfF0xmqdyvpkDXspXVb2J1VjyQbN1nNB9wwTuZ2DOKOXVt81hjkiBrYKt6vfQU3taN0MpH9JB+mkXkxypFftEdL17oWIEsUB+lKD4/+RUVXzJSpfdigZitoP4EBUl0KJuL4EpalPKNjGpO4tvq+Lz+0LNI9ZWTVVVSFhOszr7NeZosY1hUd6L7SYarfmGiJJdrAYTMqeEsrK1ABv5EAp7xhl9RY0aq3zJ9S/k+B14SdMOMY4ODZPE7xHRDeLsKJqo2ghUXeRe9yLp2329c1wF9LqxaXzZLpabdXUaunaY+CJ91u0/YPjvW4oLvy2OGUebC9GECVqGyy40gQ8ikJz8NS6AwUAAnREAdA0Av1L4itilyEHkCdJfGznXRM7pQg6MgJxvIPc0N1ArQyEqehTUO5PLIUTdXF6GnutZ42Do+p6OoW9i6FkmhN4IEEYGXigROiSLlPE1XdE0Jve19U5HtIEeOmD+V2G+CTxZ/KGqUrGwqs5TxR9yhL8R50epwHHOqTDVE/9G6VaO0Qt1RnMG5fKlyvOYrRDXtknxYG+6gyESc7zTBfgScFUuMTa6zhvoRiLxaeFbFp4Rw+IBELsS6O5ngR705hPLWuHPSzBsv0gw2gnEIt8itsOZCAlqAqbqnuIs+/a9N8E4mZe9SUe9Dez3w5YRnuZz369SDT2gJR4KE3ecsAU8PWyBjqzDDjvilj2GatrOFNyyG8RSUezELY1XZRgbSqJMMIPfFqcCYYBEbA4MlfkBE7WKQVyz1WmkQbbgs8gGpolwmhd0J7Tkoy62A9xAzIe6EKWJOZgwNobqTPjn80sc64Sfpl0qHjSSKzHKl1vx6ALDIppdJ2LFKHyBYyWresRyOtL8U3DS0nx3jIjlX5kr9o2l5wI3dhhemg8MpFWDLilNkcaVN9NmjRHAZITal9dnhDuJ4kifNZK5kRAe7tC+awqYs92Jzx922Kdpk2veTHzAgRoIvd4832d9InK52zrx/rjrrqE1pqduk4SmmeGvbB1vi69bRiHKsvd1RhelwarzIF6lcleHAMFSy/EDEDnA90InDC0XTJRFd2mSY3umJkUjSJK6vJsypNWltuRcmtTJsNck2Sgn2/FClez6THF50JQuV2ei9rlJjVDRUnZyGjfnZ45TUdkYp9wUp6cZtk9Ck6CQU/OKUvEz35CqAbgrqIChQD5eIvJMM8wxTUWTJeWcbkQDUlTcnX610K7Sy98t6jFuCV4VfTk9j+b1zXv7rl5OMAKRW5d4oOMSD3SklqNcwZs0HkBSK9BY6r7HUtvk6BA6XkXzztTxQYqofkH8KZIZtZgGA/f7vRm9CcHbrHSDZCIkNE8u1smrECjS45lrdZzOgqnuk8DbN+Fyc3/gOHYmRybK5RtaW58Bq0U6vWo7jCauSRO1WydXUre1ZdrRdDwJBP0/01lP+bJXCWHMLqefX7466OcV73HoF4FWOtFFv67r3FEULJiIfc19H4yZZU5P2WHs867BvsFu9AySPGK+npoefeqE7MRDwTT0cNWh9Sr0CH8VcYp8naPBZdrk/xraZP4R4g+0LY5alGHUf4vy/yWfusifgHyiWP/5rXJG/Q9DcP8f</diagram></mxfile>
setup.py
@@ -12,7 +12,7 @@ setup(
     url='https://home-assistant.io/',
     description=('Open-source private cloud os for Home-Assistant'
                  ' based on ResinOS'),
-    long_description=('A maintenainless private cloud operator system that'
+    long_description=('A maintainless private cloud operator system that'
                       'setup a Home-Assistant instance. Based on ResinOS'),
     classifiers=[
         'Intended Audience :: End Users/Desktop',
@@ -24,12 +24,20 @@ setup(
         'Topic :: Scientific/Engineering :: Atmospheric Science',
         'Development Status :: 5 - Production/Stable',
         'Intended Audience :: Developers',
         'Programming Language :: Python :: 3.5',
+        'Programming Language :: Python :: 3.6',
     ],
     keywords=['docker', 'home-assistant', 'api'],
     zip_safe=False,
     platforms='any',
-    packages=['hassio', 'hassio.dock', 'hassio.api', 'hassio.addons'],
+    packages=[
+        'hassio',
+        'hassio.docker',
+        'hassio.addons',
+        'hassio.api',
+        'hassio.misc',
+        'hassio.utils',
+        'hassio.snapshots'
+    ],
     include_package_data=True,
     install_requires=[
         'async_timeout',
@@ -38,5 +46,7 @@ setup(
         'colorlog',
         'voluptuous',
         'gitpython',
+        'pytz',
+        'pyudev'
     ]
 )
tox.ini
@@ -2,8 +2,6 @@
 envlist = lint

 [testenv]
-setenv =
-    PYTHONPATH = {toxinidir}:{toxinidir}/hassio
 deps =
     flake8
     pylint
@@ -13,4 +11,4 @@ basepython = python3
 ignore_errors = True
 commands =
     flake8 hassio
-    pylint hassio
+    pylint --rcfile pylintrc hassio
version.json
@@ -1,7 +1,8 @@
 {
-    "hassio": "0.20",
-    "homeassistant": "0.43.2",
-    "resinos": "0.6",
-    "resinhup": "0.1",
-    "generic": "0.3"
+    "hassio": "0.84",
+    "homeassistant": "0.61.1",
+    "resinos": "1.1",
+    "resinhup": "0.3",
+    "generic": "0.3",
+    "cluster": "0.1"
 }