Add influx 2.0 and InfluxCloud support to InfluxDB integration (#35392)

commit f9bc0c9dab (parent bf95658e24)
Authored by mdegat01 on 2020-06-12 15:29:46 -04:00; committed via GitHub.
6 changed files with 1484 additions and 795 deletions
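For context, the commit bridges two different client libraries: the existing `influxdb` package for the 1.x API and the `influxdb-client` package for InfluxDB 2.0 / InfluxCloud. A minimal sketch of the two write paths involved; all connection values (host, token, org, bucket) are placeholders, not taken from the commit:

# Sketch only: the v1 vs. v2 client APIs this change has to wrap.
from influxdb import InfluxDBClient                              # v1 library ("influxdb")
from influxdb_client import InfluxDBClient as InfluxDBClientV2   # v2 library ("influxdb-client")
from influxdb_client.client.write_api import SYNCHRONOUS

point = [{"measurement": "temperature", "tags": {"domain": "sensor"}, "fields": {"value": 21.5}}]

# InfluxDB 1.x: host/port/database credentials, writes via write_points().
v1 = InfluxDBClient(host="localhost", port=8086, database="home_assistant")
v1.write_points(point)

# InfluxDB 2.x / InfluxCloud: url + token + org, writes go through a write API bound to a bucket.
v2 = InfluxDBClientV2(url="https://us-west-2-1.aws.cloud2.influxdata.com", token="my-token", org="my-org")
v2.write_api(write_options=SYNCHRONOUS).write(bucket="Home Assistant", record=point)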

homeassistant/components/influxdb/__init__.py

@@ -5,12 +5,17 @@ import queue
 import re
 import threading
 import time
+from typing import Dict
 
 from influxdb import InfluxDBClient, exceptions
+from influxdb_client import InfluxDBClient as InfluxDBClientV2
+from influxdb_client.client.write_api import ASYNCHRONOUS, SYNCHRONOUS
+from influxdb_client.rest import ApiException
 import requests.exceptions
 import voluptuous as vol
 
 from homeassistant.const import (
+    CONF_API_VERSION,
     CONF_DOMAINS,
     CONF_ENTITIES,
     CONF_EXCLUDE,
@@ -20,6 +25,8 @@ from homeassistant.const import (
     CONF_PATH,
     CONF_PORT,
     CONF_SSL,
+    CONF_TOKEN,
+    CONF_URL,
     CONF_USERNAME,
     CONF_VERIFY_SSL,
     EVENT_HOMEASSISTANT_STOP,
@@ -34,6 +41,8 @@ from homeassistant.helpers.entity_values import EntityValues
 _LOGGER = logging.getLogger(__name__)
 
 CONF_DB_NAME = "database"
+CONF_BUCKET = "bucket"
+CONF_ORG = "organization"
 CONF_TAGS = "tags"
 CONF_DEFAULT_MEASUREMENT = "default_measurement"
 CONF_OVERRIDE_MEASUREMENT = "override_measurement"
@ -44,9 +53,14 @@ CONF_COMPONENT_CONFIG_DOMAIN = "component_config_domain"
CONF_RETRY_COUNT = "max_retries" CONF_RETRY_COUNT = "max_retries"
DEFAULT_DATABASE = "home_assistant" DEFAULT_DATABASE = "home_assistant"
DEFAULT_HOST_V2 = "us-west-2-1.aws.cloud2.influxdata.com"
DEFAULT_SSL_V2 = True
DEFAULT_BUCKET = "Home Assistant"
DEFAULT_VERIFY_SSL = True DEFAULT_VERIFY_SSL = True
DOMAIN = "influxdb" DEFAULT_API_VERSION = "1"
DOMAIN = "influxdb"
API_VERSION_2 = "2"
TIMEOUT = 5 TIMEOUT = 5
RETRY_DELAY = 20 RETRY_DELAY = 20
QUEUE_BACKLOG_SECONDS = 30 QUEUE_BACKLOG_SECONDS = 30
@@ -55,18 +69,80 @@ RETRY_INTERVAL = 60  # seconds
 BATCH_TIMEOUT = 1
 BATCH_BUFFER_SIZE = 100
-COMPONENT_CONFIG_SCHEMA_ENTRY = vol.Schema(
-    {vol.Optional(CONF_OVERRIDE_MEASUREMENT): cv.string}
-)
-
-CONFIG_SCHEMA = vol.Schema(
-    {
-        DOMAIN: vol.All(
-            vol.Schema(
-                {
-                    vol.Optional(CONF_HOST): cv.string,
-                    vol.Inclusive(CONF_USERNAME, "authentication"): cv.string,
-                    vol.Inclusive(CONF_PASSWORD, "authentication"): cv.string,
+DB_CONNECTION_FAILURE_MSG = ()
+
+
+def create_influx_url(conf: Dict) -> Dict:
+    """Build URL used from config inputs and default when necessary."""
+    if conf[CONF_API_VERSION] == API_VERSION_2:
+        if CONF_SSL not in conf:
+            conf[CONF_SSL] = DEFAULT_SSL_V2
+        if CONF_HOST not in conf:
+            conf[CONF_HOST] = DEFAULT_HOST_V2
+
+        url = conf[CONF_HOST]
+        if conf[CONF_SSL]:
+            url = f"https://{url}"
+        else:
+            url = f"http://{url}"
+
+        if CONF_PORT in conf:
+            url = f"{url}:{conf[CONF_PORT]}"
+
+        if CONF_PATH in conf:
+            url = f"{url}{conf[CONF_PATH]}"
+
+        conf[CONF_URL] = url
+
+    return conf
+
+
+def validate_version_specific_config(conf: Dict) -> Dict:
+    """Ensure correct config fields are provided based on API version used."""
+    if conf[CONF_API_VERSION] == API_VERSION_2:
+        if CONF_TOKEN not in conf:
+            raise vol.Invalid(
+                f"{CONF_TOKEN} and {CONF_BUCKET} are required when {CONF_API_VERSION} is {API_VERSION_2}"
+            )
+
+        if CONF_USERNAME in conf:
+            raise vol.Invalid(
+                f"{CONF_USERNAME} and {CONF_PASSWORD} are only allowed when {CONF_API_VERSION} is {DEFAULT_API_VERSION}"
+            )
+
+    else:
+        if CONF_TOKEN in conf:
+            raise vol.Invalid(
+                f"{CONF_TOKEN} and {CONF_BUCKET} are only allowed when {CONF_API_VERSION} is {API_VERSION_2}"
+            )
+
+    return conf
+
+
+COMPONENT_CONFIG_SCHEMA_CONNECTION = {
+    # Connection config for V1 and V2 APIs.
+    vol.Optional(CONF_API_VERSION, default=DEFAULT_API_VERSION): vol.All(
+        vol.Coerce(str), vol.In([DEFAULT_API_VERSION, API_VERSION_2]),
+    ),
+    vol.Optional(CONF_HOST): cv.string,
+    vol.Optional(CONF_PATH): cv.string,
+    vol.Optional(CONF_PORT): cv.port,
+    vol.Optional(CONF_SSL): cv.boolean,
+    # Connection config for V1 API only.
+    vol.Inclusive(CONF_USERNAME, "authentication"): cv.string,
+    vol.Inclusive(CONF_PASSWORD, "authentication"): cv.string,
+    vol.Optional(CONF_DB_NAME, default=DEFAULT_DATABASE): cv.string,
+    vol.Optional(CONF_VERIFY_SSL, default=DEFAULT_VERIFY_SSL): cv.boolean,
+    # Connection config for V2 API only.
+    vol.Inclusive(CONF_TOKEN, "v2_authentication"): cv.string,
+    vol.Inclusive(CONF_ORG, "v2_authentication"): cv.string,
+    vol.Optional(CONF_BUCKET, default=DEFAULT_BUCKET): cv.string,
+}
+
+_CONFIG_SCHEMA_ENTRY = vol.Schema({vol.Optional(CONF_OVERRIDE_MEASUREMENT): cv.string})
+
+_CONFIG_SCHEMA = vol.Schema(
+    {
         vol.Optional(CONF_EXCLUDE, default={}): vol.Schema(
             {
                 vol.Optional(CONF_ENTITIES, default=[]): cv.entity_ids,
@@ -83,34 +159,32 @@ CONFIG_SCHEMA = vol.Schema(
                 ),
             }
         ),
-        vol.Optional(CONF_DB_NAME, default=DEFAULT_DATABASE): cv.string,
-        vol.Optional(CONF_PATH): cv.string,
-        vol.Optional(CONF_PORT): cv.port,
-        vol.Optional(CONF_SSL): cv.boolean,
         vol.Optional(CONF_RETRY_COUNT, default=0): cv.positive_int,
         vol.Optional(CONF_DEFAULT_MEASUREMENT): cv.string,
         vol.Optional(CONF_OVERRIDE_MEASUREMENT): cv.string,
-        vol.Optional(CONF_TAGS, default={}): vol.Schema(
-            {cv.string: cv.string}
-        ),
+        vol.Optional(CONF_TAGS, default={}): vol.Schema({cv.string: cv.string}),
         vol.Optional(CONF_TAGS_ATTRIBUTES, default=[]): vol.All(
             cv.ensure_list, [cv.string]
         ),
-        vol.Optional(
-            CONF_VERIFY_SSL, default=DEFAULT_VERIFY_SSL
-        ): cv.boolean,
         vol.Optional(CONF_COMPONENT_CONFIG, default={}): vol.Schema(
-            {cv.entity_id: COMPONENT_CONFIG_SCHEMA_ENTRY}
+            {cv.entity_id: _CONFIG_SCHEMA_ENTRY}
         ),
         vol.Optional(CONF_COMPONENT_CONFIG_GLOB, default={}): vol.Schema(
-            {cv.string: COMPONENT_CONFIG_SCHEMA_ENTRY}
+            {cv.string: _CONFIG_SCHEMA_ENTRY}
        ),
         vol.Optional(CONF_COMPONENT_CONFIG_DOMAIN, default={}): vol.Schema(
-            {cv.string: COMPONENT_CONFIG_SCHEMA_ENTRY}
+            {cv.string: _CONFIG_SCHEMA_ENTRY}
         ),
     }
 )
-    )
+
+
+CONFIG_SCHEMA = vol.Schema(
+    {
+        DOMAIN: vol.All(
+            _CONFIG_SCHEMA.extend(COMPONENT_CONFIG_SCHEMA_CONNECTION),
+            validate_version_specific_config,
+            create_influx_url,
+        ),
     },
     extra=vol.ALLOW_EXTRA,
 )
@@ -119,17 +193,54 @@ RE_DIGIT_TAIL = re.compile(r"^[^\.]*\d+\.?\d+[^\.]*$")
 RE_DECIMAL = re.compile(r"[^\d.]+")
 
 
+def get_influx_connection(client_kwargs, bucket):
+    """Create and check the correct influx connection for the API version."""
+    if bucket is not None:
+        # Test connection by synchronously writing nothing.
+        # If config is valid this will generate a `Bad Request` exception but not make anything.
+        # If config is invalid we will output an error.
+        # Hopefully a better way to test connection is added in the future.
+        try:
+            influx = InfluxDBClientV2(**client_kwargs)
+            influx.write_api(write_options=SYNCHRONOUS).write(bucket=bucket)
+        except ApiException as exc:
+            # 400 is the success state since it means we can write we just gave a bad point.
+            if exc.status != 400:
+                raise exc
+    else:
+        influx = InfluxDBClient(**client_kwargs)
+        influx.write_points([])
+
+    return influx
+
+
 def setup(hass, config):
     """Set up the InfluxDB component."""
     conf = config[DOMAIN]
 
+    use_v2_api = conf[CONF_API_VERSION] == API_VERSION_2
+    bucket = None
     kwargs = {
-        "database": conf[CONF_DB_NAME],
-        "verify_ssl": conf[CONF_VERIFY_SSL],
         "timeout": TIMEOUT,
     }
 
+    if use_v2_api:
+        kwargs["url"] = conf[CONF_URL]
+        kwargs["token"] = conf[CONF_TOKEN]
+        kwargs["org"] = conf[CONF_ORG]
+        bucket = conf[CONF_BUCKET]
+    else:
+        kwargs["database"] = conf[CONF_DB_NAME]
+        kwargs["verify_ssl"] = conf[CONF_VERIFY_SSL]
+
+        if CONF_USERNAME in conf:
+            kwargs["username"] = conf[CONF_USERNAME]
+
+        if CONF_PASSWORD in conf:
+            kwargs["password"] = conf[CONF_PASSWORD]
+
     if CONF_HOST in conf:
         kwargs["host"] = conf[CONF_HOST]
@@ -139,12 +250,6 @@ def setup(hass, config):
     if CONF_PORT in conf:
         kwargs["port"] = conf[CONF_PORT]
 
-    if CONF_USERNAME in conf:
-        kwargs["username"] = conf[CONF_USERNAME]
-
-    if CONF_PASSWORD in conf:
-        kwargs["password"] = conf[CONF_PASSWORD]
-
     if CONF_SSL in conf:
         kwargs["ssl"] = conf[CONF_SSL]
@@ -166,10 +271,11 @@ def setup(hass, config):
     max_tries = conf.get(CONF_RETRY_COUNT)
 
     try:
-        influx = InfluxDBClient(**kwargs)
-        influx.write_points([])
+        influx = get_influx_connection(kwargs, bucket)
+        if use_v2_api:
+            write_api = influx.write_api(write_options=ASYNCHRONOUS)
     except (exceptions.InfluxDBClientError, requests.exceptions.ConnectionError) as exc:
-        _LOGGER.warning(
+        _LOGGER.error(
             "Database host is not accessible due to '%s', please "
             "check your entries in the configuration file (host, "
             "port, etc.) and verify that the database exists and is "
@@ -179,6 +285,17 @@ def setup(hass, config):
         )
         event_helper.call_later(hass, RETRY_INTERVAL, lambda _: setup(hass, config))
         return True
+    except ApiException as exc:
+        _LOGGER.error(
+            "Bucket is not accessible due to '%s', please "
+            "check your entries in the configuration file (url, org, "
+            "bucket, etc.) and verify that the org and bucket exist and the "
+            "provided token has WRITE access. Retrying again in %s seconds.",
+            exc,
+            RETRY_INTERVAL,
+        )
+        event_helper.call_later(hass, RETRY_INTERVAL, lambda _: setup(hass, config))
+        return True
 
     def event_to_json(event):
         """Add an event to the outgoing Influx list."""
@@ -270,7 +387,15 @@ def setup(hass, config):
         return json
 
-    instance = hass.data[DOMAIN] = InfluxThread(hass, influx, event_to_json, max_tries)
+    if use_v2_api:
+        instance = hass.data[DOMAIN] = InfluxThread(
+            hass, None, bucket, write_api, event_to_json, max_tries
+        )
+    else:
+        instance = hass.data[DOMAIN] = InfluxThread(
+            hass, influx, None, None, event_to_json, max_tries
+        )
     instance.start()
 
     def shutdown(event):
@@ -287,11 +412,13 @@ def setup(hass, config):
 class InfluxThread(threading.Thread):
     """A threaded event handler class."""
 
-    def __init__(self, hass, influx, event_to_json, max_tries):
+    def __init__(self, hass, influx, bucket, write_api, event_to_json, max_tries):
         """Initialize the listener."""
         threading.Thread.__init__(self, name="InfluxDB")
         self.queue = queue.Queue()
         self.influx = influx
+        self.bucket = bucket
+        self.write_api = write_api
         self.event_to_json = event_to_json
         self.max_tries = max_tries
         self.write_errors = 0
@@ -346,9 +473,11 @@ class InfluxThread(threading.Thread):
     def write_to_influxdb(self, json):
         """Write preprocessed events to influxdb, with retry."""
         for retry in range(self.max_tries + 1):
             try:
-                self.influx.write_points(json)
+                if self.write_api is not None:
+                    self.write_api.write(bucket=self.bucket, record=json)
+                else:
+                    self.influx.write_points(json)
 
                 if self.write_errors:
@@ -361,6 +490,7 @@ class InfluxThread(threading.Thread):
                 exceptions.InfluxDBClientError,
                 exceptions.InfluxDBServerError,
                 OSError,
+                ApiException,
             ) as err:
                 if retry < self.max_tries:
                     time.sleep(RETRY_DELAY)
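As a rough illustration of what the new create_influx_url helper above produces for the v2 API, a short sketch; it assumes Home Assistant with this commit applied, and the host, token, and org values are hypothetical:

# Sketch: expected effect of create_influx_url for api_version "2" (illustrative values only).
from homeassistant.components.influxdb import create_influx_url

# No host/ssl given: DEFAULT_HOST_V2 and DEFAULT_SSL_V2 (InfluxCloud over HTTPS) apply.
cloud = create_influx_url({"api_version": "2", "token": "my-token", "organization": "my-org"})
print(cloud["url"])  # https://us-west-2-1.aws.cloud2.influxdata.com

# Explicit host, port, and path with SSL disabled.
local = create_influx_url(
    {"api_version": "2", "host": "influx.example.com", "ssl": False, "port": 8086, "path": "/influxdb"}
)
print(local["url"])  # http://influx.example.com:8086/influxdb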

homeassistant/components/influxdb/manifest.json

@@ -2,6 +2,6 @@
     "domain": "influxdb",
     "name": "InfluxDB",
     "documentation": "https://www.home-assistant.io/integrations/influxdb",
-    "requirements": ["influxdb==5.2.3"],
+    "requirements": ["influxdb==5.2.3", "influxdb-client==1.6.0"],
     "codeowners": ["@fabaff"]
 }

homeassistant/components/influxdb/sensor.py

@@ -1,18 +1,25 @@
 """InfluxDB component which allows you to get data from an Influx database."""
 from datetime import timedelta
 import logging
+from typing import Dict
 
 from influxdb import InfluxDBClient, exceptions
+from influxdb_client import InfluxDBClient as InfluxDBClientV2
+from influxdb_client.rest import ApiException
 import voluptuous as vol
 
 from homeassistant.components.sensor import PLATFORM_SCHEMA
 from homeassistant.const import (
+    CONF_API_VERSION,
     CONF_HOST,
     CONF_NAME,
     CONF_PASSWORD,
+    CONF_PATH,
     CONF_PORT,
     CONF_SSL,
+    CONF_TOKEN,
     CONF_UNIT_OF_MEASUREMENT,
+    CONF_URL,
     CONF_USERNAME,
     CONF_VALUE_TEMPLATE,
     CONF_VERIFY_SSL,
@@ -23,79 +30,161 @@ import homeassistant.helpers.config_validation as cv
 from homeassistant.helpers.entity import Entity
 from homeassistant.util import Throttle
 
-from . import CONF_DB_NAME
+from . import (
+    API_VERSION_2,
+    COMPONENT_CONFIG_SCHEMA_CONNECTION,
+    CONF_BUCKET,
+    CONF_DB_NAME,
+    CONF_ORG,
+    DEFAULT_API_VERSION,
+    create_influx_url,
+    validate_version_specific_config,
+)
 
 _LOGGER = logging.getLogger(__name__)
 
-DEFAULT_HOST = "localhost"
-DEFAULT_PORT = 8086
-DEFAULT_DATABASE = "home_assistant"
-DEFAULT_SSL = False
-DEFAULT_VERIFY_SSL = False
 DEFAULT_GROUP_FUNCTION = "mean"
 DEFAULT_FIELD = "value"
 
 CONF_QUERIES = "queries"
+CONF_QUERIES_FLUX = "queries_flux"
 CONF_GROUP_FUNCTION = "group_function"
 CONF_FIELD = "field"
 CONF_MEASUREMENT_NAME = "measurement"
 CONF_WHERE = "where"
+CONF_RANGE_START = "range_start"
+CONF_RANGE_STOP = "range_stop"
+CONF_FUNCTION = "function"
+CONF_QUERY = "query"
+CONF_IMPORTS = "imports"
+
+DEFAULT_RANGE_START = "-15m"
+DEFAULT_RANGE_STOP = "now()"
 
 MIN_TIME_BETWEEN_UPDATES = timedelta(seconds=60)
 
-_QUERY_SCHEME = vol.Schema(
+_QUERY_SENSOR_SCHEMA = vol.Schema(
     {
         vol.Required(CONF_NAME): cv.string,
-        vol.Required(CONF_MEASUREMENT_NAME): cv.string,
-        vol.Required(CONF_WHERE): cv.template,
-        vol.Optional(CONF_UNIT_OF_MEASUREMENT): cv.string,
         vol.Optional(CONF_VALUE_TEMPLATE): cv.template,
-        vol.Optional(CONF_DB_NAME, default=DEFAULT_DATABASE): cv.string,
-        vol.Optional(CONF_GROUP_FUNCTION, default=DEFAULT_GROUP_FUNCTION): cv.string,
-        vol.Optional(CONF_FIELD, default=DEFAULT_FIELD): cv.string,
+        vol.Optional(CONF_UNIT_OF_MEASUREMENT): cv.string,
     }
 )
 
-PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
-    {
-        vol.Required(CONF_QUERIES): [_QUERY_SCHEME],
-        vol.Optional(CONF_HOST, default=DEFAULT_HOST): cv.string,
-        vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
-        vol.Inclusive(CONF_USERNAME, "authentication"): cv.string,
-        vol.Inclusive(CONF_PASSWORD, "authentication"): cv.string,
-        vol.Optional(CONF_SSL, default=DEFAULT_SSL): cv.boolean,
-        vol.Optional(CONF_VERIFY_SSL, default=DEFAULT_VERIFY_SSL): cv.boolean,
-    }
+_QUERY_SCHEMA = {
+    "InfluxQL": _QUERY_SENSOR_SCHEMA.extend(
+        {
+            vol.Optional(CONF_DB_NAME): cv.string,
+            vol.Required(CONF_MEASUREMENT_NAME): cv.string,
+            vol.Optional(
+                CONF_GROUP_FUNCTION, default=DEFAULT_GROUP_FUNCTION
+            ): cv.string,
+            vol.Optional(CONF_FIELD, default=DEFAULT_FIELD): cv.string,
+            vol.Required(CONF_WHERE): cv.template,
+        }
+    ),
+    "Flux": _QUERY_SENSOR_SCHEMA.extend(
+        {
+            vol.Optional(CONF_BUCKET): cv.string,
+            vol.Optional(CONF_RANGE_START, default=DEFAULT_RANGE_START): cv.string,
+            vol.Optional(CONF_RANGE_STOP, default=DEFAULT_RANGE_STOP): cv.string,
+            vol.Required(CONF_QUERY): cv.template,
+            vol.Optional(CONF_IMPORTS): vol.All(cv.ensure_list, [cv.string]),
+            vol.Optional(CONF_GROUP_FUNCTION): cv.string,
+        }
+    ),
+}
+
+
+def validate_query_format_for_version(conf: Dict) -> Dict:
+    """Ensure queries are provided in correct format based on API version."""
+    if conf[CONF_API_VERSION] == API_VERSION_2:
+        if CONF_QUERIES_FLUX not in conf:
+            raise vol.Invalid(
+                f"{CONF_QUERIES_FLUX} is required when {CONF_API_VERSION} is {API_VERSION_2}"
+            )
+
+    else:
+        if CONF_QUERIES not in conf:
+            raise vol.Invalid(
+                f"{CONF_QUERIES} is required when {CONF_API_VERSION} is {DEFAULT_API_VERSION}"
+            )
+
+    return conf
+
+
+PLATFORM_SCHEMA = vol.All(
+    PLATFORM_SCHEMA.extend(COMPONENT_CONFIG_SCHEMA_CONNECTION).extend(
+        {
+            vol.Exclusive(CONF_QUERIES, "queries"): [_QUERY_SCHEMA["InfluxQL"]],
+            vol.Exclusive(CONF_QUERIES_FLUX, "queries"): [_QUERY_SCHEMA["Flux"]],
+        }
+    ),
+    validate_version_specific_config,
+    validate_query_format_for_version,
+    create_influx_url,
 )
 
 
 def setup_platform(hass, config, add_entities, discovery_info=None):
     """Set up the InfluxDB component."""
-    influx_conf = {
-        "host": config[CONF_HOST],
-        "password": config.get(CONF_PASSWORD),
-        "port": config.get(CONF_PORT),
-        "ssl": config[CONF_SSL],
-        "username": config.get(CONF_USERNAME),
-        "verify_ssl": config.get(CONF_VERIFY_SSL),
-    }
-    dev = []
-    for query in config.get(CONF_QUERIES):
-        sensor = InfluxSensor(hass, influx_conf, query)
+    use_v2_api = config[CONF_API_VERSION] == API_VERSION_2
+    queries = None
+
+    if use_v2_api:
+        influx_conf = {
+            "url": config[CONF_URL],
+            "token": config[CONF_TOKEN],
+            "org": config[CONF_ORG],
+        }
+        bucket = config[CONF_BUCKET]
+        queries = config[CONF_QUERIES_FLUX]
+        for v2_query in queries:
+            if CONF_BUCKET not in v2_query:
+                v2_query[CONF_BUCKET] = bucket
+    else:
+        influx_conf = {
+            "database": config[CONF_DB_NAME],
+            "verify_ssl": config[CONF_VERIFY_SSL],
+        }
+
+        if CONF_USERNAME in config:
+            influx_conf["username"] = config[CONF_USERNAME]
+
+        if CONF_PASSWORD in config:
+            influx_conf["password"] = config[CONF_PASSWORD]
+
+        if CONF_HOST in config:
+            influx_conf["host"] = config[CONF_HOST]
+
+        if CONF_PATH in config:
+            influx_conf["path"] = config[CONF_PATH]
+
+        if CONF_PORT in config:
+            influx_conf["port"] = config[CONF_PORT]
+
+        if CONF_SSL in config:
+            influx_conf["ssl"] = config[CONF_SSL]
+
+        queries = config[CONF_QUERIES]
+
+    entities = []
+    for query in queries:
+        sensor = InfluxSensor(hass, influx_conf, query, use_v2_api)
         if sensor.connected:
-            dev.append(sensor)
+            entities.append(sensor)
 
-    add_entities(dev, True)
+    add_entities(entities, True)
 
 
 class InfluxSensor(Entity):
     """Implementation of a Influxdb sensor."""
 
-    def __init__(self, hass, influx_conf, query):
+    def __init__(self, hass, influx_conf, query, use_v2_api):
         """Initialize the sensor."""
         self._name = query.get(CONF_NAME)
         self._unit_of_measurement = query.get(CONF_UNIT_OF_MEASUREMENT)
         value_template = query.get(CONF_VALUE_TEMPLATE)
@@ -104,26 +193,48 @@ class InfluxSensor(Entity):
             self._value_template.hass = hass
         else:
             self._value_template = None
-        database = query.get(CONF_DB_NAME)
         self._state = None
         self._hass = hass
-        where_clause = query.get(CONF_WHERE)
-        where_clause.hass = hass
-        influx = InfluxDBClient(
-            host=influx_conf["host"],
-            port=influx_conf["port"],
-            username=influx_conf["username"],
-            password=influx_conf["password"],
-            database=database,
-            ssl=influx_conf["ssl"],
-            verify_ssl=influx_conf["verify_ssl"],
-        )
+
+        if use_v2_api:
+            influx = InfluxDBClientV2(**influx_conf)
+            query_api = influx.query_api()
+            query_clause = query.get(CONF_QUERY)
+            query_clause.hass = hass
+            bucket = query[CONF_BUCKET]
+        else:
+            if CONF_DB_NAME in query:
+                kwargs = influx_conf.copy()
+                kwargs[CONF_DB_NAME] = query[CONF_DB_NAME]
+            else:
+                kwargs = influx_conf
+
+            influx = InfluxDBClient(**kwargs)
+            where_clause = query.get(CONF_WHERE)
+            where_clause.hass = hass
+            query_api = None
+
         try:
-            influx.query("SHOW SERIES LIMIT 1;")
-            self.connected = True
-            self.data = InfluxSensorData(
-                influx,
-                query.get(CONF_GROUP_FUNCTION),
-                query.get(CONF_FIELD),
+            if query_api is not None:
+                query_api.query(
+                    f'from(bucket: "{bucket}") |> range(start: -1ms) |> keep(columns: ["_time"]) |> limit(n: 1)'
+                )
+                self.connected = True
+                self.data = InfluxSensorDataV2(
+                    query_api,
+                    bucket,
+                    query.get(CONF_RANGE_START),
+                    query.get(CONF_RANGE_STOP),
+                    query_clause,
+                    query.get(CONF_IMPORTS),
+                    query.get(CONF_GROUP_FUNCTION),
+                )
+            else:
+                influx.query("SHOW SERIES LIMIT 1;")
+                self.connected = True
+                self.data = InfluxSensorDataV1(
+                    influx,
+                    query.get(CONF_GROUP_FUNCTION),
+                    query.get(CONF_FIELD),
@@ -138,6 +249,15 @@ class InfluxSensor(Entity):
                 exc,
             )
             self.connected = False
+        except ApiException as exc:
+            _LOGGER.error(
+                "Bucket is not accessible due to '%s', please "
+                "check your entries in the configuration file (url, org, "
+                "bucket, etc.) and verify that the org and bucket exist and the "
+                "provided token has READ access.",
+                exc,
+            )
+            self.connected = False
 
     @property
     def name(self):
@@ -173,8 +293,76 @@ class InfluxSensor(Entity):
             self._state = value
 
 
-class InfluxSensorData:
-    """Class for handling the data retrieval."""
+class InfluxSensorDataV2:
+    """Class for handling the data retrieval with v2 API."""
+
+    def __init__(
+        self, query_api, bucket, range_start, range_stop, query, imports, group
+    ):
+        """Initialize the data object."""
+        self.query_api = query_api
+        self.bucket = bucket
+        self.range_start = range_start
+        self.range_stop = range_stop
+        self.query = query
+        self.imports = imports
+        self.group = group
+        self.value = None
+        self.full_query = None
+
+        self.query_prefix = f'from(bucket:"{bucket}") |> range(start: {range_start}, stop: {range_stop}) |>'
+        if imports is not None:
+            for i in imports:
+                self.query_prefix = f'import "{i}" {self.query_prefix}'
+
+        if group is None:
+            self.query_postfix = "|> limit(n: 1)"
+        else:
+            self.query_postfix = f'|> {group}(column: "_value")'
+
+    @Throttle(MIN_TIME_BETWEEN_UPDATES)
+    def update(self):
+        """Get the latest data by querying influx."""
+        _LOGGER.debug("Rendering query: %s", self.query)
+        try:
+            rendered_query = self.query.render()
+        except TemplateError as ex:
+            _LOGGER.error("Could not render query template: %s", ex)
+            return
+
+        self.full_query = f"{self.query_prefix} {rendered_query} {self.query_postfix}"
+
+        _LOGGER.info("Running query: %s", self.full_query)
+
+        try:
+            tables = self.query_api.query(self.full_query)
+        except ApiException as exc:
+            _LOGGER.error(
+                "Could not execute query '%s' due to '%s', "
+                "Check the syntax of your query",
+                self.full_query,
+                exc,
+            )
+            self.value = None
+            return
+
+        if not tables:
+            _LOGGER.warning(
+                "Query returned no results, sensor state set to UNKNOWN: %s",
+                self.full_query,
+            )
+            self.value = None
+        else:
+            if len(tables) > 1:
+                _LOGGER.warning(
+                    "Query returned multiple tables, only value from first one is shown: %s",
+                    self.full_query,
+                )
+            self.value = tables[0].records[0].values["_value"]
+
+
+class InfluxSensorDataV1:
+    """Class for handling the data retrieval with v1 API."""
 
     def __init__(self, influx, group, field, measurement, where):
         """Initialize the data object."""
@@ -200,7 +388,18 @@
         _LOGGER.info("Running query: %s", self.query)
 
-        points = list(self.influx.query(self.query).get_points())
+        try:
+            points = list(self.influx.query(self.query).get_points())
+        except exceptions.InfluxDBClientError as exc:
+            _LOGGER.error(
+                "Could not execute query '%s' due to '%s', "
+                "Check the syntax of your query",
+                self.query,
+                exc,
+            )
+            self.value = None
+            return
+
         if not points:
             _LOGGER.warning(
                 "Query returned no points, sensor state set to UNKNOWN: %s", self.query

requirements_all.txt

@@ -784,6 +784,9 @@ ihcsdk==2.7.0
 # homeassistant.components.incomfort
 incomfort-client==0.4.0
 
+# homeassistant.components.influxdb
+influxdb-client==1.6.0
+
 # homeassistant.components.influxdb
 influxdb==5.2.3

requirements_test_all.txt

@@ -345,6 +345,9 @@ huawei-lte-api==1.4.12
 # homeassistant.components.iaqualink
 iaqualink==0.3.4
 
+# homeassistant.components.influxdb
+influxdb-client==1.6.0
+
 # homeassistant.components.influxdb
 influxdb==5.2.3

File diff suppressed because it is too large.