"""Support for sending data to an Influx database."""
import logging
import math
import queue
import threading
import time
from typing import Dict
from influxdb import InfluxDBClient, exceptions
from influxdb_client import InfluxDBClient as InfluxDBClientV2
from influxdb_client.client.write_api import ASYNCHRONOUS, SYNCHRONOUS
from influxdb_client.rest import ApiException
import requests.exceptions
import urllib3.exceptions
import voluptuous as vol

from homeassistant.const import (
    CONF_URL,
    EVENT_HOMEASSISTANT_STOP,
    EVENT_STATE_CHANGED,
    STATE_UNAVAILABLE,
    STATE_UNKNOWN,
)
from homeassistant.helpers import event as event_helper, state as state_helper
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity_values import EntityValues
from homeassistant.helpers.entityfilter import (
    INCLUDE_EXCLUDE_BASE_FILTER_SCHEMA,
    convert_include_exclude_filter,
)

from .const import (
    API_VERSION_2,
    BATCH_BUFFER_SIZE,
    BATCH_TIMEOUT,
    CLIENT_ERROR_V1_WITH_RETRY,
    CLIENT_ERROR_V2_WITH_RETRY,
    COMPONENT_CONFIG_SCHEMA_CONNECTION,
    CONF_API_VERSION,
    CONF_BUCKET,
    CONF_COMPONENT_CONFIG,
    CONF_COMPONENT_CONFIG_DOMAIN,
    CONF_COMPONENT_CONFIG_GLOB,
    CONF_DB_NAME,
    CONF_DEFAULT_MEASUREMENT,
    CONF_HOST,
    CONF_ORG,
    CONF_OVERRIDE_MEASUREMENT,
    CONF_PASSWORD,
    CONF_PATH,
    CONF_PORT,
    CONF_RETRY_COUNT,
    CONF_SSL,
    CONF_TAGS,
    CONF_TAGS_ATTRIBUTES,
    CONF_TOKEN,
    CONF_USERNAME,
    CONF_VERIFY_SSL,
    CONNECTION_ERROR_WITH_RETRY,
    DEFAULT_API_VERSION,
    DEFAULT_HOST_V2,
    DEFAULT_SSL_V2,
    DOMAIN,
    QUEUE_BACKLOG_SECONDS,
    RE_DECIMAL,
    RE_DIGIT_TAIL,
    RETRY_DELAY,
    RETRY_INTERVAL,
    TIMEOUT,
    WRITE_ERROR,
)

_LOGGER = logging.getLogger(__name__)


def create_influx_url(conf: Dict) -> Dict:
    """Build URL used from config inputs and default when necessary."""
    if conf[CONF_API_VERSION] == API_VERSION_2:
        if CONF_SSL not in conf:
            conf[CONF_SSL] = DEFAULT_SSL_V2
        if CONF_HOST not in conf:
            conf[CONF_HOST] = DEFAULT_HOST_V2

        url = conf[CONF_HOST]
        if conf[CONF_SSL]:
            url = f"https://{url}"
        else:
            url = f"http://{url}"

        if CONF_PORT in conf:
            url = f"{url}:{conf[CONF_PORT]}"

        if CONF_PATH in conf:
            url = f"{url}{conf[CONF_PATH]}"

        conf[CONF_URL] = url

    return conf
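
# Illustrative example of the URL assembly above (hypothetical values): a v2
# config with ssl=True, host="influxdb.example", port=9999 and path="/influxdb"
# results in conf[CONF_URL] == "https://influxdb.example:9999/influxdb".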


def validate_version_specific_config(conf: Dict) -> Dict:
    """Ensure correct config fields are provided based on API version used."""
    if conf[CONF_API_VERSION] == API_VERSION_2:
        if CONF_TOKEN not in conf:
            raise vol.Invalid(
                f"{CONF_TOKEN} and {CONF_BUCKET} are required when {CONF_API_VERSION} is {API_VERSION_2}"
            )

        if CONF_USERNAME in conf:
            raise vol.Invalid(
                f"{CONF_USERNAME} and {CONF_PASSWORD} are only allowed when {CONF_API_VERSION} is {DEFAULT_API_VERSION}"
            )

    else:
        if CONF_TOKEN in conf:
            raise vol.Invalid(
                f"{CONF_TOKEN} and {CONF_BUCKET} are only allowed when {CONF_API_VERSION} is {API_VERSION_2}"
            )

    return conf
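
# For example (hypothetical configs): a v2 config without a token fails
# validation here, as does a v1 config that supplies a token, so mismatched
# credential styles are rejected before a client is ever created.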


_CONFIG_SCHEMA_ENTRY = vol.Schema({vol.Optional(CONF_OVERRIDE_MEASUREMENT): cv.string})

_CONFIG_SCHEMA = INCLUDE_EXCLUDE_BASE_FILTER_SCHEMA.extend(
    {
        vol.Optional(CONF_RETRY_COUNT, default=0): cv.positive_int,
        vol.Optional(CONF_DEFAULT_MEASUREMENT): cv.string,
        vol.Optional(CONF_OVERRIDE_MEASUREMENT): cv.string,
        vol.Optional(CONF_TAGS, default={}): vol.Schema({cv.string: cv.string}),
        vol.Optional(CONF_TAGS_ATTRIBUTES, default=[]): vol.All(
            cv.ensure_list, [cv.string]
        ),
        vol.Optional(CONF_COMPONENT_CONFIG, default={}): vol.Schema(
            {cv.entity_id: _CONFIG_SCHEMA_ENTRY}
        ),
        vol.Optional(CONF_COMPONENT_CONFIG_GLOB, default={}): vol.Schema(
            {cv.string: _CONFIG_SCHEMA_ENTRY}
        ),
        vol.Optional(CONF_COMPONENT_CONFIG_DOMAIN, default={}): vol.Schema(
            {cv.string: _CONFIG_SCHEMA_ENTRY}
        ),
    }
)

CONFIG_SCHEMA = vol.Schema(
    {
        DOMAIN: vol.All(
            _CONFIG_SCHEMA.extend(COMPONENT_CONFIG_SCHEMA_CONNECTION),
            validate_version_specific_config,
            create_influx_url,
        ),
    },
    extra=vol.ALLOW_EXTRA,
)
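
# A hypothetical YAML configuration that should satisfy this schema (option
# names are illustrative; the exact keys are defined in const.py):
#
#   influxdb:
#     api_version: 2
#     token: !secret influx_token
#     organization: my-org
#     bucket: home_assistant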


def get_influx_connection(client_kwargs, bucket):
    """Create and check the correct influx connection for the API version."""
    if bucket is not None:
        # Test the connection by synchronously writing nothing.
        # If the config is valid this will raise a `Bad Request` exception
        # without writing anything.
        # If the config is invalid we will output an error.
        # Hopefully a better way to test the connection is added in the future.
        try:
            influx = InfluxDBClientV2(**client_kwargs)
            influx.write_api(write_options=SYNCHRONOUS).write(bucket=bucket)

        except ApiException as exc:
            # 400 is the success state here: it means we can write, we just
            # sent a bad (empty) point.
            if exc.status != 400:
                raise exc

    else:
        influx = InfluxDBClient(**client_kwargs)
        influx.write_points([])

    return influx
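
# A minimal sketch of how setup() calls this helper (values hypothetical):
#
#   influx = get_influx_connection(
#       {"url": "https://influxdb.example:9999", "token": "...", "org": "my-org",
#        "timeout": TIMEOUT},
#       bucket="home_assistant",
#   )
#
# For the v1 API, bucket is None and the kwargs are whatever InfluxDBClient
# accepts (host, port, database, username, password, ...).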


def setup(hass, config):
    """Set up the InfluxDB component."""
    conf = config[DOMAIN]
    use_v2_api = conf[CONF_API_VERSION] == API_VERSION_2
    bucket = None
    kwargs = {
        "timeout": TIMEOUT,
    }

    if use_v2_api:
        kwargs["url"] = conf[CONF_URL]
        kwargs["token"] = conf[CONF_TOKEN]
        kwargs["org"] = conf[CONF_ORG]
        bucket = conf[CONF_BUCKET]
    else:
        kwargs["database"] = conf[CONF_DB_NAME]
        kwargs["verify_ssl"] = conf[CONF_VERIFY_SSL]

    if CONF_USERNAME in conf:
        kwargs["username"] = conf[CONF_USERNAME]

    if CONF_PASSWORD in conf:
        kwargs["password"] = conf[CONF_PASSWORD]

    if CONF_HOST in conf:
        kwargs["host"] = conf[CONF_HOST]

    if CONF_PATH in conf:
        kwargs["path"] = conf[CONF_PATH]

    if CONF_PORT in conf:
        kwargs["port"] = conf[CONF_PORT]

    if CONF_SSL in conf:
        kwargs["ssl"] = conf[CONF_SSL]

    entity_filter = convert_include_exclude_filter(conf)
    tags = conf.get(CONF_TAGS)
    tags_attributes = conf.get(CONF_TAGS_ATTRIBUTES)
    default_measurement = conf.get(CONF_DEFAULT_MEASUREMENT)
    override_measurement = conf.get(CONF_OVERRIDE_MEASUREMENT)
    component_config = EntityValues(
        conf[CONF_COMPONENT_CONFIG],
        conf[CONF_COMPONENT_CONFIG_DOMAIN],
        conf[CONF_COMPONENT_CONFIG_GLOB],
    )
    max_tries = conf.get(CONF_RETRY_COUNT)

    try:
        influx = get_influx_connection(kwargs, bucket)
        if use_v2_api:
            write_api = influx.write_api(write_options=ASYNCHRONOUS)
    except (
        OSError,
        requests.exceptions.ConnectionError,
        urllib3.exceptions.HTTPError,
    ) as exc:
        _LOGGER.error(CONNECTION_ERROR_WITH_RETRY, exc)
        event_helper.call_later(hass, RETRY_INTERVAL, lambda _: setup(hass, config))
        return True
    except exceptions.InfluxDBClientError as exc:
        _LOGGER.error(CLIENT_ERROR_V1_WITH_RETRY, exc)
        event_helper.call_later(hass, RETRY_INTERVAL, lambda _: setup(hass, config))
        return True
    except ApiException as exc:
        _LOGGER.error(CLIENT_ERROR_V2_WITH_RETRY, exc)
        event_helper.call_later(hass, RETRY_INTERVAL, lambda _: setup(hass, config))
        return True
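
    # Note that setup() returns True even when the connection attempt fails:
    # the component stays loaded and event_helper.call_later() re-runs the
    # whole setup after RETRY_INTERVAL seconds.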

    def event_to_json(event):
        """Add an event to the outgoing Influx list."""
        state = event.data.get("new_state")
        if (
            state is None
            or state.state in (STATE_UNKNOWN, "", STATE_UNAVAILABLE)
            or not entity_filter(state.entity_id)
        ):
            return

        try:
            _include_state = _include_value = False

            _state_as_value = float(state.state)
            _include_value = True
        except ValueError:
            try:
                _state_as_value = float(state_helper.state_as_number(state))
                _include_state = _include_value = True
            except ValueError:
                _include_state = True

        include_uom = True
        measurement = component_config.get(state.entity_id).get(
            CONF_OVERRIDE_MEASUREMENT
        )
        if measurement in (None, ""):
            if override_measurement:
                measurement = override_measurement
            else:
                measurement = state.attributes.get("unit_of_measurement")
                if measurement in (None, ""):
                    if default_measurement:
                        measurement = default_measurement
                    else:
                        measurement = state.entity_id
                else:
                    include_uom = False

        json = {
            "measurement": measurement,
            "tags": {"domain": state.domain, "entity_id": state.object_id},
            "time": event.time_fired,
            "fields": {},
        }
        if _include_state:
            json["fields"]["state"] = state.state
        if _include_value:
            json["fields"]["value"] = _state_as_value

        for key, value in state.attributes.items():
            if key in tags_attributes:
                json["tags"][key] = value
            elif key != "unit_of_measurement" or include_uom:
                # If the key is already in fields, avoid overwriting it.
                if key in json["fields"]:
                    key = f"{key}_"
                # Prevent column data errors in InfluxDB: try to cast each
                # value to float; if that fails, store the value as a string
                # under the field key with an added "_str" postfix.
                try:
                    json["fields"][key] = float(value)
                except (ValueError, TypeError):
                    new_key = f"{key}_str"
                    new_value = str(value)
                    json["fields"][new_key] = new_value

                    if RE_DIGIT_TAIL.match(new_value):
                        json["fields"][key] = float(RE_DECIMAL.sub("", new_value))

                # Infinity and NaN are not valid floats in InfluxDB
                try:
                    if not math.isfinite(json["fields"][key]):
                        del json["fields"][key]
                except (KeyError, TypeError):
                    pass

        json["tags"].update(tags)

        return json

    if use_v2_api:
        instance = hass.data[DOMAIN] = InfluxThread(
            hass, None, bucket, write_api, event_to_json, max_tries
        )
    else:
        instance = hass.data[DOMAIN] = InfluxThread(
            hass, influx, None, None, event_to_json, max_tries
        )

    instance.start()

    def shutdown(event):
        """Shut down the thread."""
        instance.queue.put(None)
        instance.join()
        influx.close()

    hass.bus.listen_once(EVENT_HOMEASSISTANT_STOP, shutdown)

    return True


class InfluxThread(threading.Thread):
    """A threaded event handler class."""

    def __init__(self, hass, influx, bucket, write_api, event_to_json, max_tries):
        """Initialize the listener."""
        threading.Thread.__init__(self, name="InfluxDB")
        self.queue = queue.Queue()
        self.influx = influx
        self.bucket = bucket
        self.write_api = write_api
        self.event_to_json = event_to_json
        self.max_tries = max_tries
        self.write_errors = 0
        self.shutdown = False
        hass.bus.listen(EVENT_STATE_CHANGED, self._event_listener)

    def _event_listener(self, event):
        """Listen for new messages on the bus and queue them for Influx."""
        item = (time.monotonic(), event)
        self.queue.put(item)

    @staticmethod
    def batch_timeout():
        """Return number of seconds to wait for more events."""
        return BATCH_TIMEOUT

    def get_events_json(self):
        """Return a batch of events formatted for writing."""
        queue_seconds = QUEUE_BACKLOG_SECONDS + self.max_tries * RETRY_DELAY

        count = 0
        json = []

        dropped = 0

        try:
            while len(json) < BATCH_BUFFER_SIZE and not self.shutdown:
                timeout = None if count == 0 else self.batch_timeout()
                item = self.queue.get(timeout=timeout)
                count += 1

                if item is None:
                    self.shutdown = True
                else:
                    timestamp, event = item
                    age = time.monotonic() - timestamp

                    if age < queue_seconds:
                        event_json = self.event_to_json(event)
                        if event_json:
                            json.append(event_json)
                    else:
                        dropped += 1

        except queue.Empty:
            pass

        if dropped:
            _LOGGER.warning("Catching up, dropped %d old events", dropped)

        return count, json
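
    # Batching semantics, as implemented above: the first queue.get() blocks
    # indefinitely, each subsequent get() waits at most BATCH_TIMEOUT seconds,
    # and a batch is capped at BATCH_BUFFER_SIZE events, so writes stay timely
    # even on a quiet event bus.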

    def write_to_influxdb(self, json):
        """Write preprocessed events to InfluxDB, with retry."""
        for retry in range(self.max_tries + 1):
            try:
                if self.write_api is not None:
                    self.write_api.write(bucket=self.bucket, record=json)
                else:
                    self.influx.write_points(json)

                if self.write_errors:
                    _LOGGER.error("Resumed, lost %d events", self.write_errors)
                    self.write_errors = 0

                _LOGGER.debug("Wrote %d events", len(json))
                break
            except (
                exceptions.InfluxDBClientError,
                exceptions.InfluxDBServerError,
                OSError,
                ApiException,
            ) as err:
                if retry < self.max_tries:
                    time.sleep(RETRY_DELAY)
                else:
                    if not self.write_errors:
                        _LOGGER.error(WRITE_ERROR, json, err)
                    self.write_errors += len(json)
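
    # Retry semantics, as implemented above: a failing batch is attempted
    # max_tries + 1 times with RETRY_DELAY seconds between attempts; only
    # after the final failure are the events counted as lost.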

    def run(self):
        """Process incoming events."""
        while not self.shutdown:
            count, json = self.get_events_json()
            if json:
                self.write_to_influxdb(json)
            for _ in range(count):
                self.queue.task_done()

    def block_till_done(self):
        """Block till all events processed."""
        self.queue.join()