hass-core/homeassistant/components/recorder/util.py
J. Nick Koston 0a6deeb49b
Improve history api performance (#35822)
* Improve history api performance

A new option "minimal_response" reduces the amount of data
sent between the first and last history states to only the
"last_changed" and "state" fields.

Calling `to_native` is now avoided where possible and is only
done at the end, for the rows that will be returned in the
response.
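
Roughly, the pattern is the one sketched below. It uses the
`execute` and `session_scope` helpers from `recorder/util.py`
(shown further down) with the recorder's `States` model, but the
query and the `states_for_entity` wrapper are illustrative
stand-ins, not the history component's actual code.

```
# Illustrative sketch only: the query and function name are stand-ins,
# not the history component's real implementation.
from homeassistant.components.recorder.models import States
from homeassistant.components.recorder.util import execute, session_scope


def states_for_entity(hass, entity_id, start, end):
    """Fetch raw rows, converting to native State objects only at the end."""
    with session_scope(hass=hass) as session:
        query = (
            session.query(States)
            .filter(States.entity_id == entity_id)
            .filter(States.last_updated > start)
            .filter(States.last_updated < end)
        )
        # to_native=False returns the raw database rows; to_native() is then
        # called only on the rows that will actually be sent in the response.
        rows = execute(query, to_native=False)
        return [
            state
            for state in (row.to_native() for row in rows)
            if state is not None
        ]
```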

When the `minimal_response` option is sent, the history API
now returns a JSON response similar to the following for an
entity:

```
[{
	"attributes": {--TRUNCATED--},
	"context": {--TRUNCATED--},
	"entity_id": "binary_sensor.powerwall_status",
	"last_changed": "2020-05-18T23:20:03.213000+00:00",
	"last_updated": "2020-05-18T23:20:03.213000+00:00",
	"state": "on"
},
...
{
	"last_changed": "2020-05-19T00:41:08Z",
	"state": "unavailable"
},
...
{
	"attributes": {--TRUNCATED--},
	"context": {--TRUNCATED--},
	"entity_id": "binary_sensor.powerwall_status",
	"last_changed": "2020-05-19T00:42:08.069698+00:00",
	"last_updated": "2020-05-19T00:42:08.069698+00:00",
	"state": "on"
}]
```

Testing:

History API response time for 1 day, average of 10 runs with
`minimal_response`:

Before: 19.89s (content length: 3427428)
After:  8.44s (content length: 592199)
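
A response like the one above can be requested from the REST
API's history period endpoint; in the sketch below the host,
access token, timestamp, and entity are placeholders, and
`minimal_response` is passed as a bare query-string flag (only
its presence matters).

```
# Hypothetical request: host, token, timestamp, and entity are placeholders.
import requests

response = requests.get(
    "http://localhost:8123/api/history/period/2020-05-18T00:00:00Z",
    headers={"Authorization": "Bearer <long-lived-access-token>"},
    params={
        "filter_entity_id": "binary_sensor.powerwall_status",
        # Only the presence of the flag matters; no value is required.
        "minimal_response": "",
    },
)
print(response.json())
```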

* Remove impossible state check

* Remove another impossible state check

* Update homeassistant/components/history/__init__.py

Co-authored-by: Paulus Schoutsen <paulus@home-assistant.io>

* Reorder to save some indent per review

* Make query response make sense with to_native=False

* Update test for 00:00 to Z change

* Update homeassistant/components/recorder/models.py

Co-authored-by: Paulus Schoutsen <paulus@home-assistant.io>

Co-authored-by: Paulus Schoutsen <paulus@home-assistant.io>
2020-05-26 21:53:56 -05:00

91 lines · 2.6 KiB · Python

"""SQLAlchemy util functions."""
from contextlib import contextmanager
import logging
import time
from sqlalchemy.exc import OperationalError, SQLAlchemyError
from .const import DATA_INSTANCE
_LOGGER = logging.getLogger(__name__)
RETRIES = 3
QUERY_RETRY_WAIT = 0.1
@contextmanager
def session_scope(*, hass=None, session=None):
"""Provide a transactional scope around a series of operations."""
if session is None and hass is not None:
session = hass.data[DATA_INSTANCE].get_session()
if session is None:
raise RuntimeError("Session required")
need_rollback = False
try:
yield session
if session.transaction:
need_rollback = True
session.commit()
except Exception as err:
_LOGGER.error("Error executing query: %s", err)
if need_rollback:
session.rollback()
raise
finally:
session.close()
def commit(session, work):
"""Commit & retry work: Either a model or in a function."""
for _ in range(0, RETRIES):
try:
if callable(work):
work(session)
else:
session.add(work)
session.commit()
return True
except OperationalError as err:
_LOGGER.error("Error executing query: %s", err)
session.rollback()
time.sleep(QUERY_RETRY_WAIT)
return False
def execute(qry, to_native=True):
"""Query the database and convert the objects to HA native form.
This method also retries a few times in the case of stale connections.
"""
for tryno in range(0, RETRIES):
try:
timer_start = time.perf_counter()
if to_native:
result = [
row for row in (row.to_native() for row in qry) if row is not None
]
else:
result = list(qry)
if _LOGGER.isEnabledFor(logging.DEBUG):
elapsed = time.perf_counter() - timer_start
if to_native:
_LOGGER.debug(
"converting %d rows to native objects took %fs",
len(result),
elapsed,
)
else:
_LOGGER.debug(
"querying %d rows took %fs", len(result), elapsed,
)
return result
except SQLAlchemyError as err:
_LOGGER.error("Error executing query: %s", err)
if tryno == RETRIES - 1:
raise
time.sleep(QUERY_RETRY_WAIT)
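
For illustration, the helpers above might be combined as in the
sketch below; the `Events` import, the `dbevent` argument, and
the `save_event_row` wrapper are assumptions made for the
example, not code from this module.

```
# Illustrative only: Events, dbevent, and save_event_row are assumptions
# for the example, not part of util.py.
from homeassistant.components.recorder.models import Events
from homeassistant.components.recorder.util import commit, session_scope


def save_event_row(hass, dbevent: Events) -> bool:
    """Add a single row and commit it, retrying on transient OperationalError."""
    with session_scope(hass=hass) as session:
        # commit() accepts either a model instance (session.add + commit) or a
        # callable that receives the session; it retries up to RETRIES times
        # and returns False if every attempt fails.
        return commit(session, dbevent)
```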