Teach long term statistics that unit 'rpm' is same as 'RPM' (#80012)

* Teach long term statistics that unit 'rpm' is same as 'RPM'

* Add tests
This commit is contained in:
Erik Montnemery 2022-10-11 10:32:01 +02:00 committed by GitHub
parent edad6d0f26
commit 69d935b7bd
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23
2 changed files with 217 additions and 4 deletions

View file

@@ -23,7 +23,7 @@ from homeassistant.components.recorder.models import (
     StatisticMetaData,
     StatisticResult,
 )
-from homeassistant.const import ATTR_UNIT_OF_MEASUREMENT
+from homeassistant.const import ATTR_UNIT_OF_MEASUREMENT, REVOLUTIONS_PER_MINUTE
 from homeassistant.core import HomeAssistant, State, split_entity_id
 from homeassistant.exceptions import HomeAssistantError
 from homeassistant.helpers.entity import entity_sources
@@ -47,6 +47,10 @@ DEFAULT_STATISTICS = {
     STATE_CLASS_TOTAL_INCREASING: {"sum"},
 }
# Units that differ only in spelling/casing and must be treated as the same
# unit by long-term statistics (e.g. "RPM" is equivalent to "rpm").
# Maps the alternative spelling to the canonical constant.
EQUIVALENT_UNITS = {
    "RPM": REVOLUTIONS_PER_MINUTE,
}
 # Keep track of entities for which a warning about decreasing value has been logged
 SEEN_DIP = "sensor_seen_total_increasing_dip"
 WARN_DIP = "sensor_warn_total_increasing_dip"
@@ -113,10 +117,20 @@ def _time_weighted_average(
 def _get_units(fstates: list[tuple[float, State]]) -> set[str | None]:
-    """Return True if all states have the same unit."""
+    """Return a set of all units."""
     return {item[1].attributes.get(ATTR_UNIT_OF_MEASUREMENT) for item in fstates}
def _equivalent_units(units: set[str | None]) -> bool:
"""Return True if the units are equivalent."""
if len(units) == 1:
return True
units = {
EQUIVALENT_UNITS[unit] if unit in EQUIVALENT_UNITS else unit for unit in units
}
return len(units) == 1
 def _parse_float(state: str) -> float:
     """Parse a float string, throw on inf or nan."""
     fstate = float(state)
@@ -165,7 +179,7 @@ def _normalize_states(
         # The unit used by this sensor doesn't support unit conversion
         all_units = _get_units(fstates)
-        if len(all_units) > 1:
+        if not _equivalent_units(all_units):
             if WARN_UNSTABLE_UNIT not in hass.data:
                 hass.data[WARN_UNSTABLE_UNIT] = set()
             if entity_id not in hass.data[WARN_UNSTABLE_UNIT]:
@@ -442,7 +456,9 @@ def _compile_statistics(  # noqa: C901
     ) in to_process:
         # Check metadata
         if old_metadata := old_metadatas.get(entity_id):
-            if old_metadata[1]["unit_of_measurement"] != statistics_unit:
+            if not _equivalent_units(
+                {old_metadata[1]["unit_of_measurement"], statistics_unit}
+            ):
                 if WARN_UNSTABLE_UNIT not in hass.data:
                     hass.data[WARN_UNSTABLE_UNIT] = set()
                 if entity_id not in hass.data[WARN_UNSTABLE_UNIT]:
View file

@@ -2192,6 +2192,203 @@ def test_compile_hourly_statistics_changing_units_3(
     assert "Error while processing event StatisticsTask" not in caplog.text
@pytest.mark.parametrize(
    "device_class, state_unit, state_unit2, unit_class, mean, mean2, min, max",
    [
        (None, "RPM", "rpm", None, 13.050847, 13.333333, -10, 30),
        (None, "rpm", "RPM", None, 13.050847, 13.333333, -10, 30),
    ],
)
def test_compile_hourly_statistics_equivalent_units_1(
    hass_recorder,
    caplog,
    device_class,
    state_unit,
    state_unit2,
    unit_class,
    mean,
    mean2,
    min,
    max,
):
    """Test compiling hourly statistics where units change from one hour to the next."""
    zero = dt_util.utcnow()
    hass = hass_recorder()
    setup_component(hass, "sensor", {})
    wait_recording_done(hass)  # Wait for the sensor recorder platform to be added
    attributes = {
        "device_class": device_class,
        "state_class": "measurement",
        "unit_of_measurement": state_unit,
    }
    # Record states with the first unit, then switch to the equivalent
    # spelling for the two following 5-minute periods.
    four, states = record_states(hass, zero, "sensor.test1", attributes)
    attributes["unit_of_measurement"] = state_unit2
    four, _states = record_states(
        hass, zero + timedelta(minutes=5), "sensor.test1", attributes
    )
    states["sensor.test1"] += _states["sensor.test1"]
    four, _states = record_states(
        hass, zero + timedelta(minutes=10), "sensor.test1", attributes
    )
    states["sensor.test1"] += _states["sensor.test1"]
    hist = history.get_significant_states(hass, zero, four)
    assert dict(states) == dict(hist)
    do_adhoc_statistics(hass, start=zero)
    wait_recording_done(hass)
    # Equivalent units must not be flagged as an incompatible unit change.
    assert "can not be converted to the unit of previously" not in caplog.text
    statistic_ids = list_statistic_ids(hass)
    # Metadata still reports the first unit spelling.
    assert statistic_ids == [
        {
            "statistic_id": "sensor.test1",
            "has_mean": True,
            "has_sum": False,
            "name": None,
            "source": "recorder",
            "statistics_unit_of_measurement": state_unit,
            "unit_class": unit_class,
        },
    ]
    stats = statistics_during_period(hass, zero, period="5minute")
    assert stats == {
        "sensor.test1": [
            {
                "statistic_id": "sensor.test1",
                "start": process_timestamp_to_utc_isoformat(zero),
                "end": process_timestamp_to_utc_isoformat(zero + timedelta(minutes=5)),
                "mean": approx(mean),
                "min": approx(min),
                "max": approx(max),
                "last_reset": None,
                "state": None,
                "sum": None,
            }
        ]
    }
    # Compile statistics again for the period recorded with the second,
    # equivalent, unit; metadata switches to the new spelling.
    do_adhoc_statistics(hass, start=zero + timedelta(minutes=10))
    wait_recording_done(hass)
    statistic_ids = list_statistic_ids(hass)
    assert statistic_ids == [
        {
            "statistic_id": "sensor.test1",
            "has_mean": True,
            "has_sum": False,
            "name": None,
            "source": "recorder",
            "statistics_unit_of_measurement": state_unit2,
            "unit_class": unit_class,
        },
    ]
    stats = statistics_during_period(hass, zero, period="5minute")
    # Both periods are present; the earlier one is unchanged.
    assert stats == {
        "sensor.test1": [
            {
                "statistic_id": "sensor.test1",
                "start": process_timestamp_to_utc_isoformat(zero),
                "end": process_timestamp_to_utc_isoformat(zero + timedelta(minutes=5)),
                "mean": approx(mean),
                "min": approx(min),
                "max": approx(max),
                "last_reset": None,
                "state": None,
                "sum": None,
            },
            {
                "statistic_id": "sensor.test1",
                "start": process_timestamp_to_utc_isoformat(
                    zero + timedelta(minutes=10)
                ),
                "end": process_timestamp_to_utc_isoformat(zero + timedelta(minutes=15)),
                "mean": approx(mean2),
                "min": approx(min),
                "max": approx(max),
                "last_reset": None,
                "state": None,
                "sum": None,
            },
        ]
    }
    assert "Error while processing event StatisticsTask" not in caplog.text
@pytest.mark.parametrize(
    "device_class, state_unit, state_unit2, unit_class, mean, min, max",
    [
        (None, "RPM", "rpm", None, 13.333333, -10, 30),
        (None, "rpm", "RPM", None, 13.333333, -10, 30),
    ],
)
def test_compile_hourly_statistics_equivalent_units_2(
    hass_recorder,
    caplog,
    device_class,
    state_unit,
    state_unit2,
    unit_class,
    mean,
    min,
    max,
):
    """Test compiling hourly statistics where units change during an hour."""
    zero = dt_util.utcnow()
    hass = hass_recorder()
    setup_component(hass, "sensor", {})
    wait_recording_done(hass)  # Wait for the sensor recorder platform to be added
    attributes = {
        "device_class": device_class,
        "state_class": "measurement",
        "unit_of_measurement": state_unit,
    }
    # Record states with the first unit, then switch to the equivalent
    # spelling within the same statistics period.
    four, states = record_states(hass, zero, "sensor.test1", attributes)
    attributes["unit_of_measurement"] = state_unit2
    four, _states = record_states(
        hass, zero + timedelta(minutes=5), "sensor.test1", attributes
    )
    states["sensor.test1"] += _states["sensor.test1"]
    hist = history.get_significant_states(hass, zero, four)
    assert dict(states) == dict(hist)
    do_adhoc_statistics(hass, start=zero + timedelta(seconds=30 * 5))
    wait_recording_done(hass)
    # No unit-change warnings expected for equivalent units.
    assert "The unit of sensor.test1 is changing" not in caplog.text
    assert "and matches the unit of already compiled statistics" not in caplog.text
    statistic_ids = list_statistic_ids(hass)
    assert statistic_ids == [
        {
            "statistic_id": "sensor.test1",
            "has_mean": True,
            "has_sum": False,
            "name": None,
            "source": "recorder",
            "statistics_unit_of_measurement": state_unit,
            "unit_class": unit_class,
        },
    ]
    stats = statistics_during_period(hass, zero, period="5minute")
    # A single period is compiled, spanning states in both unit spellings.
    assert stats == {
        "sensor.test1": [
            {
                "statistic_id": "sensor.test1",
                "start": process_timestamp_to_utc_isoformat(
                    zero + timedelta(seconds=30 * 5)
                ),
                "end": process_timestamp_to_utc_isoformat(
                    zero + timedelta(seconds=30 * 15)
                ),
                "mean": approx(mean),
                "min": approx(min),
                "max": approx(max),
                "last_reset": None,
                "state": None,
                "sum": None,
            },
        ]
    }
    assert "Error while processing event StatisticsTask" not in caplog.text
 @pytest.mark.parametrize(
     "device_class, state_unit, statistic_unit, unit_class, mean1, mean2, min, max",
     [