Prevent overly large attributes from being stored in the database (#87105)

parent 5e81d28116
commit 389fc515a1

2 changed files with 42 additions and 5 deletions
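In short: JSON-encoded state attributes are now capped at 16384 bytes (16 KiB). When a state's encoded attributes exceed the cap, the recorder logs a warning and stores an empty attributes payload (b"{}") instead of writing the oversized blob to the database. A new test covers one normal and one oversized state.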
@@ -72,6 +72,9 @@ TABLE_STATISTICS_META = "statistics_meta"
 TABLE_STATISTICS_RUNS = "statistics_runs"
 TABLE_STATISTICS_SHORT_TERM = "statistics_short_term"
 
+MAX_STATE_ATTRS_BYTES = 16384
+PSQL_DIALECT = SupportedDialect.POSTGRESQL
+
 ALL_TABLES = [
     TABLE_STATES,
     TABLE_STATE_ATTRIBUTES,
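Two new module constants: MAX_STATE_ATTRS_BYTES is the 16 KiB cap applied in the next hunk, and PSQL_DIALECT aliases SupportedDialect.POSTGRESQL so the dialect check there fits on one line. PostgreSQL is special-cased because its TEXT columns reject embedded NUL (\u0000) characters, so attributes bound for it go through a strip-null encoder. A rough standalone sketch of that idea, assuming a recursive helper; Home Assistant's real json_bytes_strip_null is an orjson-based helper and may differ in detail:

import json
from typing import Any

def _strip_null(obj: Any) -> Any:
    """Recursively remove NUL characters, which PostgreSQL TEXT columns reject."""
    if isinstance(obj, str):
        return obj.replace("\x00", "")
    if isinstance(obj, dict):
        return {k: _strip_null(v) for k, v in obj.items()}
    if isinstance(obj, (list, tuple)):
        return [_strip_null(v) for v in obj]
    return obj

def json_bytes_strip_null(data: Any) -> bytes:
    """JSON-encode data after stripping NULs (stdlib stand-in for the orjson helper)."""
    return json.dumps(_strip_null(data), separators=(",", ":")).encode("utf-8")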
@@ -415,13 +418,20 @@ class StateAttributes(Base):  # type: ignore[misc,valid-type]
         exclude_attrs = (
             exclude_attrs_by_domain.get(domain, set()) | ALL_DOMAIN_EXCLUDE_ATTRS
         )
-        if dialect == SupportedDialect.POSTGRESQL:
-            return json_bytes_strip_null(
-                {k: v for k, v in state.attributes.items() if k not in exclude_attrs}
-            )
-        return json_bytes(
+        encoder = json_bytes_strip_null if dialect == PSQL_DIALECT else json_bytes
+        bytes_result = encoder(
             {k: v for k, v in state.attributes.items() if k not in exclude_attrs}
         )
+        if len(bytes_result) > MAX_STATE_ATTRS_BYTES:
+            _LOGGER.warning(
+                "State attributes for %s exceed maximum size of %s bytes. "
+                "This can cause database performance issues; Attributes "
+                "will not be stored",
+                state.entity_id,
+                MAX_STATE_ATTRS_BYTES,
+            )
+            return b"{}"
+        return bytes_result
 
     @staticmethod
     def hash_shared_attrs_bytes(shared_attrs_bytes: bytes) -> int:
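The refactor folds the two dialect branches into a single encoder variable, then applies the new size guard to the encoded result: serialize first, measure the byte length, and fall back to an empty JSON object when the payload is oversized. A minimal self-contained sketch of the same pattern, using the stdlib json module in place of the orjson-backed helpers (the function name encode_attributes_capped is made up for illustration):

import json
import logging

_LOGGER = logging.getLogger(__name__)

MAX_STATE_ATTRS_BYTES = 16384  # 16 KiB, the same cap the recorder uses


def encode_attributes_capped(entity_id: str, attributes: dict) -> bytes:
    """Serialize attributes, falling back to b"{}" when the payload is oversized."""
    # The cap applies to the encoded byte length, not the number of keys,
    # so a single huge value is enough to trip it.
    bytes_result = json.dumps(attributes, separators=(",", ":")).encode("utf-8")
    if len(bytes_result) > MAX_STATE_ATTRS_BYTES:
        _LOGGER.warning(
            "State attributes for %s exceed maximum size of %s bytes; "
            "attributes will not be stored",
            entity_id,
            MAX_STATE_ATTRS_BYTES,
        )
        return b"{}"
    return bytes_result


print(encode_attributes_capped("switch.sane", {"test_attr": 5}))       # b'{"test_attr":5}'
print(encode_attributes_capped("switch.too_big", {"a": "b" * 16384}))  # b'{}'

Returning b"{}" instead of raising means the state row is still recorded; only its attribute payload is dropped.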
@@ -702,6 +702,33 @@ def test_saving_state_and_removing_entity(hass, hass_recorder):
     assert states[2].state is None
 
 
+def test_saving_state_with_oversized_attributes(hass_recorder, caplog):
+    """Test saving states is limited to 16KiB of JSON encoded attributes."""
+    hass = hass_recorder()
+    massive_dict = {"a": "b" * 16384}
+    attributes = {"test_attr": 5, "test_attr_10": "nice"}
+    hass.states.set("switch.sane", "on", attributes)
+    hass.states.set("switch.too_big", "on", massive_dict)
+    wait_recording_done(hass)
+    states = []
+
+    with session_scope(hass=hass) as session:
+        for state, state_attributes in session.query(States, StateAttributes).outerjoin(
+            StateAttributes, States.attributes_id == StateAttributes.attributes_id
+        ):
+            native_state = state.to_native()
+            native_state.attributes = state_attributes.to_native()
+            states.append(native_state)
+
+    assert "switch.too_big" in caplog.text
+
+    assert len(states) == 2
+    assert _state_with_context(hass, "switch.sane").as_dict() == states[0].as_dict()
+    assert states[1].state == "on"
+    assert states[1].entity_id == "switch.too_big"
+    assert states[1].attributes == {}
+
+
 def test_recorder_setup_failure(hass):
     """Test some exceptions."""
     recorder_helper.async_initialize_recorder(hass)
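Why {"a": "b" * 16384} is enough to trip the cap: the limit applies to the encoded payload, so the JSON framing around the 16384-byte value pushes the total just past MAX_STATE_ATTRS_BYTES. A quick check, assuming compact separators such as orjson produces:

import json

massive_dict = {"a": "b" * 16384}
encoded = json.dumps(massive_dict, separators=(",", ":")).encode("utf-8")
# {"a":"<16384 bytes>"} -> 6 framing bytes + 16384 + 2 = 16392 bytes
assert len(encoded) == 16392 > 16384

Hence switch.sane round-trips with its attributes intact, while switch.too_big keeps its state value ("on") but comes back with empty attributes, matching the b"{}" fallback.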