Add CI job which runs recorder tests on PostgreSQL (#80614)

Co-authored-by: Franck Nijhof <git@frenck.dev>
Co-authored-by: J. Nick Koston <nick@koston.org>
Erik Montnemery 2023-02-07 16:46:14 +01:00 committed by GitHub
parent 3a83b2f66f
commit 720f51657d
10 changed files with 187 additions and 41 deletions


@@ -1006,12 +1006,116 @@ jobs:
         run: |
           ./script/check_dirty
+  pytest-postgres:
+    runs-on: ubuntu-20.04
+    services:
+      postgres:
+        image: postgres:15.0
+        ports:
+          - 5432:5432
+        env:
+          POSTGRES_PASSWORD: password
+        options: --health-cmd="pg_isready -hlocalhost -Upostgres" --health-interval=5s --health-timeout=2s --health-retries=3
+    if: |
+      (github.event_name != 'push' || github.event.repository.full_name == 'home-assistant/core')
+      && github.event.inputs.lint-only != 'true'
+      && needs.info.outputs.test_full_suite == 'true'
+    needs:
+      - info
+      - base
+      - gen-requirements-all
+      - hassfest
+      - lint-black
+      - lint-other
+      - lint-isort
+      - mypy
+    strategy:
+      fail-fast: false
+      matrix:
+        python-version: ${{ fromJson(needs.info.outputs.python_versions) }}
+    name: >-
+      Run tests Python ${{ matrix.python-version }} (postgresql)
+    steps:
+      - name: Install additional OS dependencies
+        run: |
+          sudo apt-get update
+          sudo apt-get -y install \
+            bluez \
+            ffmpeg \
+            postgresql-server-dev-12
+      - name: Check out code from GitHub
+        uses: actions/checkout@v3.1.0
+      - name: Set up Python ${{ matrix.python-version }}
+        id: python
+        uses: actions/setup-python@v4.3.0
+        with:
+          python-version: ${{ matrix.python-version }}
+          check-latest: true
+      - name: Restore full Python ${{ matrix.python-version }} virtual environment
+        id: cache-venv
+        uses: actions/cache@v3.0.11
+        with:
+          path: venv
+          key: ${{ runner.os }}-${{ steps.python.outputs.python-version }}-${{
+            needs.info.outputs.python_cache_key }}
+      - name: Fail job if Python cache restore failed
+        if: steps.cache-venv.outputs.cache-hit != 'true'
+        run: |
+          echo "Failed to restore Python virtual environment from cache"
+          exit 1
+      - name: Register Python problem matcher
+        run: |
+          echo "::add-matcher::.github/workflows/matchers/python.json"
+      - name: Install Pytest Annotation plugin
+        run: |
+          . venv/bin/activate
+          # Ideally this should be part of our dependencies
+          # However this plugin is fairly new and doesn't run correctly
+          # on a non-GitHub environment.
+          pip install pytest-github-actions-annotate-failures==0.1.3
+      - name: Register pytest slow test problem matcher
+        run: |
+          echo "::add-matcher::.github/workflows/matchers/pytest-slow.json"
+      - name: Install SQL Python libraries
+        run: |
+          . venv/bin/activate
+          pip install psycopg2 sqlalchemy_utils
+      - name: Run pytest (partially)
+        timeout-minutes: 10
+        shell: bash
+        run: |
+          . venv/bin/activate
+          python --version
+          python3 -X dev -m pytest \
+            -qq \
+            --timeout=9 \
+            -n 1 \
+            --cov="homeassistant.components.recorder" \
+            --cov-report=xml \
+            --cov-report=term-missing \
+            -o console_output_style=count \
+            --durations=0 \
+            --durations-min=10 \
+            -p no:sugar \
+            --dburl=postgresql://postgres:password@127.0.0.1/homeassistant-test \
+            tests/components/recorder
+      - name: Upload coverage artifact
+        uses: actions/upload-artifact@v3.1.0
+        with:
+          name: coverage-${{ matrix.python-version }}-postgresql
+          path: coverage.xml
+      - name: Check dirty
+        run: |
+          ./script/check_dirty
+
   coverage:
     name: Upload test coverage to Codecov
     runs-on: ubuntu-20.04
     needs:
       - info
       - pytest
+      - pytest-postgres
     steps:
       - name: Check out code from GitHub
         uses: actions/checkout@v3.3.0
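A note on the service container: the `options` line makes GitHub Actions gate the job on `pg_isready`, retrying every 5 seconds until PostgreSQL accepts connections. When reproducing this setup outside Actions, the same readiness wait can be done from Python with psycopg2, which the job installs anyway. A minimal sketch, assuming the workflow's credentials (`postgres`/`password` on `127.0.0.1:5432`); the retry counts mirror the health-check options but are otherwise illustrative:

```python
import time

import psycopg2  # installed in the "Install SQL Python libraries" step


def wait_for_postgres(dsn: str, retries: int = 3, delay: float = 5.0) -> None:
    """Poll until PostgreSQL accepts connections, like the pg_isready health check."""
    for attempt in range(1, retries + 1):
        try:
            psycopg2.connect(dsn).close()
            return
        except psycopg2.OperationalError:
            if attempt == retries:
                raise
            time.sleep(delay)


wait_for_postgres("postgresql://postgres:password@127.0.0.1:5432/postgres")
```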


@@ -503,10 +503,15 @@ def test_get_significant_states_without_initial(hass_recorder):
     hass = hass_recorder()
     zero, four, states = record_states(hass)
     one = zero + timedelta(seconds=1)
+    one_with_microsecond = zero + timedelta(seconds=1, microseconds=1)
     one_and_half = zero + timedelta(seconds=1.5)
     for entity_id in states:
         states[entity_id] = list(
-            filter(lambda s: s.last_changed != one, states[entity_id])
+            filter(
+                lambda s: s.last_changed != one
+                and s.last_changed != one_with_microsecond,
+                states[entity_id],
+            )
         )
 
     del states["media_player.test2"]

@@ -687,9 +692,6 @@ def record_states(hass) -> tuple[datetime, datetime, dict[str, list[State]]]:
         states[mp].append(
             set_state(mp, "idle", attributes={"media_title": str(sentinel.mt1)})
         )
-        states[mp].append(
-            set_state(mp, "YouTube", attributes={"media_title": str(sentinel.mt2)})
-        )
         states[mp2].append(
             set_state(mp2, "YouTube", attributes={"media_title": str(sentinel.mt2)})
         )

@@ -700,6 +702,14 @@ def record_states(hass) -> tuple[datetime, datetime, dict[str, list[State]]]:
             set_state(therm, 20, attributes={"current_temperature": 19.5})
         )
 
+    with patch(
+        "homeassistant.components.recorder.core.dt_util.utcnow",
+        return_value=one + timedelta(microseconds=1),
+    ):
+        states[mp].append(
+            set_state(mp, "YouTube", attributes={"media_title": str(sentinel.mt2)})
+        )
+
     with patch(
         "homeassistant.components.recorder.core.dt_util.utcnow", return_value=two
     ):

@@ -740,8 +750,8 @@ async def test_state_changes_during_period_query_during_migration_to_schema_25(
     recorder_db_url: str,
 ):
     """Test we can query data prior to schema 25 and during migration to schema 25."""
-    if recorder_db_url.startswith("mysql://"):
-        # This test doesn't run on MySQL / MariaDB; we can't drop table state_attributes
+    if recorder_db_url.startswith(("mysql://", "postgresql://")):
+        # This test doesn't run on MySQL / MariaDB / Postgresql; we can't drop table state_attributes
         return
 
     instance = await async_setup_recorder_instance(hass, {})

@@ -795,8 +805,8 @@ async def test_get_states_query_during_migration_to_schema_25(
     recorder_db_url: str,
 ):
     """Test we can query data prior to schema 25 and during migration to schema 25."""
-    if recorder_db_url.startswith("mysql://"):
-        # This test doesn't run on MySQL / MariaDB; we can't drop table state_attributes
+    if recorder_db_url.startswith(("mysql://", "postgresql://")):
+        # This test doesn't run on MySQL / MariaDB / Postgresql; we can't drop table state_attributes
         return
 
     instance = await async_setup_recorder_instance(hass, {})

@@ -846,8 +856,8 @@ async def test_get_states_query_during_migration_to_schema_25_multiple_entities(
     recorder_db_url: str,
 ):
     """Test we can query data prior to schema 25 and during migration to schema 25."""
-    if recorder_db_url.startswith("mysql://"):
-        # This test doesn't run on MySQL / MariaDB; we can't drop table state_attributes
+    if recorder_db_url.startswith(("mysql://", "postgresql://")):
+        # This test doesn't run on MySQL / MariaDB / Postgresql; we can't drop table state_attributes
         return
 
     instance = await async_setup_recorder_instance(hass, {})
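The `record_states` change is the heart of this file: the second media-player state used to be written at the same patched instant as the first, so the two rows tied on `last_updated` and their relative order depended on the database engine. Recording it one microsecond later makes the ordering deterministic on any backend that stores microsecond precision, which is why the assertions above now also filter out `one_with_microsecond`. A minimal sketch of the timestamp arithmetic involved (plain stdlib, no recorder API):

```python
from datetime import datetime, timedelta, timezone

one = datetime(2023, 2, 7, 15, 46, 1, tzinfo=timezone.utc)
one_with_microsecond = one + timedelta(microseconds=1)

# Distinct timestamps order deterministically; equal timestamps
# leave ORDER BY ties up to the engine.
assert one < one_with_microsecond
assert sorted([one_with_microsecond, one]) == [one, one_with_microsecond]
```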


@@ -400,12 +400,13 @@ def test_get_significant_states_with_initial(hass_recorder):
     hass = hass_recorder()
     zero, four, states = record_states(hass)
     one = zero + timedelta(seconds=1)
+    one_with_microsecond = zero + timedelta(seconds=1, microseconds=1)
     one_and_half = zero + timedelta(seconds=1.5)
     for entity_id in states:
         if entity_id == "media_player.test":
             states[entity_id] = states[entity_id][1:]
         for state in states[entity_id]:
-            if state.last_changed == one:
+            if state.last_changed == one or state.last_changed == one_with_microsecond:
                 state.last_changed = one_and_half
                 state.last_updated = one_and_half

@@ -428,10 +429,15 @@ def test_get_significant_states_without_initial(hass_recorder):
     hass = hass_recorder()
     zero, four, states = record_states(hass)
     one = zero + timedelta(seconds=1)
+    one_with_microsecond = zero + timedelta(seconds=1, microseconds=1)
     one_and_half = zero + timedelta(seconds=1.5)
     for entity_id in states:
         states[entity_id] = list(
-            filter(lambda s: s.last_changed != one, states[entity_id])
+            filter(
+                lambda s: s.last_changed != one
+                and s.last_changed != one_with_microsecond,
+                states[entity_id],
+            )
         )
 
     del states["media_player.test2"]

@@ -594,9 +600,6 @@ def record_states(hass) -> tuple[datetime, datetime, dict[str, list[State]]]:
         states[mp].append(
             set_state(mp, "idle", attributes={"media_title": str(sentinel.mt1)})
         )
-        states[mp].append(
-            set_state(mp, "YouTube", attributes={"media_title": str(sentinel.mt2)})
-        )
         states[mp2].append(
             set_state(mp2, "YouTube", attributes={"media_title": str(sentinel.mt2)})
         )

@@ -607,6 +610,14 @@ def record_states(hass) -> tuple[datetime, datetime, dict[str, list[State]]]:
             set_state(therm, 20, attributes={"current_temperature": 19.5})
         )
 
+    with patch(
+        "homeassistant.components.recorder.core.dt_util.utcnow",
+        return_value=one + timedelta(microseconds=1),
+    ):
+        states[mp].append(
+            set_state(mp, "YouTube", attributes={"media_title": str(sentinel.mt2)})
+        )
+
     with patch(
         "homeassistant.components.recorder.core.dt_util.utcnow", return_value=two
     ):


@ -567,10 +567,15 @@ def test_saving_state_include_domains_globs(hass_recorder):
hass, ["test.recorder", "test2.recorder", "test3.included_entity"] hass, ["test.recorder", "test2.recorder", "test3.included_entity"]
) )
assert len(states) == 2 assert len(states) == 2
assert _state_with_context(hass, "test2.recorder").as_dict() == states[0].as_dict() state_map = {state.entity_id: state for state in states}
assert (
_state_with_context(hass, "test2.recorder").as_dict()
== state_map["test2.recorder"].as_dict()
)
assert ( assert (
_state_with_context(hass, "test3.included_entity").as_dict() _state_with_context(hass, "test3.included_entity").as_dict()
== states[1].as_dict() == state_map["test3.included_entity"].as_dict()
) )
@ -1595,7 +1600,7 @@ async def test_database_lock_and_overflow(
async def test_database_lock_timeout(recorder_mock, hass, recorder_db_url): async def test_database_lock_timeout(recorder_mock, hass, recorder_db_url):
"""Test locking database timeout when recorder stopped.""" """Test locking database timeout when recorder stopped."""
if recorder_db_url.startswith("mysql://"): if recorder_db_url.startswith(("mysql://", "postgresql://")):
# This test is specific for SQLite: Locking is not implemented for other engines # This test is specific for SQLite: Locking is not implemented for other engines
return return
@ -1667,7 +1672,7 @@ async def test_database_connection_keep_alive_disabled_on_sqlite(
recorder_db_url: str, recorder_db_url: str,
): ):
"""Test we do not do keep alive for sqlite.""" """Test we do not do keep alive for sqlite."""
if recorder_db_url.startswith("mysql://"): if recorder_db_url.startswith(("mysql://", "postgresql://")):
# This test is specific for SQLite, keepalive runs on other engines # This test is specific for SQLite, keepalive runs on other engines
return return


@ -90,9 +90,15 @@ async def test_purge_old_states(
assert "test.recorder2" in instance._old_states assert "test.recorder2" in instance._old_states
states_after_purge = session.query(States) states_after_purge = list(session.query(States))
assert states_after_purge[1].old_state_id == states_after_purge[0].state_id # Since these states are deleted in batches, we can't guarantee the order
assert states_after_purge[0].old_state_id is None # but we can look them up by state
state_map_by_state = {state.state: state for state in states_after_purge}
dontpurgeme_5 = state_map_by_state["dontpurgeme_5"]
dontpurgeme_4 = state_map_by_state["dontpurgeme_4"]
assert dontpurgeme_5.old_state_id == dontpurgeme_4.state_id
assert dontpurgeme_4.old_state_id is None
finished = purge_old_data(instance, purge_before, repack=False) finished = purge_old_data(instance, purge_before, repack=False)
assert finished assert finished
@@ -140,7 +146,7 @@ async def test_purge_old_states_encouters_database_corruption(
     recorder_db_url: str,
 ):
     """Test database image is malformed while deleting old states."""
-    if recorder_db_url.startswith("mysql://"):
+    if recorder_db_url.startswith(("mysql://", "postgresql://")):
         # This test is specific for SQLite, wiping the database on error only happens
         # with SQLite.
         return
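The comment added above captures the general fix pattern in this commit: purge deletes rows in batches, so the order in which a query returns the survivors is engine-dependent, and indexing the result list positionally only happened to work on SQLite. Keying rows by a stable column first makes the relationship assertions order-independent. A self-contained sketch of the pattern (the `Row` dataclass is a stand-in for the ORM `States` model):

```python
from dataclasses import dataclass


@dataclass
class Row:  # stand-in for the States ORM model
    state: str
    state_id: int
    old_state_id: int | None


# An unordered result set: row position is an accident of the engine.
rows = [
    Row("dontpurgeme_5", state_id=5, old_state_id=4),
    Row("dontpurgeme_4", state_id=4, old_state_id=None),
]

# Fragile: rows[0]/rows[1] indexing assumes a particular return order.
# Robust: key each row by a stable column before asserting relationships.
by_state = {row.state: row for row in rows}
assert by_state["dontpurgeme_5"].old_state_id == by_state["dontpurgeme_4"].state_id
assert by_state["dontpurgeme_4"].old_state_id is None
```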


@@ -1733,14 +1733,19 @@ def record_states(hass):
         states[mp].append(
             set_state(mp, "idle", attributes={"media_title": str(sentinel.mt1)})
         )
-        states[mp].append(
-            set_state(mp, "YouTube", attributes={"media_title": str(sentinel.mt2)})
-        )
         states[sns1].append(set_state(sns1, "10", attributes=sns1_attr))
         states[sns2].append(set_state(sns2, "10", attributes=sns2_attr))
         states[sns3].append(set_state(sns3, "10", attributes=sns3_attr))
         states[sns4].append(set_state(sns4, "10", attributes=sns4_attr))
 
+    with patch(
+        "homeassistant.components.recorder.core.dt_util.utcnow",
+        return_value=one + timedelta(microseconds=1),
+    ):
+        states[mp].append(
+            set_state(mp, "YouTube", attributes={"media_title": str(sentinel.mt2)})
+        )
+
     with patch(
         "homeassistant.components.recorder.core.dt_util.utcnow", return_value=two
     ):


@ -16,7 +16,7 @@ from tests.common import SetupRecorderInstanceT, get_system_health_info
async def test_recorder_system_health(recorder_mock, hass, recorder_db_url): async def test_recorder_system_health(recorder_mock, hass, recorder_db_url):
"""Test recorder system health.""" """Test recorder system health."""
if recorder_db_url.startswith("mysql://"): if recorder_db_url.startswith(("mysql://", "postgresql://")):
# This test is specific for SQLite # This test is specific for SQLite
return return
@ -94,7 +94,7 @@ async def test_recorder_system_health_crashed_recorder_runs_table(
recorder_db_url: str, recorder_db_url: str,
): ):
"""Test recorder system health with crashed recorder runs table.""" """Test recorder system health with crashed recorder runs table."""
if recorder_db_url.startswith("mysql://"): if recorder_db_url.startswith(("mysql://", "postgresql://")):
# This test is specific for SQLite # This test is specific for SQLite
return return


@@ -16,7 +16,10 @@ from homeassistant.components import recorder
 from homeassistant.components.recorder import history, util
 from homeassistant.components.recorder.const import DOMAIN, SQLITE_URL_PREFIX
 from homeassistant.components.recorder.db_schema import RecorderRuns
-from homeassistant.components.recorder.models import UnsupportedDialect
+from homeassistant.components.recorder.models import (
+    UnsupportedDialect,
+    process_timestamp,
+)
 from homeassistant.components.recorder.util import (
     end_incomplete_runs,
     is_second_sunday,

@@ -44,8 +47,8 @@ def test_session_scope_not_setup(hass_recorder):
 def test_recorder_bad_commit(hass_recorder, recorder_db_url):
     """Bad _commit should retry 3 times."""
-    if recorder_db_url.startswith("mysql://"):
-        # This test is specific for SQLite: mysql does not raise an OperationalError
+    if recorder_db_url.startswith(("mysql://", "postgresql://")):
+        # This test is specific for SQLite: mysql/postgresql does not raise an OperationalError
         # which triggers retries for the bad query below, it raises ProgrammingError
         # on which we give up
         return

@@ -696,7 +699,7 @@ async def test_no_issue_for_mariadb_with_MDEV_25020(hass, caplog, mysql_version)
 def test_basic_sanity_check(hass_recorder, recorder_db_url):
     """Test the basic sanity checks with a missing table."""
-    if recorder_db_url.startswith("mysql://"):
+    if recorder_db_url.startswith(("mysql://", "postgresql://")):
         # This test is specific for SQLite
         return

@@ -714,7 +717,7 @@ def test_basic_sanity_check(hass_recorder, recorder_db_url):
 def test_combined_checks(hass_recorder, caplog, recorder_db_url):
     """Run Checks on the open database."""
-    if recorder_db_url.startswith("mysql://"):
+    if recorder_db_url.startswith(("mysql://", "postgresql://")):
         # This test is specific for SQLite
         return

@@ -780,24 +783,23 @@ def test_end_incomplete_runs(hass_recorder, caplog):
         assert run_info.closed_incorrect is False
 
         now = dt_util.utcnow()
-        now_without_tz = now.replace(tzinfo=None)
         end_incomplete_runs(session, now)
         run_info = run_information_with_session(session)
         assert run_info.closed_incorrect is True
-        assert run_info.end == now_without_tz
+        assert process_timestamp(run_info.end) == now
         session.flush()
 
         later = dt_util.utcnow()
         end_incomplete_runs(session, later)
         run_info = run_information_with_session(session)
-        assert run_info.end == now_without_tz
+        assert process_timestamp(run_info.end) == now
 
     assert "Ended unfinished session" in caplog.text
 
 
 def test_periodic_db_cleanups(hass_recorder, recorder_db_url):
     """Test periodic db cleanups."""
-    if recorder_db_url.startswith("mysql://"):
+    if recorder_db_url.startswith(("mysql://", "postgresql://")):
         # This test is specific for SQLite
         return
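The `run_info.end` assertions change for the same cross-engine reason: SQLite hands back naive datetimes, so the old test stripped tzinfo from the expected value, while PostgreSQL can return timezone-aware ones. Normalizing the stored value with `process_timestamp` makes the comparison valid either way. A minimal sketch of that normalization idea; this `normalize` helper is a stand-in written for illustration, not the recorder's actual `process_timestamp`:

```python
from datetime import datetime, timezone


def normalize(ts: datetime | None) -> datetime | None:
    """Treat naive datetimes as UTC so values compare across engines."""
    if ts is None:
        return None
    if ts.tzinfo is None:
        return ts.replace(tzinfo=timezone.utc)
    return ts.astimezone(timezone.utc)


now = datetime(2023, 2, 7, 15, 46, 14, tzinfo=timezone.utc)
assert normalize(now.replace(tzinfo=None)) == now  # SQLite-style naive value
assert normalize(now) == now                       # aware value, e.g. from Postgres
```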


@@ -2163,7 +2163,7 @@ async def test_backup_start_timeout(
     recorder_mock, hass, hass_ws_client, hass_supervisor_access_token, recorder_db_url
 ):
     """Test getting backup start when recorder is not present."""
-    if recorder_db_url.startswith("mysql://"):
+    if recorder_db_url.startswith(("mysql://", "postgresql://")):
         # This test is specific for SQLite: Locking is not implemented for other engines
         return

@@ -2204,7 +2204,7 @@ async def test_backup_end_without_start(
     recorder_mock, hass, hass_ws_client, hass_supervisor_access_token, recorder_db_url
 ):
     """Test backup start."""
-    if recorder_db_url.startswith("mysql://"):
+    if recorder_db_url.startswith(("mysql://", "postgresql://")):
         # This test is specific for SQLite: Locking is not implemented for other engines
         return


@@ -1002,9 +1002,12 @@ def recorder_db_url(pytestconfig):
         assert not sqlalchemy_utils.database_exists(db_url)
         sqlalchemy_utils.create_database(db_url, encoding=charset)
     elif db_url.startswith("postgresql://"):
-        pass
+        import sqlalchemy_utils
+
+        assert not sqlalchemy_utils.database_exists(db_url)
+        sqlalchemy_utils.create_database(db_url, encoding="utf8")
     yield db_url
-    if db_url.startswith("mysql://"):
+    if db_url.startswith("mysql://") or db_url.startswith("postgresql://"):
         sqlalchemy_utils.drop_database(db_url)
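With this branch filled in, the fixture gives PostgreSQL the same create-before/drop-after lifecycle that MySQL already had. A standalone sketch of that lifecycle with sqlalchemy_utils, using the same URL the CI job passes via `--dburl` (adjust credentials for a local server):

```python
import sqlalchemy_utils

# The URL the CI job passes to pytest via --dburl.
db_url = "postgresql://postgres:password@127.0.0.1/homeassistant-test"

assert not sqlalchemy_utils.database_exists(db_url)
sqlalchemy_utils.create_database(db_url, encoding="utf8")
try:
    pass  # run the recorder tests against db_url here
finally:
    sqlalchemy_utils.drop_database(db_url)
```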