Reduce lock contention when all translations are already cached (#108634)

J. Nick Koston 2024-01-21 22:29:03 -10:00 committed by GitHub
parent 62fe9144f1
commit 4a34cd25b2


@@ -18,7 +18,6 @@ from homeassistant.util.json import load_json
 _LOGGER = logging.getLogger(__name__)

-TRANSLATION_LOAD_LOCK = "translation_load_lock"
 TRANSLATION_FLATTEN_CACHE = "translation_flatten_cache"
 LOCALE_EN = "en"
@@ -191,13 +190,14 @@ async def _async_get_component_strings(
 class _TranslationCache:
     """Cache for flattened translations."""

-    __slots__ = ("hass", "loaded", "cache")
+    __slots__ = ("hass", "loaded", "cache", "lock")

     def __init__(self, hass: HomeAssistant) -> None:
         """Initialize the cache."""
         self.hass = hass
         self.loaded: dict[str, set[str]] = {}
         self.cache: dict[str, dict[str, dict[str, dict[str, str]]]] = {}
+        self.lock = asyncio.Lock()

     async def async_fetch(
         self,
@@ -206,10 +206,17 @@ class _TranslationCache:
         components: set[str],
     ) -> dict[str, str]:
         """Load resources into the cache."""
-        components_to_load = components - self.loaded.setdefault(language, set())
-
-        if components_to_load:
-            await self._async_load(language, components_to_load)
+        loaded = self.loaded.setdefault(language, set())
+        if components_to_load := components - loaded:
+            # Translations are never unloaded so if there are no components to load
+            # we can skip the lock which reduces contention when multiple different
+            # translations categories are being fetched at the same time which is
+            # common from the frontend.
+            async with self.lock:
+                # Check components to load again, as another task might have loaded
+                # them while we were waiting for the lock.
+                if components_to_load := components - loaded:
+                    await self._async_load(language, components_to_load)

         result: dict[str, str] = {}
         category_cache = self.cache.get(language, {}).get(category, {})
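
The hunk above is the familiar check / lock / re-check (double-checked locking) idiom applied to an asyncio.Lock: the fast path never touches the lock, and tasks that do acquire it re-compute the work set in case another task finished it while they were waiting. A minimal standalone sketch of the same pattern; the names here (ComponentCache, load_components, _do_load) are illustrative and not part of the Home Assistant source:

import asyncio


class ComponentCache:
    """Sketch of the check / lock / re-check pattern used in _TranslationCache."""

    def __init__(self) -> None:
        self.loaded: set[str] = set()
        self.lock = asyncio.Lock()

    async def load_components(self, components: set[str]) -> None:
        # Fast path: everything requested is already loaded, so the lock is
        # never awaited and concurrent callers do not queue behind each other.
        if not (components - self.loaded):
            return
        async with self.lock:
            # Re-check under the lock: another task may have loaded the same
            # components while this one was waiting to acquire it.
            if to_load := components - self.loaded:
                await self._do_load(to_load)

    async def _do_load(self, to_load: set[str]) -> None:
        # Stand-in for the real awaitable loading work.
        await asyncio.sleep(0)
        self.loaded |= to_load

Because entries are only ever added, a fast-path check that skips the lock can never miss a load that is still needed; at worst a task takes the slow path once more and finds nothing left to do after re-checking.
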
@@ -337,11 +344,9 @@ async def async_get_translations(
     """Return all backend translations.

     If integration specified, load it for that one.
-    Otherwise default to loaded intgrations combined with config flow
+    Otherwise default to loaded integrations combined with config flow
     integrations if config_flow is true.
     """
-    lock = hass.data.setdefault(TRANSLATION_LOAD_LOCK, asyncio.Lock())
-
     if integrations is not None:
         components = set(integrations)
     elif config_flow:
@@ -354,10 +359,9 @@ async def async_get_translations(
         component for component in hass.config.components if "." not in component
     }

-    async with lock:
-        if TRANSLATION_FLATTEN_CACHE in hass.data:
-            cache: _TranslationCache = hass.data[TRANSLATION_FLATTEN_CACHE]
-        else:
-            cache = hass.data[TRANSLATION_FLATTEN_CACHE] = _TranslationCache(hass)
+    if TRANSLATION_FLATTEN_CACHE in hass.data:
+        cache: _TranslationCache = hass.data[TRANSLATION_FLATTEN_CACHE]
+    else:
+        cache = hass.data[TRANSLATION_FLATTEN_CACHE] = _TranslationCache(hass)

     return await cache.async_fetch(language, category, components)
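
With loading now serialized inside _TranslationCache.lock, the module-level TRANSLATION_LOAD_LOCK around the cache get-or-create is no longer needed: that block contains no await, so asyncio's single-threaded event loop cannot switch to another task between the membership check and the assignment. A small sketch of that reasoning, with a plain dict standing in for hass.data (get_or_create_cache is an illustrative name, not a Home Assistant API):

def get_or_create_cache(data: dict[str, object], factory) -> object:
    # Safe without a lock inside the event loop: there is no await point
    # between the membership check and the assignment, so no other task can
    # run in between and create a second cache instance.
    if "translation_flatten_cache" in data:
        return data["translation_flatten_cache"]
    cache = data["translation_flatten_cache"] = factory()
    return cache
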