* Remove unnecessary exception re-wraps
* Preserve exception chains on re-raise

  We apply `from cause` in almost all possible cases here. In some cases it could conceivably be better to use `from None` if we really want to hide the cause, but those should be a minority, and `from cause` is an improvement over the corresponding `raise` without a `from` in all cases anyway. The only place we raise `from None` is in plex, where the exception for the original invalid SSL cert is not the root cause of the failure to validate a newly fetched one.

  Exception variable names follow the local convention where there is a consistent one, otherwise `err` to match the majority of the codebase.

* Fix mistaken re-wrap in `homematicip_cloud/hap.py`

  Missed the difference between `HmipConnectionError` and `HmipcConnectionError`.

* Do not hide the original error on plex new cert validation error

  The original is not the cause of the new one, but showing the old one in the traceback is still useful.
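Not part of the changeset itself, but a minimal sketch of the two re-raise patterns described above; `UpdateFailed` and `fetch` are placeholder names here, not code from any integration:

```python
class UpdateFailed(Exception):
    """Stand-in wrapper exception; any exception class behaves the same way."""


def fetch():
    """Simulate a failing backend call."""
    raise ConnectionError("backend unreachable")


def update_preserving_cause():
    try:
        fetch()
    except ConnectionError as err:
        # "from err" records the original error as __cause__, so the traceback
        # shows it under "The above exception was the direct cause of ...".
        raise UpdateFailed(f"update failed: {err}") from err


def update_hiding_cause():
    try:
        fetch()
    except ConnectionError:
        # "from None" suppresses the chained traceback entirely; only appropriate
        # when the caught exception genuinely is not the cause (the plex cert case).
        raise UpdateFailed("update failed") from None
```

A bare `raise UpdateFailed(...)` inside the `except` block sits in between: the original exception still appears, but only as "During handling of the above exception, another exception occurred", which is misleading when it really is the cause. That is why `from cause` is preferred over a plain re-raise.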
The AccuWeather component module (Python, 132 lines, 4.3 KiB):
"""The AccuWeather component."""
|
|
import asyncio
|
|
from datetime import timedelta
|
|
import logging
|
|
|
|
from accuweather import AccuWeather, ApiError, InvalidApiKeyError, RequestsExceededError
|
|
from aiohttp.client_exceptions import ClientConnectorError
|
|
from async_timeout import timeout
|
|
|
|
from homeassistant.const import CONF_API_KEY
|
|
from homeassistant.core import Config, HomeAssistant
|
|
from homeassistant.exceptions import ConfigEntryNotReady
|
|
from homeassistant.helpers.aiohttp_client import async_get_clientsession
|
|
from homeassistant.helpers.update_coordinator import DataUpdateCoordinator, UpdateFailed
|
|
|
|
from .const import (
|
|
ATTR_FORECAST,
|
|
CONF_FORECAST,
|
|
COORDINATOR,
|
|
DOMAIN,
|
|
UNDO_UPDATE_LISTENER,
|
|
)
|
|
|
|
_LOGGER = logging.getLogger(__name__)
|
|
|
|
PLATFORMS = ["sensor", "weather"]
|
|
|
|
|
|
async def async_setup(hass: HomeAssistant, config: Config) -> bool:
    """Set up configured AccuWeather."""
    hass.data.setdefault(DOMAIN, {})
    return True


async def async_setup_entry(hass, config_entry) -> bool:
    """Set up AccuWeather as config entry."""
    api_key = config_entry.data[CONF_API_KEY]
    location_key = config_entry.unique_id
    forecast = config_entry.options.get(CONF_FORECAST, False)

    _LOGGER.debug("Using location_key: %s, get forecast: %s", location_key, forecast)

    websession = async_get_clientsession(hass)

    coordinator = AccuWeatherDataUpdateCoordinator(
        hass, websession, api_key, location_key, forecast
    )
    await coordinator.async_refresh()

    if not coordinator.last_update_success:
        raise ConfigEntryNotReady

    undo_listener = config_entry.add_update_listener(update_listener)

    hass.data[DOMAIN][config_entry.entry_id] = {
        COORDINATOR: coordinator,
        UNDO_UPDATE_LISTENER: undo_listener,
    }

    for component in PLATFORMS:
        hass.async_create_task(
            hass.config_entries.async_forward_entry_setup(config_entry, component)
        )

    return True


async def async_unload_entry(hass, config_entry):
    """Unload a config entry."""
    unload_ok = all(
        await asyncio.gather(
            *[
                hass.config_entries.async_forward_entry_unload(config_entry, component)
                for component in PLATFORMS
            ]
        )
    )

    hass.data[DOMAIN][config_entry.entry_id][UNDO_UPDATE_LISTENER]()

    if unload_ok:
        hass.data[DOMAIN].pop(config_entry.entry_id)

    return unload_ok


async def update_listener(hass, config_entry):
    """Update listener."""
    await hass.config_entries.async_reload(config_entry.entry_id)


class AccuWeatherDataUpdateCoordinator(DataUpdateCoordinator):
    """Class to manage fetching AccuWeather data from the API."""

    def __init__(self, hass, session, api_key, location_key, forecast: bool):
        """Initialize."""
        self.location_key = location_key
        self.forecast = forecast
        self.is_metric = hass.config.units.is_metric
        self.accuweather = AccuWeather(api_key, session, location_key=self.location_key)

        # Enabling the forecast download increases the number of requests per data
        # update, so we use an update interval of 32 minutes for current conditions
        # only and 64 minutes for current conditions plus forecast to stay within the
        # allowed number of requests. We are allowed 50 requests per day, so we use 45
        # and keep 5 in reserve for restarting HA.
        update_interval = (
            timedelta(minutes=64) if self.forecast else timedelta(minutes=32)
        )
        _LOGGER.debug("Data will be updated every %s", update_interval)

        super().__init__(hass, _LOGGER, name=DOMAIN, update_interval=update_interval)

    async def _async_update_data(self):
        """Update data via library."""
        try:
            async with timeout(10):
                current = await self.accuweather.async_get_current_conditions()
                forecast = (
                    await self.accuweather.async_get_forecast(metric=self.is_metric)
                    if self.forecast
                    else {}
                )
        except (
            ApiError,
            ClientConnectorError,
            InvalidApiKeyError,
            RequestsExceededError,
        ) as error:
            # Re-wrap as UpdateFailed, keeping the original exception chained
            # so the root cause stays visible in the traceback.
            raise UpdateFailed(error) from error
        _LOGGER.debug("Requests remaining: %s", self.accuweather.requests_remaining)
        return {**current, **{ATTR_FORECAST: forecast}}