Misc. ZHA changes (#23190)
* handle the off part of on with timed off command
* use correct var
* only bind / configure cluster once
* clean up channel configuration
* additional debug logging
* add guard
* prevent multiple discoveries for a device
* cleanup and still configure on rejoin
parent 5e1338a9e4, commit 38d23ba0af
4 changed files with 99 additions and 25 deletions
@@ -167,6 +167,11 @@ class ZigbeeChannel:
 
     async def async_initialize(self, from_cache):
         """Initialize channel."""
+        _LOGGER.debug(
+            'initializing channel: %s from_cache: %s',
+            self._channel_name,
+            from_cache
+        )
         self._status = ChannelStatus.INITIALIZED
 
     @callback
@@ -7,6 +7,7 @@ https://home-assistant.io/components/zha/
 import logging
 from homeassistant.core import callback
 from homeassistant.helpers.dispatcher import async_dispatcher_send
+from homeassistant.helpers.event import async_call_later
 from . import ZigbeeChannel, parse_and_log_command, MAINS_POWERED
 from ..helpers import get_attr_id_by_name
 from ..const import (
@@ -40,11 +41,28 @@ class OnOffChannel(ZigbeeChannel):
 
         if cmd in ('off', 'off_with_effect'):
             self.attribute_updated(self.ON_OFF, False)
-        elif cmd in ('on', 'on_with_recall_global_scene', 'on_with_timed_off'):
+        elif cmd in ('on', 'on_with_recall_global_scene'):
             self.attribute_updated(self.ON_OFF, True)
+        elif cmd == 'on_with_timed_off':
+            should_accept = args[0]
+            on_time = args[1]
+            # 0 is always accept 1 is only accept when already on
+            if should_accept == 0 or (should_accept == 1 and self._state):
+                self.attribute_updated(self.ON_OFF, True)
+                if on_time > 0:
+                    async_call_later(
+                        self.device.hass,
+                        (on_time / 10),  # value is in 10ths of a second
+                        self.set_to_off
+                    )
         elif cmd == 'toggle':
             self.attribute_updated(self.ON_OFF, not bool(self._state))
 
+    @callback
+    def set_to_off(self, *_):
+        """Set the state to off."""
+        self.attribute_updated(self.ON_OFF, False)
+
     @callback
     def attribute_updated(self, attrid, value):
         """Handle attribute updates on this cluster."""
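The on_with_timed_off branch above follows the ZCL OnWithTimedOff command payload: args[0] is the on/off control field (0 = always accept, 1 = accept only while the light is already on) and args[1] is the on time, expressed in tenths of a second. A minimal standalone sketch of that acceptance and duration logic, with illustrative names that are not part of the ZHA code:

    def should_accept_timed_on(on_off_control: int, currently_on: bool) -> bool:
        """Decide whether an on_with_timed_off command applies.

        0 means always accept; 1 means accept only when already on.
        """
        return on_off_control == 0 or (on_off_control == 1 and currently_on)

    def on_duration_seconds(on_time: int) -> float:
        """Convert the ZCL on_time field (tenths of a second) to seconds."""
        return on_time / 10

For example, on_with_timed_off with control field 1 against a light that is off is ignored, while control field 0 with on_time 300 turns the light on and schedules set_to_off 30 seconds later.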
@@ -200,10 +200,50 @@ class ZHADevice:
         self.cluster_channels[cluster_channel.name] = cluster_channel
         self._all_channels.append(cluster_channel)
 
+    def get_channels_to_configure(self):
+        """Get a deduped list of channels for configuration.
+
+        This goes through all channels and gets a unique list of channels to
+        configure. It first assembles a unique list of channels that are part
+        of entities while stashing relay channels off to the side. It then
+        takes the stashed relay channels and adds them to the list of channels
+        that will be returned if there isn't a channel in the list for that
+        cluster already. This is done to ensure each cluster is only configured
+        once.
+        """
+        channel_keys = []
+        channels = []
+        relay_channels = self._relay_channels.values()
+
+        def get_key(channel):
+            channel_key = "ZDO"
+            if hasattr(channel.cluster, 'cluster_id'):
+                channel_key = "{}_{}".format(
+                    channel.cluster.endpoint.endpoint_id,
+                    channel.cluster.cluster_id
+                )
+            return channel_key
+
+        # first we get all unique non event channels
+        for channel in self.all_channels:
+            c_key = get_key(channel)
+            if c_key not in channel_keys and channel not in relay_channels:
+                channel_keys.append(c_key)
+                channels.append(channel)
+
+        # now we get event channels that still need their cluster configured
+        for channel in relay_channels:
+            channel_key = get_key(channel)
+            if channel_key not in channel_keys:
+                channel_keys.append(channel_key)
+                channels.append(channel)
+        return channels
+
     async def async_configure(self):
         """Configure the device."""
         _LOGGER.debug('%s: started configuration', self.name)
-        await self._execute_channel_tasks('async_configure')
+        await self._execute_channel_tasks(
+            self.get_channels_to_configure(), 'async_configure')
         _LOGGER.debug('%s: completed configuration', self.name)
         entry = self.gateway.zha_storage.async_create_or_update(self)
         _LOGGER.debug('%s: stored in registry: %s', self.name, entry)
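The docstring above describes a straightforward dedup: key each channel by its endpoint and cluster ids ("ZDO" for the ZDO channel), take entity channels first, and only add a relay channel when no entity channel already covers that cluster. A minimal standalone sketch of the same keying idea; the Channel tuple here is illustrative, not the real ZHA channel class:

    from collections import namedtuple

    # Illustrative stand-in for a ZHA channel; not the real class.
    Channel = namedtuple('Channel', ['endpoint_id', 'cluster_id', 'is_relay'])

    def dedupe_channels(channels):
        """Keep one channel per (endpoint, cluster), preferring non-relay ones."""
        seen = set()
        result = []
        # non-relay (entity) channels sort first, so they win ties
        for channel in sorted(channels, key=lambda c: c.is_relay):
            key = (channel.endpoint_id, channel.cluster_id)
            if key not in seen:
                seen.add(key)
                result.append(channel)
        return result

This is why async_configure now passes get_channels_to_configure() to the task runner: each cluster gets bound and configured exactly once, even when an entity channel and a relay channel wrap the same cluster.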
@@ -211,7 +251,8 @@ class ZHADevice:
     async def async_initialize(self, from_cache=False):
         """Initialize channels."""
         _LOGGER.debug('%s: started initialization', self.name)
-        await self._execute_channel_tasks('async_initialize', from_cache)
+        await self._execute_channel_tasks(
+            self.all_channels, 'async_initialize', from_cache)
         _LOGGER.debug(
             '%s: power source: %s',
             self.name,
@@ -220,16 +261,17 @@ class ZHADevice:
         self.status = DeviceStatus.INITIALIZED
         _LOGGER.debug('%s: completed initialization', self.name)
 
-    async def _execute_channel_tasks(self, task_name, *args):
+    async def _execute_channel_tasks(self, channels, task_name, *args):
         """Gather and execute a set of CHANNEL tasks."""
         channel_tasks = []
         semaphore = asyncio.Semaphore(3)
-        for channel in self.all_channels:
+        zdo_task = None
+        for channel in channels:
             if channel.name == ZDO_CHANNEL:
-                # pylint: disable=E1111
-                zdo_task = self._async_create_task(
-                    semaphore, channel, task_name, *args)
+                if zdo_task is None:  # We only want to do this once
+                    zdo_task = self._async_create_task(
+                        semaphore, channel, task_name, *args)
             else:
                 channel_tasks.append(
                     self._async_create_task(
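Two things are going on in _execute_channel_tasks: asyncio.Semaphore(3) caps how many channel tasks run against the radio at once, and the zdo_task guard schedules the ZDO channel only the first time it appears in the input. A minimal standalone sketch of the bounded-gather pattern, assuming nothing ZHA-specific:

    import asyncio

    async def _limited(semaphore, coro):
        """Await a coroutine while holding a semaphore slot."""
        async with semaphore:
            return await coro

    async def run_bounded(coros, limit=3):
        """Run coroutines concurrently, at most `limit` at a time."""
        semaphore = asyncio.Semaphore(limit)
        return await asyncio.gather(
            *(_limited(semaphore, coro) for coro in coros))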
@@ -259,17 +259,25 @@ class ZHAGateway:
         """Handle device joined and basic information discovered (async)."""
         zha_device = self._async_get_or_create_device(device, is_new_join)
 
-        discovery_infos = []
-        for endpoint_id, endpoint in device.endpoints.items():
-            async_process_endpoint(
-                self._hass, self._config, endpoint_id, endpoint,
-                discovery_infos, device, zha_device, is_new_join
-            )
-            if endpoint_id != 0:
-                for cluster in endpoint.in_clusters.values():
-                    cluster.bind_only = False
-                for cluster in endpoint.out_clusters.values():
-                    cluster.bind_only = True
+        is_rejoin = False
+        if zha_device.status is not DeviceStatus.INITIALIZED:
+            discovery_infos = []
+            for endpoint_id, endpoint in device.endpoints.items():
+                async_process_endpoint(
+                    self._hass, self._config, endpoint_id, endpoint,
+                    discovery_infos, device, zha_device, is_new_join
+                )
+                if endpoint_id != 0:
+                    for cluster in endpoint.in_clusters.values():
+                        cluster.bind_only = False
+                    for cluster in endpoint.out_clusters.values():
+                        cluster.bind_only = True
+        else:
+            is_rejoin = is_new_join is True
+            _LOGGER.debug(
+                'skipping discovery for previously discovered device: %s',
+                "{} - is rejoin: {}".format(zha_device.ieee, is_rejoin)
+            )
 
         if is_new_join:
             # configure the device
@@ -290,15 +298,16 @@ class ZHAGateway:
         else:
             await zha_device.async_initialize(from_cache=True)
 
-        for discovery_info in discovery_infos:
-            async_dispatch_discovery_info(
-                self._hass,
-                is_new_join,
-                discovery_info
-            )
+        if not is_rejoin:
+            for discovery_info in discovery_infos:
+                async_dispatch_discovery_info(
+                    self._hass,
+                    is_new_join,
+                    discovery_info
+                )
 
-        device_entity = async_create_device_entity(zha_device)
-        await self._component.async_add_entities([device_entity])
+            device_entity = async_create_device_entity(zha_device)
+            await self._component.async_add_entities([device_entity])
 
         if is_new_join:
             device_info = async_get_device_info(self._hass, zha_device)
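Taken together, the two gateway hunks implement "prevent multiple discoveries" and "still configure on rejoin": endpoint processing and entity dispatch are skipped for a device that is already INITIALIZED, while the is_new_join path (configuration) still runs. A minimal standalone sketch of that decision, with a simplified stand-in for the ZHA DeviceStatus enum:

    from enum import Enum

    class DeviceStatus(Enum):
        """Simplified stand-in for the ZHA device status enum."""
        CREATED = 1
        INITIALIZED = 2

    def discovery_plan(status, is_new_join):
        """Return (run_discovery, is_rejoin) for a joining device.

        Discovery runs only for devices not initialized before; an already
        initialized device announcing a new join is treated as a rejoin.
        """
        if status is not DeviceStatus.INITIALIZED:
            return True, False
        return False, is_new_join is True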