diff --git a/homeassistant/components/image_processing/demo.py b/homeassistant/components/image_processing/demo.py
index 97f0eace41d..3cc2c17654c 100644
--- a/homeassistant/components/image_processing/demo.py
+++ b/homeassistant/components/image_processing/demo.py
@@ -4,19 +4,19 @@ Support for the demo image processing.
 For more details about this component, please refer to the documentation at
 https://home-assistant.io/components/demo/
 """
-
+from homeassistant.components.image_processing import ATTR_CONFIDENCE
 from homeassistant.components.image_processing.openalpr_local import (
     ImageProcessingAlprEntity)
 from homeassistant.components.image_processing.microsoft_face_identify import (
-    ImageProcessingFaceIdentifyEntity)
+    ImageProcessingFaceEntity, ATTR_NAME, ATTR_AGE, ATTR_GENDER)
 
 
 def setup_platform(hass, config, add_devices, discovery_info=None):
     """Setup the demo image_processing platform."""
     add_devices([
         DemoImageProcessingAlpr('camera.demo_camera', "Demo Alpr"),
-        DemoImageProcessingFaceIdentify(
-            'camera.demo_camera', "Demo Face Identify")
+        DemoImageProcessingFace(
+            'camera.demo_camera', "Demo Face")
     ])
 
 
@@ -57,7 +57,7 @@ class DemoImageProcessingAlpr(ImageProcessingAlprEntity):
         self.process_plates(demo_data, 1)
 
 
-class DemoImageProcessingFaceIdentify(ImageProcessingFaceIdentifyEntity):
+class DemoImageProcessingFace(ImageProcessingFaceEntity):
     """Demo face identify image processing entity."""
 
     def __init__(self, camera_entity, name):
@@ -84,10 +84,22 @@ class DemoImageProcessingFaceIdentify(ImageProcessingFaceIdentifyEntity):
 
     def process_image(self, image):
         """Process image."""
-        demo_data = {
-            'Hans': 98.34,
-            'Helena': 82.53,
-            'Luna': 62.53,
-        }
+        demo_data = [
+            {
+                ATTR_CONFIDENCE: 98.34,
+                ATTR_NAME: 'Hans',
+                ATTR_AGE: 16.0,
+                ATTR_GENDER: 'male',
+            },
+            {
+                ATTR_NAME: 'Helena',
+                ATTR_AGE: 28.0,
+                ATTR_GENDER: 'female',
+            },
+            {
+                ATTR_CONFIDENCE: 62.53,
+                ATTR_NAME: 'Luna',
+            },
+        ]
 
         self.process_faces(demo_data, 4)
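The demo platform now hands `process_faces` a list of face dictionaries instead of a name-to-confidence dict; the entity state becomes the highest-confidence face name and the full list is exposed as attributes. A rough consumer sketch, assuming a running `hass` instance with the demo platform from this diff loaded and one scan completed:

```python
# Sketch only: `hass` is assumed to be a running Home Assistant instance.
state = hass.states.get('image_processing.demo_face')

print(state.state)                       # 'Hans' -> highest-confidence name
print(state.attributes['total_faces'])   # 4
for face in state.attributes['faces']:
    # Keys mirror ATTR_NAME / ATTR_CONFIDENCE / ATTR_AGE / ATTR_GENDER above;
    # not every face carries every key (see 'Luna' in the demo data).
    print(face.get('name'), face.get('confidence'), face.get('gender'))
```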
diff --git a/homeassistant/components/image_processing/microsoft_face_detect.py b/homeassistant/components/image_processing/microsoft_face_detect.py
new file mode 100644
index 00000000000..43c5c9dd7f0
--- /dev/null
+++ b/homeassistant/components/image_processing/microsoft_face_detect.py
@@ -0,0 +1,122 @@
+"""
+Component that will help set the microsoft face detect processing.
+
+For more details about this component, please refer to the documentation at
+https://home-assistant.io/components/image_processing.microsoft_face_detect/
+"""
+import asyncio
+import logging
+
+import voluptuous as vol
+
+from homeassistant.core import split_entity_id
+from homeassistant.exceptions import HomeAssistantError
+from homeassistant.components.microsoft_face import DATA_MICROSOFT_FACE
+from homeassistant.components.image_processing import (
+    PLATFORM_SCHEMA, CONF_SOURCE, CONF_ENTITY_ID, CONF_NAME)
+from homeassistant.components.image_processing.microsoft_face_identify import (
+    ImageProcessingFaceEntity, ATTR_GENDER, ATTR_AGE, ATTR_GLASSES)
+import homeassistant.helpers.config_validation as cv
+
+DEPENDENCIES = ['microsoft_face']
+
+_LOGGER = logging.getLogger(__name__)
+
+EVENT_IDENTIFY_FACE = 'detect_face'
+
+SUPPORTED_ATTRIBUTES = [
+    ATTR_AGE,
+    ATTR_GENDER,
+    ATTR_GLASSES
+]
+
+CONF_ATTRIBUTES = 'attributes'
+DEFAULT_ATTRIBUTES = [ATTR_AGE, ATTR_GENDER]
+
+
+def validate_attributes(list_attributes):
+    """Validate face attributes."""
+    for attr in list_attributes:
+        if attr not in SUPPORTED_ATTRIBUTES:
+            raise vol.Invalid("Invalid attribute {0}".format(attr))
+    return list_attributes
+
+
+PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
+    vol.Optional(CONF_ATTRIBUTES, default=DEFAULT_ATTRIBUTES):
+        vol.All(cv.ensure_list, validate_attributes),
+})
+
+
+@asyncio.coroutine
+def async_setup_platform(hass, config, async_add_devices, discovery_info=None):
+    """Set up the microsoft face detection platform."""
+    api = hass.data[DATA_MICROSOFT_FACE]
+    attributes = config[CONF_ATTRIBUTES]
+
+    entities = []
+    for camera in config[CONF_SOURCE]:
+        entities.append(MicrosoftFaceDetectEntity(
+            camera[CONF_ENTITY_ID], api, attributes, camera.get(CONF_NAME)
+        ))
+
+    yield from async_add_devices(entities)
+
+
+class MicrosoftFaceDetectEntity(ImageProcessingFaceEntity):
+    """Microsoft face api entity for detect."""
+
+    def __init__(self, camera_entity, api, attributes, name=None):
+        """Initialize microsoft face detect entity."""
+        super().__init__()
+
+        self._api = api
+        self._camera = camera_entity
+        self._attributes = attributes
+
+        if name:
+            self._name = name
+        else:
+            self._name = "MicrosoftFace {0}".format(
+                split_entity_id(camera_entity)[1])
+
+    @property
+    def camera_entity(self):
+        """Return camera entity id from process pictures."""
+        return self._camera
+
+    @property
+    def name(self):
+        """Return the name of the entity."""
+        return self._name
+
+    @asyncio.coroutine
+    def async_process_image(self, image):
+        """Process image.
+
+        This method is a coroutine.
+        """
+        face_data = None
+        try:
+            face_data = yield from self._api.call_api(
+                'post', 'detect', image, binary=True,
+                params={'returnFaceAttributes': ",".join(self._attributes)})
+
+        except HomeAssistantError as err:
+            _LOGGER.error("Can't process image on microsoft face: %s", err)
+            return
+
+        if face_data is None or len(face_data) < 1:
+            return
+
+        faces = []
+        for face in face_data:
+            face_attr = {}
+            for attr in self._attributes:
+                if attr in face['faceAttributes']:
+                    face_attr[attr] = face['faceAttributes'][attr]
+
+            if face_attr:
+                faces.append(face_attr)
+
+        self.async_process_faces(faces, len(face_data))
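For reference, setting up the new platform looks roughly like the config dicts the tests further down feed to `setup_component`. This is only a sketch: the `api_key` value is a placeholder and `hass` is assumed to be a running instance.

```python
# Hedged sketch of a microsoft_face_detect setup, mirroring the test configs
# below; 'YOUR_API_KEY' is a placeholder, not a real key.
from homeassistant.bootstrap import setup_component

config = {
    'image_processing': {
        'platform': 'microsoft_face_detect',
        'source': [{'entity_id': 'camera.demo_camera'}],
        'attributes': ['age', 'gender'],   # subset of SUPPORTED_ATTRIBUTES
    },
    'camera': {'platform': 'demo'},
    'microsoft_face': {'api_key': 'YOUR_API_KEY'},
}

setup_component(hass, 'image_processing', config)  # assumes a running `hass`
```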
+ """ + face_data = None + try: + face_data = yield from self._api.call_api( + 'post', 'detect', image, binary=True, + params={'returnFaceAttributes': ",".join(self._attributes)}) + + except HomeAssistantError as err: + _LOGGER.error("Can't process image on microsoft face: %s", err) + return + + if face_data is None or len(face_data) < 1: + return + + faces = [] + for face in face_data: + face_attr = {} + for attr in self._attributes: + if attr in face['faceAttributes']: + face_attr[attr] = face['faceAttributes'][attr] + + if face_attr: + faces.append(face_attr) + + self.async_process_faces(faces, len(face_data)) diff --git a/homeassistant/components/image_processing/microsoft_face_identify.py b/homeassistant/components/image_processing/microsoft_face_identify.py index 0402f272eeb..1cc17f1443b 100644 --- a/homeassistant/components/image_processing/microsoft_face_identify.py +++ b/homeassistant/components/image_processing/microsoft_face_identify.py @@ -23,11 +23,16 @@ DEPENDENCIES = ['microsoft_face'] _LOGGER = logging.getLogger(__name__) -EVENT_IDENTIFY_FACE = 'identify_face' +EVENT_DETECT_FACE = 'image_processing.detect_face' ATTR_NAME = 'name' ATTR_TOTAL_FACES = 'total_faces' -ATTR_KNOWN_FACES = 'known_faces' +ATTR_AGE = 'age' +ATTR_GENDER = 'gender' +ATTR_MOTION = 'motion' +ATTR_GLASSES = 'glasses' +ATTR_FACES = 'faces' + CONF_GROUP = 'group' PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({ @@ -52,71 +57,90 @@ def async_setup_platform(hass, config, async_add_devices, discovery_info=None): yield from async_add_devices(entities) -class ImageProcessingFaceIdentifyEntity(ImageProcessingEntity): - """Base entity class for face identify/verify image processing.""" +class ImageProcessingFaceEntity(ImageProcessingEntity): + """Base entity class for face image processing.""" def __init__(self): """Initialize base face identify/verify entity.""" - self.known_faces = {} # last scan data + self.faces = [] # last scan data self.total_faces = 0 # face count @property def state(self): """Return the state of the entity.""" confidence = 0 - face_name = STATE_UNKNOWN + state = STATE_UNKNOWN - # search high verify face - for i_name, i_co in self.known_faces.items(): - if i_co > confidence: - confidence = i_co - face_name = i_name - return face_name + # no confidence support + if not self.confidence: + return self.total_faces + + # search high confidence + for face in self.faces: + if ATTR_CONFIDENCE not in face: + continue + + f_co = face[ATTR_CONFIDENCE] + if f_co > confidence: + confidence = f_co + for attr in [ATTR_NAME, ATTR_MOTION]: + if attr in face: + state = face[attr] + break + + return state @property def state_attributes(self): """Return device specific state attributes.""" attr = { - ATTR_KNOWN_FACES: self.known_faces, + ATTR_FACES: self.faces, ATTR_TOTAL_FACES: self.total_faces, } return attr - def process_faces(self, known, total): + def process_faces(self, faces, total): """Send event with detected faces and store data.""" run_callback_threadsafe( - self.hass.loop, self.async_process_faces, known, total + self.hass.loop, self.async_process_faces, faces, total ).result() @callback - def async_process_faces(self, known, total): + def async_process_faces(self, faces, total): """Send event with detected faces and store data. known are a dict in follow format: - { 'name': confidence } + [ + { + ATTR_CONFIDENCE: 80, + ATTR_NAME: 'Name', + ATTR_AGE: 12.0, + ATTR_GENDER: 'man', + ATTR_MOTION: 'smile', + ATTR_GLASSES: 'sunglasses' + }, + ] This method must be run in the event loop. 
""" - detect = {name: confidence for name, confidence in known.items() - if confidence >= self.confidence} - # send events - for name, confidence in detect.items(): + for face in faces: + if ATTR_CONFIDENCE in face and self.confidence: + if face[ATTR_CONFIDENCE] < self.confidence: + continue + + face.update({ATTR_ENTITY_ID: self.entity_id}) self.hass.async_add_job( - self.hass.bus.async_fire, EVENT_IDENTIFY_FACE, { - ATTR_NAME: name, - ATTR_ENTITY_ID: self.entity_id, - ATTR_CONFIDENCE: confidence, - } + self.hass.bus.async_fire, EVENT_DETECT_FACE, face ) # update entity store - self.known_faces = detect + self.faces = faces self.total_faces = total -class MicrosoftFaceIdentifyEntity(ImageProcessingFaceIdentifyEntity): +class MicrosoftFaceIdentifyEntity(ImageProcessingFaceEntity): """Microsoft face api entity for identify.""" def __init__(self, camera_entity, api, face_group, confidence, name=None): @@ -173,7 +197,7 @@ class MicrosoftFaceIdentifyEntity(ImageProcessingFaceIdentifyEntity): return # parse data - knwon_faces = {} + knwon_faces = [] total = 0 for face in detect: total += 1 @@ -187,7 +211,10 @@ class MicrosoftFaceIdentifyEntity(ImageProcessingFaceIdentifyEntity): name = s_name break - knwon_faces[name] = data['confidence'] * 100 + knwon_faces.append({ + ATTR_NAME: name, + ATTR_CONFIDENCE: data['confidence'] * 100, + }) # process data self.async_process_faces(knwon_faces, total) diff --git a/homeassistant/components/image_processing/openalpr_local.py b/homeassistant/components/image_processing/openalpr_local.py index 319f14c1f3d..dbd404dd04c 100644 --- a/homeassistant/components/image_processing/openalpr_local.py +++ b/homeassistant/components/image_processing/openalpr_local.py @@ -24,7 +24,7 @@ _LOGGER = logging.getLogger(__name__) RE_ALPR_PLATE = re.compile(r"^plate\d*:") RE_ALPR_RESULT = re.compile(r"- (\w*)\s*confidence: (\d*.\d*)") -EVENT_FOUND_PLATE = 'found_plate' +EVENT_FOUND_PLATE = 'image_processing.found_plate' ATTR_PLATE = 'plate' ATTR_PLATES = 'plates' diff --git a/tests/components/image_processing/test_init.py b/tests/components/image_processing/test_init.py index b13dcf48a72..2ac64891e95 100644 --- a/tests/components/image_processing/test_init.py +++ b/tests/components/image_processing/test_init.py @@ -110,7 +110,7 @@ class TestImageProcessing(object): class TestImageProcessingAlpr(object): - """Test class for image processing.""" + """Test class for alpr image processing.""" def setup_method(self): """Setup things to be run when tests are started.""" @@ -142,7 +142,7 @@ class TestImageProcessingAlpr(object): """Mock event.""" self.alpr_events.append(event) - self.hass.bus.listen('found_plate', mock_alpr_event) + self.hass.bus.listen('image_processing.found_plate', mock_alpr_event) def teardown_method(self): """Stop everything that was started.""" @@ -211,8 +211,8 @@ class TestImageProcessingAlpr(object): assert event_data[0]['entity_id'] == 'image_processing.demo_alpr' -class TestImageProcessingFaceIdentify(object): - """Test class for image processing.""" +class TestImageProcessingFace(object): + """Test class for face image processing.""" def setup_method(self): """Setup things to be run when tests are started.""" @@ -228,7 +228,7 @@ class TestImageProcessingFaceIdentify(object): } with patch('homeassistant.components.image_processing.demo.' 
diff --git a/tests/components/image_processing/test_microsoft_face_detect.py b/tests/components/image_processing/test_microsoft_face_detect.py
new file mode 100644
index 00000000000..82fd54f1633
--- /dev/null
+++ b/tests/components/image_processing/test_microsoft_face_detect.py
@@ -0,0 +1,159 @@
+"""The tests for the microsoft face detect platform."""
+from unittest.mock import patch, PropertyMock
+
+from homeassistant.core import callback
+from homeassistant.const import ATTR_ENTITY_PICTURE
+from homeassistant.bootstrap import setup_component
+import homeassistant.components.image_processing as ip
+import homeassistant.components.microsoft_face as mf
+
+from tests.common import (
+    get_test_home_assistant, assert_setup_component, load_fixture, mock_coro)
+
+
+class TestMicrosoftFaceDetectSetup(object):
+    """Test class for image processing."""
+
+    def setup_method(self):
+        """Setup things to be run when tests are started."""
+        self.hass = get_test_home_assistant()
+
+    def teardown_method(self):
+        """Stop everything that was started."""
+        self.hass.stop()
+
+    @patch('homeassistant.components.microsoft_face.'
+           'MicrosoftFace.update_store', return_value=mock_coro()())
+    def test_setup_platform(self, store_mock):
+        """Setup platform with one entity."""
+        config = {
+            ip.DOMAIN: {
+                'platform': 'microsoft_face_detect',
+                'source': {
+                    'entity_id': 'camera.demo_camera'
+                },
+                'attributes': ['age', 'gender'],
+            },
+            'camera': {
+                'platform': 'demo'
+            },
+            mf.DOMAIN: {
+                'api_key': '12345678abcdef6',
+            }
+        }
+
+        with assert_setup_component(1, ip.DOMAIN):
+            setup_component(self.hass, ip.DOMAIN, config)
+
+        assert self.hass.states.get(
+            'image_processing.microsoftface_demo_camera')
+
+    @patch('homeassistant.components.microsoft_face.'
+           'MicrosoftFace.update_store', return_value=mock_coro()())
+    def test_setup_platform_name(self, store_mock):
+        """Setup platform with one entity and set name."""
+        config = {
+            ip.DOMAIN: {
+                'platform': 'microsoft_face_detect',
+                'source': {
+                    'entity_id': 'camera.demo_camera',
+                    'name': 'test local'
+                },
+            },
+            'camera': {
+                'platform': 'demo'
+            },
+            mf.DOMAIN: {
+                'api_key': '12345678abcdef6',
+            }
+        }
+
+        with assert_setup_component(1, ip.DOMAIN):
+            setup_component(self.hass, ip.DOMAIN, config)
+
+        assert self.hass.states.get('image_processing.test_local')
+
+
+class TestMicrosoftFaceDetect(object):
+    """Test class for image processing."""
+
+    def setup_method(self):
+        """Setup things to be run when tests are started."""
+        self.hass = get_test_home_assistant()
+
+        self.config = {
+            ip.DOMAIN: {
+                'platform': 'microsoft_face_detect',
+                'source': {
+                    'entity_id': 'camera.demo_camera',
+                    'name': 'test local'
+                },
+                'attributes': ['age', 'gender'],
+            },
+            'camera': {
+                'platform': 'demo'
+            },
+            mf.DOMAIN: {
+                'api_key': '12345678abcdef6',
+            }
+        }
+
+    def teardown_method(self):
+        """Stop everything that was started."""
+        self.hass.stop()
+
+    @patch('homeassistant.components.image_processing.microsoft_face_detect.'
+           'MicrosoftFaceDetectEntity.should_poll',
+           new_callable=PropertyMock(return_value=False))
+    def test_ms_detect_process_image(self, poll_mock, aioclient_mock):
+        """Setup and scan a picture and test faces from event."""
+        aioclient_mock.get(
+            mf.FACE_API_URL.format("persongroups"),
+            text=load_fixture('microsoft_face_persongroups.json')
+        )
+        aioclient_mock.get(
+            mf.FACE_API_URL.format("persongroups/test_group1/persons"),
+            text=load_fixture('microsoft_face_persons.json')
+        )
+        aioclient_mock.get(
+            mf.FACE_API_URL.format("persongroups/test_group2/persons"),
+            text=load_fixture('microsoft_face_persons.json')
+        )
+
+        setup_component(self.hass, ip.DOMAIN, self.config)
+
+        state = self.hass.states.get('camera.demo_camera')
+        url = "{0}{1}".format(
+            self.hass.config.api.base_url,
+            state.attributes.get(ATTR_ENTITY_PICTURE))
+
+        face_events = []
+
+        @callback
+        def mock_face_event(event):
+            """Mock event."""
+            face_events.append(event)
+
+        self.hass.bus.listen('image_processing.detect_face', mock_face_event)
+
+        aioclient_mock.get(url, content=b'image')
+
+        aioclient_mock.post(
+            mf.FACE_API_URL.format("detect"),
+            text=load_fixture('microsoft_face_detect.json'),
+            params={'returnFaceAttributes': "age,gender"}
+        )
+
+        ip.scan(self.hass, entity_id='image_processing.test_local')
+        self.hass.block_till_done()
+
+        state = self.hass.states.get('image_processing.test_local')
+
+        assert len(face_events) == 1
+        assert state.attributes.get('total_faces') == 1
+        assert state.state == '1'
+
+        assert face_events[0].data['age'] == 71.0
+        assert face_events[0].data['gender'] == 'male'
+        assert face_events[0].data['entity_id'] == \
+            'image_processing.test_local'
diff --git a/tests/components/image_processing/test_microsoft_face_identify.py b/tests/components/image_processing/test_microsoft_face_identify.py
index 8d75f6ff1d3..8812c1c050e 100644
--- a/tests/components/image_processing/test_microsoft_face_identify.py
+++ b/tests/components/image_processing/test_microsoft_face_identify.py
@@ -106,7 +106,7 @@ class TestMicrosoftFaceIdentify(object):
     @patch('homeassistant.components.image_processing.microsoft_face_identify.'
            'MicrosoftFaceIdentifyEntity.should_poll',
            new_callable=PropertyMock(return_value=False))
-    def test_openalpr_process_image(self, poll_mock, aioclient_mock):
+    def test_ms_identify_process_image(self, poll_mock, aioclient_mock):
         """Setup and scan a picture and test plates from event."""
         aioclient_mock.get(
             mf.FACE_API_URL.format("persongroups"),
@@ -135,7 +135,7 @@ class TestMicrosoftFaceIdentify(object):
             """Mock event."""
             face_events.append(event)
 
-        self.hass.bus.listen('identify_face', mock_face_event)
+        self.hass.bus.listen('image_processing.detect_face', mock_face_event)
 
         aioclient_mock.get(url, content=b'image')
 
diff --git a/tests/components/image_processing/test_openalpr_cloud.py b/tests/components/image_processing/test_openalpr_cloud.py
index 8e9f35eb0b2..8bce672e0d9 100644
--- a/tests/components/image_processing/test_openalpr_cloud.py
+++ b/tests/components/image_processing/test_openalpr_cloud.py
@@ -143,7 +143,7 @@ class TestOpenAlprCloud(object):
             """Mock event."""
             self.alpr_events.append(event)
 
-        self.hass.bus.listen('found_plate', mock_alpr_event)
+        self.hass.bus.listen('image_processing.found_plate', mock_alpr_event)
 
         self.params = {
            'secret_key': "sk_abcxyz123456",
diff --git a/tests/components/image_processing/test_openalpr_local.py b/tests/components/image_processing/test_openalpr_local.py
index 5186332661b..ffe2eadc8d6 100644
--- a/tests/components/image_processing/test_openalpr_local.py
+++ b/tests/components/image_processing/test_openalpr_local.py
@@ -134,7 +134,7 @@ class TestOpenAlprLocal(object):
             """Mock event."""
             self.alpr_events.append(event)
 
-        self.hass.bus.listen('found_plate', mock_alpr_event)
+        self.hass.bus.listen('image_processing.found_plate', mock_alpr_event)
 
     def teardown_method(self):
         """Stop everything that was started."""
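Because the events are now namespaced, anything still listening for the old 'identify_face' or 'found_plate' names has to switch to the new ones. A hedged listener sketch, assuming a running `hass` instance:

```python
# Sketch of listeners for the renamed events; `hass` is assumed to exist.
from homeassistant.core import callback


@callback
def handle_face(event):
    """Log detected faces from image_processing.detect_face."""
    print(event.data.get('name'), event.data.get('confidence'),
          event.data.get('age'), event.data.get('gender'))


@callback
def handle_plate(event):
    """Log plates from image_processing.found_plate."""
    print(event.data.get('plate'), event.data.get('confidence'))


hass.bus.async_listen('image_processing.detect_face', handle_face)
hass.bus.async_listen('image_processing.found_plate', handle_plate)
```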