feat: face recognition agent #53
@@ -55,6 +55,7 @@ class RICommunicationAgent(BaseAgent):
         self.connected = False
         self.gesture_agent: RobotGestureAgent | None = None
         self.speech_agent: RobotSpeechAgent | None = None
+        self.visual_emotion_recognition_agent: VisualEmotionRecognitionAgent | None = None

     async def setup(self):
         """
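The new attribute follows the same pattern as the existing gesture and speech agents: typed as `VisualEmotionRecognitionAgent | None` and initialised to `None`, so later code has to guard with an `is not None` check before using it. A minimal, self-contained sketch of that pattern (stub class and method names are placeholders, not the real implementation):

```python
import asyncio


class VisualEmotionRecognitionAgentStub:
    """Placeholder; only the lifecycle method used here is sketched."""

    async def stop(self) -> None:
        print("visual emotion recognition agent stopped")


class CommunicationAgentSketch:
    def __init__(self) -> None:
        # Stays None until the negotiation step actually creates the sub-agent.
        self.visual_emotion_recognition_agent: VisualEmotionRecognitionAgentStub | None = None

    async def stop(self) -> None:
        # The None guard keeps shutdown safe when the sub-agent was never started.
        if self.visual_emotion_recognition_agent is not None:
            await self.visual_emotion_recognition_agent.stop()


asyncio.run(CommunicationAgentSketch().stop())  # safe no-op: agent never created
```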
@@ -218,6 +219,7 @@ class RICommunicationAgent(BaseAgent):
                     socket_address=addr,
                     bind=bind,
                 )
+                self.visual_emotion_recognition_agent = visual_emotion_agent
                 await visual_emotion_agent.start()
             case _:
                 self.logger.warning("Unhandled negotiation id: %s", id)
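The negotiation handler now stores the newly created agent on `self` before starting it, so the shutdown path added below can always find it. The surrounding code appears to be a `match` on the negotiation id with a logged fallback; a rough sketch of that shape (the id value, the stub agent and the unused `addr`/`bind` handling are illustrative, not the real protocol):

```python
import asyncio
import logging

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger("negotiation-sketch")


class VisualEmotionAgentStub:
    """Placeholder for the real VisualEmotionRecognitionAgent."""

    async def start(self) -> None:
        logger.info("visual emotion recognition agent started")


class NegotiationSketch:
    def __init__(self) -> None:
        self.visual_emotion_recognition_agent: VisualEmotionAgentStub | None = None

    async def handle_negotiation(self, id: str, addr: str, bind: bool) -> None:
        # `addr`/`bind` would configure the sub-agent's socket in the real code;
        # they are accepted but unused in this stub.
        match id:
            case "visual_emotion":  # illustrative id, not the real value
                visual_emotion_agent = VisualEmotionAgentStub()
                # Keep the reference *before* start(), so stop() can always reach it.
                self.visual_emotion_recognition_agent = visual_emotion_agent
                await visual_emotion_agent.start()
            case _:
                logger.warning("Unhandled negotiation id: %s", id)


sketch = NegotiationSketch()
asyncio.run(sketch.handle_negotiation("visual_emotion", "tcp://127.0.0.1:5555", bind=False))
asyncio.run(sketch.handle_negotiation("unknown", "tcp://127.0.0.1:5556", bind=True))
```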
@@ -323,6 +325,9 @@ class RICommunicationAgent(BaseAgent):

         if self.speech_agent is not None:
             await self.speech_agent.stop()

+        if self.visual_emotion_recognition_agent is not None:
+            await self.visual_emotion_recognition_agent.stop()
+
         if self.pub_socket is not None:
             self.pub_socket.close()
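Shutdown now stops the visual emotion recognition agent alongside the speech agent, and only afterwards closes the communication agent's own sockets. A compact sketch of that ordering, with stand-in classes instead of the real agents and ZMQ socket:

```python
import asyncio


class AgentStub:
    def __init__(self, name: str) -> None:
        self.name = name

    async def stop(self) -> None:
        print(f"stopped {self.name}")


class SocketStub:
    def close(self) -> None:
        print("pub socket closed")


async def shutdown(speech: AgentStub | None,
                   visual_emotion: AgentStub | None,
                   pub_socket: SocketStub | None) -> None:
    # 1. Stop the sub-agents first, each guarded against never having been created.
    if speech is not None:
        await speech.stop()
    if visual_emotion is not None:
        await visual_emotion.stop()
    # 2. Only then release the transport.
    if pub_socket is not None:
        pub_socket.close()


asyncio.run(shutdown(AgentStub("speech"), AgentStub("visual emotion"), SocketStub()))
```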
@@ -332,6 +337,7 @@ class RICommunicationAgent(BaseAgent):
         self.connected = True

     async def handle_message(self, msg: InternalMessage):
+        return
         try:
             pause_command = PauseCommand.model_validate_json(msg.body)
             await self._req_socket.send_json(pause_command.model_dump())
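For context, `handle_message` validates the incoming body as a `PauseCommand` and forwards it over the REQ socket as JSON. With pydantic v2, `model_validate_json` parses and validates the raw body in one step and `model_dump()` turns the model back into a plain dict, which is what `send_json` serialises with `json.dumps`. A minimal round-trip sketch (the `paused` field is a guessed model shape, not taken from this PR):

```python
import json

from pydantic import BaseModel


class PauseCommand(BaseModel):
    # Hypothetical shape; the real model lives in the control_backend package.
    paused: bool


# Incoming message body, e.g. from InternalMessage.body
raw_body = '{"paused": true}'

# Parse + validate the JSON payload (raises ValidationError on bad input).
pause_command = PauseCommand.model_validate_json(raw_body)

# model_dump() produces a plain dict, ready for zmq's send_json().
payload = pause_command.model_dump()
print(payload)               # {'paused': True}
print(json.dumps(payload))   # what would actually go over the wire
```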
@@ -7,6 +7,7 @@ import numpy as np
 import zmq
 import zmq.asyncio as azmq
 from pydantic_core import ValidationError
+import struct

 from control_backend.agents import BaseAgent
 from control_backend.agents.perception.visual_emotion_recognition_agent.visual_emotion_recognizer import ( # noqa
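The diff adds `import struct` to the recognition agent, but the hunks shown here do not include its call site. `struct` is typically used to pack or unpack small fixed binary headers, for example a length or frame-dimension prefix in front of raw image bytes. Purely as an illustration of that kind of use, not necessarily what this PR does with it:

```python
import struct

# Illustrative only: a big-endian (width, height) header in front of a payload.
width, height = 640, 480
payload = b"\xff\xd8..."  # e.g. the start of some JPEG bytes

header = struct.pack(">HH", width, height)   # two unsigned 16-bit ints, big-endian
message = header + payload

# Receiver side: peel the header off before treating the rest as image bytes.
w, h = struct.unpack(">HH", message[:4])
frame_bytes = message[4:]
print(w, h, len(frame_bytes))
```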
@@ -88,7 +89,7 @@ class VisualEmotionRecognitionAgent(BaseAgent):
         while self._running:
             try:
                 frame_bytes = await self.video_in_socket.recv()

                 # Convert bytes to a numpy buffer
                 nparr = np.frombuffer(frame_bytes, np.uint8)

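The receive loop turns the raw ZMQ message into a NumPy byte buffer and then decodes it into an image, presumably via something like `cv2.imdecode`, which matches the later `frame_image is None` check (OpenCV returns `None` for undecodable input instead of raising). A standalone sketch of that decode path, using a synthetically encoded frame instead of a socket:

```python
import cv2
import numpy as np

# Pretend this came from `await self.video_in_socket.recv()`:
# encode a dummy 480x640 black frame as JPEG to get realistic bytes.
dummy = np.zeros((480, 640, 3), dtype=np.uint8)
ok, encoded = cv2.imencode(".jpg", dummy)
assert ok
frame_bytes = encoded.tobytes()

# Convert bytes to a numpy buffer, then decode to a BGR image.
nparr = np.frombuffer(frame_bytes, np.uint8)
frame_image = cv2.imdecode(nparr, cv2.IMREAD_COLOR)

if frame_image is None:
    # imdecode signals corrupt/unsupported data by returning None.
    print("Received invalid video frame, skipping.")
else:
    print("decoded frame:", frame_image.shape)  # (480, 640, 3)
```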
@@ -97,6 +98,7 @@ class VisualEmotionRecognitionAgent(BaseAgent):

                 if frame_image is None:
                     # Could not decode image, skip this frame
+                    self.logger.warning("Received invalid video frame, skipping.")
                     continue

                 # Get the dominant emotion from each face
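After decoding, the loop extracts the dominant emotion for each detected face; the actual detection lives in the imported `visual_emotion_recognizer` module, which this diff does not show. A rough sketch of just the "dominant emotion" step, assuming the recognizer yields one score dictionary per face (hypothetical data and shapes):

```python
# Hypothetical recognizer output: one score dict per detected face.
faces = [
    {"happy": 0.81, "neutral": 0.15, "sad": 0.04},
    {"happy": 0.10, "neutral": 0.30, "angry": 0.60},
]

# Dominant emotion per face = label with the highest score; collected as a set
# so the windowed comparison further down can work on label sets.
dominant_emotions = {max(scores, key=scores.get) for scores in faces}
print(dominant_emotions)  # {'happy', 'angry'}
```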
@@ -124,6 +126,7 @@ class VisualEmotionRecognitionAgent(BaseAgent):
             except zmq.Again:
                 self.logger.warning("No video frame received within timeout.")

+
     async def update_emotions(self, prev_emotions: set[str], emotions: set[str]):
         """
         Compare emotions from previous window and current emotions,
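The `zmq.Again` branch is what pyzmq raises when a receive times out (for example with a receive timeout set on the socket), so the loop just logs and keeps going. `update_emotions` then compares the emotion labels of the previous window against the current one; with sets, what appeared and what disappeared fall out of two set differences. A sketch of that comparison only, with the publishing side omitted:

```python
prev_emotions = {"neutral", "happy"}
emotions = {"happy", "surprised"}

appeared = emotions - prev_emotions      # newly detected this window
disappeared = prev_emotions - emotions   # no longer detected
unchanged = emotions & prev_emotions     # present in both windows

print("appeared:", appeared)        # {'surprised'}
print("disappeared:", disappeared)  # {'neutral'}
print("unchanged:", unchanged)      # {'happy'}
```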