feat: visual emotion recognition agent #54

Merged
s.o.h.luijkx merged 27 commits from feat/visual-emotion-recognition into main 2026-01-30 16:53:16 +00:00
2 changed files with 10 additions and 1 deletions
Showing only changes of commit f9b807fc97 - Show all commits

View File

@@ -55,6 +55,7 @@ class RICommunicationAgent(BaseAgent):
        self.connected = False
        self.gesture_agent: RobotGestureAgent | None = None
        self.speech_agent: RobotSpeechAgent | None = None
        self.visual_emotion_recognition_agent: VisualEmotionRecognitionAgent | None = None

    async def setup(self):
        """
@@ -218,6 +219,7 @@ class RICommunicationAgent(BaseAgent):
                    socket_address=addr,
                    bind=bind,
                )
                self.visual_emotion_recognition_agent = visual_emotion_agent
                await visual_emotion_agent.start()
            case _:
                self.logger.warning("Unhandled negotiation id: %s", id)
@@ -324,6 +326,9 @@ class RICommunicationAgent(BaseAgent):
        if self.speech_agent is not None:
            await self.speech_agent.stop()
        if self.visual_emotion_recognition_agent is not None:
            await self.visual_emotion_recognition_agent.stop()
        if self.pub_socket is not None:
            self.pub_socket.close()
@@ -332,6 +337,7 @@ class RICommunicationAgent(BaseAgent):
        self.connected = True

    async def handle_message(self, msg: InternalMessage):
        return
        try:
            pause_command = PauseCommand.model_validate_json(msg.body)
            await self._req_socket.send_json(pause_command.model_dump())

View File

@@ -7,6 +7,7 @@ import numpy as np
import zmq
import zmq.asyncio as azmq
from pydantic_core import ValidationError
import struct

from control_backend.agents import BaseAgent
from control_backend.agents.perception.visual_emotion_recognition_agent.visual_emotion_recognizer import (  # noqa
@@ -97,6 +98,7 @@ class VisualEmotionRecognitionAgent(BaseAgent):
                if frame_image is None:
                    # Could not decode image, skip this frame
                    self.logger.warning("Received invalid video frame, skipping.")
                    continue

                # Get the dominant emotion from each face
@@ -124,6 +126,7 @@ class VisualEmotionRecognitionAgent(BaseAgent):
        except zmq.Again:
            self.logger.warning("No video frame received within timeout.")

    async def update_emotions(self, prev_emotions: set[str], emotions: set[str]):
        """
        Compare emotions from previous window and current emotions,