feat: fully implement visual emotion recognition agent in pipeline

ref: N25B-393
Storm
2026-01-16 13:26:53 +01:00
parent 0771b0d607
commit 05804c158d
6 changed files with 67 additions and 29 deletions


@@ -7,6 +7,9 @@ from zmq.asyncio import Context
 from control_backend.agents import BaseAgent
 from control_backend.agents.actuation.robot_gesture_agent import RobotGestureAgent
+from control_backend.agents.perception.visual_emotion_detection_agent.visual_emotion_recognition_agent import (
+    VisualEmotionRecognitionAgent,
+)
 from control_backend.core.config import settings
 from ..actuation.robot_speech_agent import RobotSpeechAgent
@@ -201,6 +204,13 @@ class RICommunicationAgent(BaseAgent):
             case "audio":
                 vad_agent = VADAgent(audio_in_address=addr, audio_in_bind=bind)
                 await vad_agent.start()
+            case "video":
+                visual_emotion_agent = VisualEmotionRecognitionAgent(
+                    settings.agent_settings.visual_emotion_recognition_name,
+                    socket_address=addr,
+                    bind=bind,
+                )
+                await visual_emotion_agent.start()
             case _:
                 self.logger.warning("Unhandled negotiation id: %s", id)
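For context, the new "video" branch only relies on a small surface of the agent: a positional agent name, a socket_address keyword, a bind flag, and an awaitable start(). The sketch below illustrates that assumed interface; the class name, the SUB socket, and the receive loop are hypothetical stand-ins, not the actual implementation under control_backend.agents.perception.

# Hypothetical sketch of the constructor and lifecycle the "video" case above
# calls into. Only the argument names (name, socket_address, bind) and the
# awaitable start() are taken from the diff; everything else is an
# illustrative assumption.
import asyncio

import zmq
from zmq.asyncio import Context


class VisualEmotionAgentSketch:
    def __init__(self, name: str, socket_address: str, bind: bool = False):
        self.name = name
        self.socket_address = socket_address
        self.bind = bind
        self._ctx = Context.instance()
        self._task: asyncio.Task | None = None

    async def start(self) -> None:
        # Attach a SUB socket to the negotiated video address, binding or
        # connecting depending on the negotiation result.
        sock = self._ctx.socket(zmq.SUB)
        sock.setsockopt(zmq.SUBSCRIBE, b"")
        if self.bind:
            sock.bind(self.socket_address)
        else:
            sock.connect(self.socket_address)
        self._task = asyncio.create_task(self._recv_loop(sock))

    async def _recv_loop(self, sock) -> None:
        while True:
            frame = await sock.recv()  # raw frame bytes; format is assumed
            # The real agent would run emotion inference on `frame` here.
            _ = frame

Mirroring the existing audio path (VADAgent) keeps negotiation handling uniform: each negotiated media id maps to one perception agent that owns its own socket.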