chore: quick push before demo; fixed image receiving from RI

This commit is contained in:
Storm
2026-01-20 12:46:30 +01:00
parent 424294b0a3
commit f9b807fc97
2 changed files with 10 additions and 1 deletion

View File

@@ -55,6 +55,7 @@ class RICommunicationAgent(BaseAgent):
self.connected = False
self.gesture_agent: RobotGestureAgent | None = None
self.speech_agent: RobotSpeechAgent | None = None
self.visual_emotion_recognition_agent: VisualEmotionRecognitionAgent | None = None
async def setup(self):
"""
@@ -218,6 +219,7 @@ class RICommunicationAgent(BaseAgent):
socket_address=addr,
bind=bind,
)
self.visual_emotion_recognition_agent = visual_emotion_agent
await visual_emotion_agent.start()
case _:
self.logger.warning("Unhandled negotiation id: %s", id)
@@ -323,6 +325,9 @@ class RICommunicationAgent(BaseAgent):
if self.speech_agent is not None:
await self.speech_agent.stop()
if self.visual_emotion_recognition_agent is not None:
await self.visual_emotion_recognition_agent.stop()
if self.pub_socket is not None:
self.pub_socket.close()
@@ -332,6 +337,7 @@ class RICommunicationAgent(BaseAgent):
self.connected = True
async def handle_message(self, msg: InternalMessage):
return
try:
pause_command = PauseCommand.model_validate_json(msg.body)
await self._req_socket.send_json(pause_command.model_dump())

View File

@@ -7,6 +7,7 @@ import numpy as np
import zmq
import zmq.asyncio as azmq
from pydantic_core import ValidationError
import struct
from control_backend.agents import BaseAgent
from control_backend.agents.perception.visual_emotion_recognition_agent.visual_emotion_recognizer import ( # noqa
@@ -88,7 +89,7 @@ class VisualEmotionRecognitionAgent(BaseAgent):
while self._running:
try:
frame_bytes = await self.video_in_socket.recv()
# Convert bytes to a numpy buffer
nparr = np.frombuffer(frame_bytes, np.uint8)
@@ -97,6 +98,7 @@ class VisualEmotionRecognitionAgent(BaseAgent):
if frame_image is None:
# Could not decode image, skip this frame
self.logger.warning("Received invalid video frame, skipping.")
continue
# Get the dominant emotion from each face
@@ -124,6 +126,7 @@ class VisualEmotionRecognitionAgent(BaseAgent):
except zmq.Again:
self.logger.warning("No video frame received within timeout.")
async def update_emotions(self, prev_emotions: set[str], emotions: set[str]):
"""
Compare emotions from previous window and current emotions,