Compare commits

...

31 Commits

Author SHA1 Message Date
Storm
ce99dc5ec3 Merge branch 'main' into feat/face-recognition 2026-01-30 17:23:01 +01:00
JobvAlewijk
12b905ff22 Merge branch 'main' of https://git.science.uu.nl/ics/sp/2025/n25b/pepperplus-cb into feat/face-recognition 2026-01-30 16:26:30 +01:00
JobvAlewijk
11d7c1409e chore: remove pipeline stuff 2026-01-30 16:02:40 +01:00
JobvAlewijk
f89fb2266a feat: added face recognition and tests
ref: N25B-397
2026-01-30 15:59:27 +01:00
Twirre Meulenbelt
0f964795f3 feat: subscribe instead of req/res for face detection
ref: N25B-395
2026-01-29 17:16:38 +01:00
JobvAlewijk
00a13426a0 Merge branch 'main' of https://git.science.uu.nl/ics/sp/2025/n25b/pepperplus-cb into feat/face-recognition 2026-01-29 16:21:55 +01:00
JobvAlewijk
f6477e5325 chore: blabla 2026-01-29 16:20:51 +01:00
JobvAlewijk
09d8cca309 Merge branch 'feat/visual-emotion-recognition' of https://git.science.uu.nl/ics/sp/2025/n25b/pepperplus-cb into feat/face-recognition 2026-01-21 17:57:23 +01:00
Storm
0b1c2ce20a feat: implemented pausing, implemented graceful stopping, removed old RI pausing code
ref: N25B-393
2026-01-20 18:53:24 +01:00
Storm
f9b807fc97 chore: quick push before demo; fixed image receiving from RI 2026-01-20 12:46:30 +01:00
Storm
424294b0a3 Merged feat/longer-pauses-possible into feat/visual-emotion-recognition 2026-01-19 18:35:07 +01:00
Pim Hutting
bc0947fac1 chore: added a dot 2026-01-19 18:26:15 +01:00
Storm
cd80cdf93b Merge branch 'feat/longer-pauses-possible' into feat/visual-emotion-recognition 2026-01-19 18:24:31 +01:00
Storm
985327de70 docs: updated docstrings and fixed styling
ref: N25B-393
2026-01-19 12:52:00 +01:00
Storm
302c50934e feat: implemented emotion recognition functionality in AgentSpeak
ref: N25B-393
2026-01-19 12:10:58 +01:00
Storm
f9c69cafb3 Merge branch 'feat/reset-experiment-and-phase' into feat/visual-emotion-recognition 2026-01-19 11:45:31 +01:00
JobvAlewijk
37da9992ba feat: added face detection and communication with RI
ref: N25B-397
2026-01-17 14:02:34 +01:00
JobvAlewijk
f41201dd8e chore: revert 2026-01-16 16:44:49 +01:00
JobvAlewijk
2033e02116 Merge branch 'dev' of https://git.science.uu.nl/ics/sp/2025/n25b/pepperplus-cb into feat/face-recognition 2026-01-16 16:38:59 +01:00
Storm
1b0b72d63a chore: fixed broken uv.lock file 2026-01-16 15:10:55 +01:00
Storm
0941b26703 refactor: updated how changes are passed to bdi_core_agent after merge
ref: N25B-393
2026-01-16 15:05:13 +01:00
Storm
48ae0c7a12 Merge remote-tracking branch 'origin/feat/reset-experiment-and-phase' into feat/visual-emotion-recognition 2026-01-16 14:45:16 +01:00
Storm
a09d8b3d9a chore: small changes 2026-01-16 14:40:59 +01:00
Storm
ac20048f02 Merge branch 'dev' into feat/visual-emotion-recognition 2026-01-16 14:16:28 +01:00
Storm
05804c158d feat: fully implemented visual emotion recognition agent in pipeline
ref: N25B-393
2026-01-16 13:26:53 +01:00
Storm
0771b0d607 feat: implemented visual emotion recogntion agent
ref: N25B-393
2026-01-16 09:50:59 +01:00
Storm
1c88ae6078 feat: visual emotion recognition agent
ref: N25B-393
2026-01-13 12:41:18 +01:00
JobvAlewijk
6b790de53a Merge branch 'dev' of https://git.science.uu.nl/ics/sp/2025/n25b/pepperplus-cb into feat/face-recognition 2026-01-12 14:23:44 +01:00
JobvAlewijk
1932ac959b feat: connected with RI properly
ref: N25B-397
2026-01-12 14:23:00 +01:00
JobvAlewijk
bb0c1bd383 feat: cb can communicate face with ri
ref: N25B-397
2026-01-08 13:02:32 +01:00
JobvAlewijk
03954bef54 feat: face recognition agent
ref: N25B-397
2026-01-04 19:47:18 +01:00
15 changed files with 1503 additions and 124 deletions

View File

@@ -7,6 +7,7 @@ requires-python = ">=3.13"
 dependencies = [
     "agentspeak>=0.2.2",
     "colorlog>=6.10.1",
+    "deepface>=0.0.96",
     "fastapi[all]>=0.115.6",
     "mlx-whisper>=0.4.3 ; sys_platform == 'darwin'",
     "numpy>=2.3.3",
@@ -21,6 +22,7 @@ dependencies = [
     "silero-vad>=6.0.0",
     "sphinx>=7.3.7",
     "sphinx-rtd-theme>=3.0.2",
+    "tf-keras>=2.20.1",
     "torch>=2.8.0",
     "uvicorn>=0.37.0",
 ]
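
The tf-keras pin is presumably needed because DeepFace still targets the legacy Keras 2 API, which tf-keras keeps importable alongside Keras 3 in recent TensorFlow releases. A minimal smoke test for the new dependencies, assuming an environment where they are installed (the dummy frame mirrors the warm-up trick used in the recognizer further down):

import numpy as np
from deepface import DeepFace

# enforce_detection=False lets analyze() run even when no face is found,
# so an all-black dummy frame is safe for a warm-up/smoke test.
frame = np.zeros((224, 224, 3), dtype=np.uint8)
result = DeepFace.analyze(frame, actions=["emotion"], enforce_detection=False)
print(result[0]["dominant_emotion"])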

View File

@@ -29,6 +29,8 @@ from control_backend.schemas.program import (
     BaseGoal,
     BasicNorm,
     ConditionalNorm,
+    EmotionBelief,
+    FaceBelief,
     GestureAction,
     Goal,
     InferredBelief,
@@ -682,6 +684,14 @@ class AgentSpeakGenerator:
         """
         return AstLiteral(self.slugify(sb))

+    @_astify.register
+    def _(self, eb: EmotionBelief) -> AstExpression:
+        return AstLiteral("emotion_detected", [AstAtom(eb.emotion)])
+
+    @_astify.register
+    def _(self, eb: FaceBelief) -> AstExpression:
+        return AstLiteral("face_present")
+
     @_astify.register
     def _(self, ib: InferredBelief) -> AstExpression:
         """

View File

@@ -9,17 +9,17 @@ import json
 import zmq
 import zmq.asyncio as azmq
-from pydantic import ValidationError
 from zmq.asyncio import Context

 from control_backend.agents import BaseAgent
 from control_backend.agents.actuation.robot_gesture_agent import RobotGestureAgent
+from control_backend.agents.perception.visual_emotion_recognition_agent.visual_emotion_recognition_agent import (  # noqa
+    VisualEmotionRecognitionAgent,
+)
 from control_backend.core.config import settings
-from control_backend.schemas.internal_message import InternalMessage
-from control_backend.schemas.ri_message import PauseCommand

 from ..actuation.robot_speech_agent import RobotSpeechAgent
-from ..perception import VADAgent
+from ..perception import FacePerceptionAgent, VADAgent


 class RICommunicationAgent(BaseAgent):
@@ -58,6 +58,7 @@ class RICommunicationAgent(BaseAgent):
         self.connected = False
         self.gesture_agent: RobotGestureAgent | None = None
         self.speech_agent: RobotSpeechAgent | None = None
+        self.visual_emotion_recognition_agent: VisualEmotionRecognitionAgent | None = None

     async def setup(self):
         """
@@ -180,7 +181,7 @@ class RICommunicationAgent(BaseAgent):
             bind = port_data["bind"]

             if not bind:
-                addr = f"tcp://{settings.ri_host}:{port}"
+                addr = f"tcp://localhost:{port}"
             else:
                 addr = f"tcp://*:{port}"
@@ -215,6 +216,21 @@ class RICommunicationAgent(BaseAgent):
                 case "audio":
                     vad_agent = VADAgent(audio_in_address=addr, audio_in_bind=bind)
                     await vad_agent.start()
+                case "video":
+                    visual_emotion_agent = VisualEmotionRecognitionAgent(
+                        settings.agent_settings.visual_emotion_recognition_name,
+                        socket_address=addr,
+                        bind=bind,
+                    )
+                    self.visual_emotion_recognition_agent = visual_emotion_agent
+                    await visual_emotion_agent.start()
+                case "face":
+                    face_agent = FacePerceptionAgent(
+                        settings.agent_settings.face_agent_name,
+                        zmq_address=addr,
+                        zmq_bind=bind,
+                    )
+                    await face_agent.start()
                 case _:
                     self.logger.warning("Unhandled negotiation id: %s", id)
@@ -320,17 +336,12 @@ class RICommunicationAgent(BaseAgent):
         if self.speech_agent is not None:
             await self.speech_agent.stop()
+        if self.visual_emotion_recognition_agent is not None:
+            await self.visual_emotion_recognition_agent.stop()
         if self.pub_socket is not None:
             self.pub_socket.close()

         self.logger.debug("Restarting communication negotiation.")
         if await self._negotiate_connection(max_retries=2):
             self.connected = True
-
-    async def handle_message(self, msg: InternalMessage):
-        try:
-            pause_command = PauseCommand.model_validate_json(msg.body)
-            await self._req_socket.send_json(pause_command.model_dump())
-            self.logger.debug(await self._req_socket.recv_json())
-        except ValidationError:
-            self.logger.warning("Incorrect message format for PauseCommand.")

View File

@@ -7,6 +7,7 @@ Agents responsible for processing sensory input, such as audio transcription and
 detection.
 """

+from .face_rec_agent import FacePerceptionAgent as FacePerceptionAgent
 from .transcription_agent.transcription_agent import (
     TranscriptionAgent as TranscriptionAgent,
 )

View File

@@ -0,0 +1,144 @@
"""
This program has been developed by students from the bachelor Computer Science at Utrecht
University within the Software Project course.
© Copyright Utrecht University (Department of Information and Computing Sciences)
"""
import asyncio
import zmq
import zmq.asyncio as azmq
from control_backend.agents import BaseAgent
from control_backend.core.agent_system import InternalMessage
from control_backend.core.config import settings
from control_backend.schemas.belief_message import Belief, BeliefMessage
class FacePerceptionAgent(BaseAgent):
"""
Receives face presence updates from the RICommunicationAgent
via the internal PUB/SUB bus.
"""
def __init__(self, name: str, zmq_address: str, zmq_bind: bool):
"""
:param name: The name of the agent.
:param zmq_address: The ZMQ address to subscribe to, an endpoint which sends face presence
updates.
:param zmq_bind: Whether to connect to the ZMQ endpoint, or to bind.
"""
super().__init__(name)
self._zmq_address = zmq_address
self._zmq_bind = zmq_bind
self._socket: azmq.Socket | None = None
self._last_face_state: bool | None = None
# Pause functionality
# NOTE: flag is set when running, cleared when paused
self._paused = asyncio.Event()
self._paused.set()
async def setup(self):
self.logger.info("Starting FacePerceptionAgent")
if self._socket is None:
self._connect_socket()
self.add_behavior(self._poll_loop())
self.logger.info("Finished setting up %s", self.name)
def _connect_socket(self):
if self._socket is not None:
self.logger.warning("ZMQ socket already initialized. Did you call setup() twice?")
return
self._socket = azmq.Context.instance().socket(zmq.SUB)
self._socket.setsockopt_string(zmq.SUBSCRIBE, "")
if self._zmq_bind:
self._socket.bind(self._zmq_address)
else:
self._socket.connect(self._zmq_address)
async def _poll_loop(self):
if self._socket is None:
self.logger.warning("Connection not initialized before poll loop. Call setup() first.")
return
while self._running:
try:
await self._paused.wait()
response = await asyncio.wait_for(
self._socket.recv_json(), timeout=settings.behaviour_settings.sleep_s
)
face_present = response.get("face_detected", False)
if self._last_face_state is None:
self._last_face_state = face_present
continue
if face_present != self._last_face_state:
self._last_face_state = face_present
self.logger.debug("Face detected" if face_present else "Face lost")
await self._update_face_belief(face_present)
except TimeoutError:
pass
except Exception as e:
self.logger.error("Face polling failed", exc_info=e)
async def _post_face_belief(self, present: bool):
"""
Send a face_present belief update to the BDI Core Agent.
"""
if present:
belief_msg = BeliefMessage(create=[{"name": "face_present", "arguments": []}])
else:
belief_msg = BeliefMessage(delete=[{"name": "face_present", "arguments": []}])
msg = InternalMessage(
to=settings.agent_settings.bdi_core_name,
sender=self.name,
thread="beliefs",
body=belief_msg.model_dump_json(),
)
await self.send(msg)
async def _update_face_belief(self, present: bool):
"""
Add or remove the `face_present` belief in the BDI Core Agent.
"""
if present:
payload = BeliefMessage(create=[Belief(name="face_present").model_dump()])
else:
payload = BeliefMessage(delete=[Belief(name="face_present").model_dump()])
message = InternalMessage(
to=settings.agent_settings.bdi_core_name,
sender=self.name,
thread="beliefs",
body=payload.model_dump_json(),
)
await self.send(message)
async def handle_message(self, msg: InternalMessage):
"""
Handle incoming pause/resume commands from User Interrupt Agent.
"""
sender = msg.sender
if sender == settings.agent_settings.user_interrupt_name:
if msg.body == "PAUSE":
self.logger.info("Pausing Face Perception processing.")
self._paused.clear()
self._last_face_state = None
elif msg.body == "RESUME":
self.logger.info("Resuming Face Perception processing.")
self._paused.set()
else:
self.logger.warning("Unknown command from User Interrupt Agent: %s", msg.body)
else:
self.logger.debug("Ignoring message from unknown sender: %s", sender)

View File

@@ -0,0 +1,207 @@
import asyncio
import json
import time
from collections import Counter, defaultdict

import cv2
import numpy as np
import zmq
import zmq.asyncio as azmq
from pydantic_core import ValidationError

from control_backend.agents import BaseAgent
from control_backend.agents.perception.visual_emotion_recognition_agent.visual_emotion_recognizer import (  # noqa
    DeepFaceEmotionRecognizer,
)
from control_backend.core.agent_system import InternalMessage
from control_backend.core.config import settings
from control_backend.schemas.belief_message import Belief


class VisualEmotionRecognitionAgent(BaseAgent):
    def __init__(
        self,
        name: str,
        socket_address: str,
        bind: bool = False,
        timeout_ms: int = 1000,
        window_duration: int = settings.behaviour_settings.visual_emotion_recognition_window_duration_s,  # noqa
        min_frames_required: int = settings.behaviour_settings.visual_emotion_recognition_min_frames_per_face,  # noqa
    ):
        """
        Initialize the Visual Emotion Recognition Agent.

        :param name: Name of the agent
        :param socket_address: Address of the socket to connect or bind to
        :param bind: Whether to bind to the socket address (True) or connect (False)
        :param timeout_ms: Timeout for socket receive operations in milliseconds
        :param window_duration: Duration in seconds over which to aggregate emotions
        :param min_frames_required: Minimum number of frames per face required to consider a face
            valid
        """
        super().__init__(name)
        self.socket_address = socket_address
        self.socket_bind = bind
        self.timeout_ms = timeout_ms
        self.window_duration = window_duration
        self.min_frames_required = min_frames_required

        # Pause functionality
        # NOTE: flag is set when running, cleared when paused
        self._paused = asyncio.Event()
        self._paused.set()

    async def setup(self):
        """
        Initialize the agent resources.

        1. Initializes the :class:`VisualEmotionRecognizer`.
        2. Connects to the video input ZMQ socket.
        3. Starts the background emotion recognition loop.
        """
        self.logger.info("Setting up %s.", self.name)
        self.emotion_recognizer = DeepFaceEmotionRecognizer()
        self.video_in_socket = azmq.Context.instance().socket(zmq.SUB)
        if self.socket_bind:
            self.video_in_socket.bind(self.socket_address)
        else:
            self.video_in_socket.connect(self.socket_address)
        self.video_in_socket.setsockopt_string(zmq.SUBSCRIBE, "")
        self.video_in_socket.setsockopt(zmq.RCVTIMEO, self.timeout_ms)
        self.video_in_socket.setsockopt(zmq.CONFLATE, 1)
        self.add_behavior(self.emotion_update_loop())
        self.logger.info("Finished setting up %s", self.name)

    async def emotion_update_loop(self):
        """
        Background loop to receive video frames, recognize emotions, and update beliefs.

        1. Receives video frames from the ZMQ socket.
        2. Uses the :class:`VisualEmotionRecognizer` to detect emotions.
        3. Aggregates emotions over a time window.
        4. Sends updates to the BDI Core Agent about detected emotions.
        """
        # Next time to process the window and update emotions
        next_window_time = time.time() + self.window_duration
        # Tracks counts of detected emotions per face index
        face_stats = defaultdict(Counter)
        prev_dominant_emotions = set()

        while self._running:
            try:
                await self._paused.wait()
                frame_bytes = await self.video_in_socket.recv()
                # Convert bytes to a numpy buffer
                nparr = np.frombuffer(frame_bytes, np.uint8)
                # Decode image into the generic Numpy Array DeepFace expects
                frame_image = cv2.imdecode(nparr, cv2.IMREAD_COLOR)
                if frame_image is None:
                    # Could not decode image, skip this frame
                    self.logger.warning("Received invalid video frame, skipping.")
                    continue

                # Get the dominant emotion from each face
                current_emotions = self.emotion_recognizer.sorted_dominant_emotions(frame_image)
                # Update emotion counts for each detected face
                for i, emotion in enumerate(current_emotions):
                    face_stats[i][emotion] += 1

                # If window duration has passed, process the collected stats
                if time.time() >= next_window_time:
                    window_dominant_emotions = set()
                    # Determine dominant emotion for each face in the window
                    for _, counter in face_stats.items():
                        total_detections = sum(counter.values())
                        if total_detections >= self.min_frames_required:
                            dominant_emotion = counter.most_common(1)[0][0]
                            window_dominant_emotions.add(dominant_emotion)
                    await self.update_emotions(prev_dominant_emotions, window_dominant_emotions)
                    prev_dominant_emotions = window_dominant_emotions
                    face_stats.clear()
                    next_window_time = time.time() + self.window_duration
            except zmq.Again:
                self.logger.warning("No video frame received within timeout.")

    async def update_emotions(self, prev_emotions: set[str], emotions: set[str]):
        """
        Compare emotions from previous window and current emotions,
        send updates to BDI Core Agent.
        """
        emotions_to_remove = prev_emotions - emotions
        emotions_to_add = emotions - prev_emotions
        if not emotions_to_add and not emotions_to_remove:
            return

        emotion_beliefs_remove = []
        for emotion in emotions_to_remove:
            self.logger.info(f"Emotion '{emotion}' has disappeared.")
            try:
                emotion_beliefs_remove.append(
                    Belief(name="emotion_detected", arguments=[emotion], remove=True)
                )
            except ValidationError:
                self.logger.warning("Invalid belief for emotion removal: %s", emotion)

        emotion_beliefs_add = []
        for emotion in emotions_to_add:
            self.logger.info(f"New emotion detected: '{emotion}'")
            try:
                emotion_beliefs_add.append(Belief(name="emotion_detected", arguments=[emotion]))
            except ValidationError:
                self.logger.warning("Invalid belief for new emotion: %s", emotion)

        beliefs_list_add = [b.model_dump() for b in emotion_beliefs_add]
        beliefs_list_remove = [b.model_dump() for b in emotion_beliefs_remove]
        payload = {"create": beliefs_list_add, "delete": beliefs_list_remove}
        message = InternalMessage(
            to=settings.agent_settings.bdi_core_name,
            sender=self.name,
            body=json.dumps(payload),
            thread="beliefs",
        )
        await self.send(message)

    async def handle_message(self, msg: InternalMessage):
        """
        Handle incoming messages.

        Expects messages to pause or resume the Visual Emotion Recognition
        processing from User Interrupt Agent.

        :param msg: The received internal message.
        """
        sender = msg.sender
        if sender == settings.agent_settings.user_interrupt_name:
            if msg.body == "PAUSE":
                self.logger.info("Pausing Visual Emotion Recognition processing.")
                self._paused.clear()
            elif msg.body == "RESUME":
                self.logger.info("Resuming Visual Emotion Recognition processing.")
                self._paused.set()
            else:
                self.logger.warning(f"Unknown command from User Interrupt Agent: {msg.body}")
        else:
            self.logger.debug(f"Ignoring message from unknown sender: {sender}")

    async def stop(self):
        """
        Clean up resources used by the agent.
        """
        self.video_in_socket.close()
        await super().stop()
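
The windowing logic above is easy to check in isolation. A self-contained sketch of the same Counter-per-face aggregation (the frame data and threshold are illustrative values, not project defaults):

from collections import Counter, defaultdict

# Simulated per-frame dominant emotions, one entry per detected face,
# ordered left to right (mirroring sorted_dominant_emotions()).
frames = [
    ["happy", "sad"],
    ["happy", "sad"],
    ["happy"],          # second face missed in this frame
    ["neutral", "sad"],
]

min_frames_required = 3
face_stats: defaultdict[int, Counter] = defaultdict(Counter)
for emotions in frames:
    for i, emotion in enumerate(emotions):
        face_stats[i][emotion] += 1

window_dominant = set()
for counter in face_stats.values():
    if sum(counter.values()) >= min_frames_required:
        window_dominant.add(counter.most_common(1)[0][0])

print(window_dominant)  # {'happy', 'sad'}: face 0 saw happy 3/4, face 1 sad 3/3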

View File

@@ -0,0 +1,54 @@
import abc

import numpy as np
from deepface import DeepFace


class VisualEmotionRecognizer(abc.ABC):
    @abc.abstractmethod
    def load_model(self):
        """Load the visual emotion recognition model into memory."""
        pass

    @abc.abstractmethod
    def sorted_dominant_emotions(self, image) -> list[str]:
        """
        Recognize dominant emotions from faces in the given image.

        Emotions can be one of ['angry', 'disgust', 'fear', 'happy', 'sad', 'surprise', 'neutral'].
        To minimize false positives, consider filtering faces with low confidence.

        :param image: The input image for emotion recognition.
        :return: List of dominant emotion detected for each face in the image,
            sorted per face.
        """
        pass


class DeepFaceEmotionRecognizer(VisualEmotionRecognizer):
    """
    DeepFace-based implementation of VisualEmotionRecognizer.

    DeepFace has proven to be quite a pessimistic model, so expect sad, fear and neutral
    emotions to be over-represented.
    """

    def __init__(self):
        self.load_model()

    def load_model(self):
        print("Loading Deepface Emotion Model...")
        dummy_img = np.zeros((224, 224, 3), dtype=np.uint8)
        # analyze does not take a model as an argument, calling it once on a dummy image to load
        # the model
        DeepFace.analyze(dummy_img, actions=["emotion"], enforce_detection=False)
        print("Deepface Emotion Model loaded.")

    def sorted_dominant_emotions(self, image) -> list[str]:
        analysis = DeepFace.analyze(image, actions=["emotion"], enforce_detection=False)
        # Sort faces by x coordinate to maintain left-to-right order
        analysis.sort(key=lambda face: face["region"]["x"])
        analysis = [face for face in analysis if face["face_confidence"] >= 0.90]
        dominant_emotions = [face["dominant_emotion"] for face in analysis]
        return dominant_emotions
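
A quick usage sketch of the recognizer on its own; the test image path is hypothetical, and cv2 is already a project dependency per the agent above:

import cv2

from control_backend.agents.perception.visual_emotion_recognition_agent.visual_emotion_recognizer import (
    DeepFaceEmotionRecognizer,
)

recognizer = DeepFaceEmotionRecognizer()  # loads the model once up front

frame = cv2.imread("two_people.jpg")  # hypothetical test image, BGR as OpenCV loads it
print(recognizer.sorted_dominant_emotions(frame))
# e.g. ['happy', 'neutral']: one entry per confident face, left to right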

View File

@@ -18,7 +18,6 @@ from control_backend.schemas.belief_message import Belief, BeliefMessage
 from control_backend.schemas.program import ConditionalNorm, Goal, Program
 from control_backend.schemas.ri_message import (
     GestureCommand,
-    PauseCommand,
     RIEndpoint,
     SpeechCommand,
 )
@@ -398,34 +397,35 @@ class UserInterruptAgent(BaseAgent):
         self.logger.debug("Sending experiment control '%s' to BDI Core.", thread)
         await self.send(out_msg)

-    async def _send_pause_command(self, pause):
+    async def _send_pause_command(self, pause: str):
         """
-        Send a pause command to the Robot Interface via the RI Communication Agent.
-        Send a pause command to the other internal agents; for now just VAD agent.
+        Send a pause command to the other internal agents; for now just VAD and VED agent.
         """
-        cmd = PauseCommand(data=pause)
-        message = InternalMessage(
-            to=settings.agent_settings.ri_communication_name,
-            sender=self.name,
-            body=cmd.model_dump_json(),
-        )
-        await self.send(message)
         if pause == "true":
-            # Send pause to VAD agent
+            # Send pause to VAD and VED agent
             vad_message = InternalMessage(
-                to=settings.agent_settings.vad_name,
+                to=[
+                    settings.agent_settings.vad_name,
+                    settings.agent_settings.visual_emotion_recognition_name,
+                    settings.agent_settings.face_agent_name,
+                ],
                 sender=self.name,
                 body="PAUSE",
             )
             await self.send(vad_message)
-            self.logger.info("Sent pause command to VAD Agent and RI Communication Agent.")
+            # Voice Activity Detection and Visual Emotion Recognition agents
+            self.logger.info("Sent pause command to perception agents.")
         else:
-            # Send resume to VAD agent
+            # Send resume to VAD and VED agents
             vad_message = InternalMessage(
-                to=settings.agent_settings.vad_name,
+                to=[
+                    settings.agent_settings.vad_name,
+                    settings.agent_settings.visual_emotion_recognition_name,
+                    settings.agent_settings.face_agent_name,
+                ],
                 sender=self.name,
                 body="RESUME",
             )
             await self.send(vad_message)
-            self.logger.info("Sent resume command to VAD Agent and RI Communication Agent.")
+            # Voice Activity Detection and Visual Emotion Recognition agents
+            self.logger.info("Sent resume command to perception agents.")

View File

@@ -12,7 +12,7 @@ api_router = APIRouter()

 api_router.include_router(message.router, tags=["Messages"])
-api_router.include_router(robot.router, prefix="/robot", tags=["Pings", "Commands"])
+api_router.include_router(robot.router, prefix="/robot", tags=["Pings", "Commands", "Face"])
 api_router.include_router(logs.router, tags=["Logs"])

View File

@@ -54,6 +54,7 @@ class AgentSettings(BaseModel):
     # agent names
     bdi_core_name: str = "bdi_core_agent"
     bdi_program_manager_name: str = "bdi_program_manager_agent"
+    visual_emotion_recognition_name: str = "visual_emotion_recognition_agent"
     text_belief_extractor_name: str = "text_belief_extractor_agent"
     vad_name: str = "vad_agent"
     llm_name: str = "llm_agent"
@@ -63,6 +64,7 @@ class AgentSettings(BaseModel):
     robot_speech_name: str = "robot_speech_agent"
     robot_gesture_name: str = "robot_gesture_agent"
     user_interrupt_name: str = "user_interrupt_agent"
+    face_agent_name: str = "face_detection_agent"

 class BehaviourSettings(BaseModel):
@@ -83,6 +85,10 @@ class BehaviourSettings(BaseModel):
     :ivar conversation_history_length_limit: The maximum amount of messages to extract beliefs from.
     :ivar trigger_time_to_wait: Amount of milliseconds to wait before informing the UI about trigger
         completion.
+    :ivar visual_emotion_recognition_window_duration_s: Duration in seconds over which to aggregate
+        emotions and update emotion beliefs.
+    :ivar visual_emotion_recognition_min_frames_per_face: Minimum number of frames per face required
+        to consider a face valid.
     """

     # ATTENTION: When adding/removing settings, make sure to update the .env.example file
@@ -110,6 +116,10 @@ class BehaviourSettings(BaseModel):
     trigger_time_to_wait: int = 2000
     agentspeak_file: str = "src/control_backend/agents/bdi/agentspeak.asl"

+    # Visual Emotion Recognition settings
+    visual_emotion_recognition_window_duration_s: int = 5
+    visual_emotion_recognition_min_frames_per_face: int = 3

 class LLMSettings(BaseModel):
     """

View File

@@ -41,8 +41,8 @@ class LogicalOperator(Enum):
     OR = "OR"

-type Belief = KeywordBelief | SemanticBelief | InferredBelief
-type BasicBelief = KeywordBelief | SemanticBelief
+type Belief = KeywordBelief | SemanticBelief | InferredBelief | EmotionBelief | FaceBelief
+type BasicBelief = KeywordBelief | SemanticBelief | EmotionBelief | FaceBelief

 class KeywordBelief(ProgramElement):
@@ -106,6 +106,27 @@ class InferredBelief(ProgramElement):
     right: Belief

+class EmotionBelief(ProgramElement):
+    """
+    Represents a belief that is set when a certain emotion is detected.
+
+    :ivar emotion: The emotion on which this belief gets set.
+    """
+
+    name: str = ""
+    emotion: str
+
+
+class FaceBelief(ProgramElement):
+    """
+    Represents the belief that at least one face is currently detected.
+    This belief is maintained by a perception agent (not inferred).
+    """
+
+    face_present: bool
+    name: str = ""
+
 class Norm(ProgramElement):
     """
     Base class for behavioral norms that guide the robot's interactions.
@@ -315,3 +336,9 @@ class Program(BaseModel):
     """

     phases: list[Phase]
+
+if __name__ == "__main__":
+    input = input("Enter program JSON: ")
+    program = Program.model_validate_json(input)
+    print(program)
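
For a feel of the wire format, a hedged sketch of the new belief types when dumped with pydantic; this assumes ProgramElement adds no extra required fields beyond those shown:

from control_backend.schemas.program import EmotionBelief, FaceBelief

eb = EmotionBelief(emotion="happy")
print(eb.model_dump())  # roughly {'name': '', 'emotion': 'happy'}

fb = FaceBelief(face_present=True)
print(fb.model_dump())  # roughly {'face_present': True, 'name': ''}

# Via the AgentSpeakGenerator overloads above, these become the literals
# emotion_detected(happy) and face_present respectively.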

View File

@@ -10,8 +10,6 @@ from unittest.mock import ANY, AsyncMock, MagicMock, patch
 import pytest

 from control_backend.agents.communication.ri_communication_agent import RICommunicationAgent
-from control_backend.core.agent_system import InternalMessage
-from control_backend.schemas.ri_message import PauseCommand, RIEndpoint

 def speech_agent_path():
@@ -402,38 +400,3 @@ async def test_negotiate_req_socket_none_causes_retry(zmq_context):
     result = await agent._negotiate_connection(max_retries=1)
     assert result is False
-
-
-@pytest.mark.asyncio
-async def test_handle_message_pause_command(zmq_context):
-    """Test handle_message with a valid PauseCommand."""
-    agent = RICommunicationAgent("ri_comm")
-    agent._req_socket = AsyncMock()
-    agent.logger = MagicMock()
-    agent._req_socket.recv_json.return_value = {"status": "ok"}
-
-    pause_cmd = PauseCommand(data=True)
-    msg = InternalMessage(to="ri_comm", sender="user_int", body=pause_cmd.model_dump_json())
-
-    await agent.handle_message(msg)
-
-    agent._req_socket.send_json.assert_awaited_once()
-    args = agent._req_socket.send_json.await_args[0][0]
-    assert args["endpoint"] == RIEndpoint.PAUSE.value
-    assert args["data"] is True
-
-
-@pytest.mark.asyncio
-async def test_handle_message_invalid_pause_command(zmq_context):
-    """Test handle_message with invalid JSON."""
-    agent = RICommunicationAgent("ri_comm")
-    agent._req_socket = AsyncMock()
-    agent.logger = MagicMock()
-
-    msg = InternalMessage(to="ri_comm", sender="user_int", body="invalid json")
-
-    await agent.handle_message(msg)
-
-    agent.logger.warning.assert_called_with("Incorrect message format for PauseCommand.")
-    agent._req_socket.send_json.assert_not_called()

View File

@@ -0,0 +1,152 @@
from unittest.mock import AsyncMock, MagicMock

import pytest
import zmq

import control_backend.agents.perception.face_rec_agent as face_module
from control_backend.agents.perception.face_rec_agent import FacePerceptionAgent
from control_backend.core.agent_system import InternalMessage
from control_backend.schemas.belief_message import BeliefMessage


@pytest.fixture
def agent():
    """Return a FacePerceptionAgent instance for testing."""
    return FacePerceptionAgent(
        name="face_agent",
        zmq_address="inproc://test",
        zmq_bind=False,
    )


@pytest.fixture
def socket():
    """Return a mocked ZMQ socket."""
    sock = AsyncMock()
    sock.setsockopt_string = MagicMock()
    sock.connect = MagicMock()
    sock.bind = MagicMock()
    return sock


def test_connect_socket_connect(agent, socket, monkeypatch):
    """Test that _connect_socket properly connects when zmq_bind=False."""
    ctx = MagicMock()
    ctx.socket.return_value = socket
    monkeypatch.setattr(face_module.azmq, "Context", MagicMock(instance=lambda: ctx))

    agent._connect_socket()

    socket.setsockopt_string.assert_called_once_with(zmq.SUBSCRIBE, "")
    socket.connect.assert_called_once_with(agent._zmq_address)
    socket.bind.assert_not_called()


def test_connect_socket_bind(agent, socket, monkeypatch):
    """Test that _connect_socket properly binds when zmq_bind=True."""
    agent._zmq_bind = True
    ctx = MagicMock()
    ctx.socket.return_value = socket
    monkeypatch.setattr(face_module.azmq, "Context", MagicMock(instance=lambda: ctx))

    agent._connect_socket()

    socket.bind.assert_called_once_with(agent._zmq_address)
    socket.connect.assert_not_called()


def test_connect_socket_twice_is_noop(agent, socket):
    """Test that calling _connect_socket twice does not overwrite an existing socket."""
    agent._socket = socket

    agent._connect_socket()

    assert agent._socket is socket


@pytest.mark.asyncio
async def test_update_face_belief_present(agent):
    """Test that _update_face_belief(True) creates the 'face_present' belief."""
    agent.send = AsyncMock()

    await agent._update_face_belief(True)

    msg = agent.send.await_args.args[0]
    payload = BeliefMessage.model_validate_json(msg.body)
    assert payload.create[0].name == "face_present"


@pytest.mark.asyncio
async def test_update_face_belief_absent(agent):
    """Test that _update_face_belief(False) deletes the 'face_present' belief."""
    agent.send = AsyncMock()

    await agent._update_face_belief(False)

    msg = agent.send.await_args.args[0]
    payload = BeliefMessage.model_validate_json(msg.body)
    assert payload.delete[0].name == "face_present"


@pytest.mark.asyncio
async def test_post_face_belief_present(agent):
    """Test that _post_face_belief(True) sends a belief creation message."""
    agent.send = AsyncMock()

    await agent._post_face_belief(True)

    msg = agent.send.await_args.args[0]
    assert '"create"' in msg.body and '"face_present"' in msg.body


@pytest.mark.asyncio
async def test_post_face_belief_absent(agent):
    """Test that _post_face_belief(False) sends a belief deletion message."""
    agent.send = AsyncMock()

    await agent._post_face_belief(False)

    msg = agent.send.await_args.args[0]
    assert '"delete"' in msg.body and '"face_present"' in msg.body


@pytest.mark.asyncio
async def test_handle_pause(agent):
    """Test that a 'PAUSE' message clears _paused and resets _last_face_state."""
    agent._paused.set()
    agent._last_face_state = True
    msg = InternalMessage(
        to=agent.name,
        sender=face_module.settings.agent_settings.user_interrupt_name,
        thread="cmd",
        body="PAUSE",
    )

    await agent.handle_message(msg)

    assert not agent._paused.is_set()
    assert agent._last_face_state is None


@pytest.mark.asyncio
async def test_handle_resume(agent):
    """Test that a 'RESUME' message sets _paused."""
    agent._paused.clear()
    msg = InternalMessage(
        to=agent.name,
        sender=face_module.settings.agent_settings.user_interrupt_name,
        thread="cmd",
        body="RESUME",
    )

    await agent.handle_message(msg)

    assert agent._paused.is_set()


@pytest.mark.asyncio
async def test_handle_unknown_command(agent):
    """Test that an unknown command from UserInterruptAgent is ignored (logs a warning)."""
    msg = InternalMessage(
        to=agent.name,
        sender=face_module.settings.agent_settings.user_interrupt_name,
        thread="cmd",
        body="???",
    )

    await agent.handle_message(msg)


@pytest.mark.asyncio
async def test_handle_unknown_sender(agent):
    """Test that messages from unknown senders are ignored."""
    msg = InternalMessage(
        to=agent.name,
        sender="someone_else",
        thread="cmd",
        body="PAUSE",
    )

    await agent.handle_message(msg)

View File

@@ -303,29 +303,6 @@ async def test_send_experiment_control(agent):
     assert msg.thread == "reset_experiment"

-
-@pytest.mark.asyncio
-async def test_send_pause_command(agent):
-    await agent._send_pause_command("true")
-    # Sends to RI and VAD
-    assert agent.send.await_count == 2
-    msgs = [call.args[0] for call in agent.send.call_args_list]
-    ri_msg = next(m for m in msgs if m.to == settings.agent_settings.ri_communication_name)
-    assert json.loads(ri_msg.body)["endpoint"] == ""  # PAUSE endpoint
-    assert json.loads(ri_msg.body)["data"] is True
-    vad_msg = next(m for m in msgs if m.to == settings.agent_settings.vad_name)
-    assert vad_msg.body == "PAUSE"
-
-    agent.send.reset_mock()
-    await agent._send_pause_command("false")
-    assert agent.send.await_count == 2
-    vad_msg = next(
-        m for m in agent.send.call_args_list if m.args[0].to == settings.agent_settings.vad_name
-    ).args[0]
-    assert vad_msg.body == "RESUME"

 @pytest.mark.asyncio
 async def test_setup(agent):
     """Test the setup method initializes sockets correctly."""

uv.lock generated

File diff suppressed because it is too large.