Compare commits


4 Commits

SHA1        Message                                              Date
dfd2c3a0a1  fix: reset counter after each loop                   2026-01-30 20:39:10 +01:00
            ref: N25B-395
3efe8a7b06  chore: change emo loop frequency                     2026-01-30 20:34:16 +01:00
3a5c27e01f  fix: update face detected at same time as emotions   2026-01-30 20:33:16 +01:00
            ref: N25B-395
1f799299b9  feat: (hopefully) face detection                     2026-01-30 20:12:31 +01:00
            Simplified implementation, relying on the already-present VED Agent.
            ref: N25B-395
4 changed files with 67 additions and 32 deletions

View File (AgentSpeakGenerator)

@@ -4,7 +4,6 @@ University within the Software Project course.
 © Copyright Utrecht University (Department of Information and Computing Sciences)
 """
-import logging
 from functools import singledispatchmethod
 from slugify import slugify
@@ -31,6 +30,7 @@ from control_backend.schemas.program import (
     BasicNorm,
     ConditionalNorm,
     EmotionBelief,
+    FaceBelief,
     GestureAction,
     Goal,
     InferredBelief,
@@ -67,7 +67,6 @@ class AgentSpeakGenerator:
     """
     _asp: AstProgram
-    logger = logging.getLogger(__name__)

     def generate(self, program: Program) -> str:
         """
@@ -481,7 +480,6 @@
         :param main_goal: Whether this is a main goal (for UI notification purposes).
         """
         context: list[AstExpression] = [self._astify(phase)]
-        if goal.can_fail:
-            context.append(~self._astify(goal, achieved=True))
+        context.append(~self._astify(goal, achieved=True))
         if previous_goal and previous_goal.can_fail:
             context.append(self._astify(previous_goal, achieved=True))
@@ -506,10 +504,6 @@
         if not goal.can_fail and not continues_response:
             body.append(AstStatement(StatementType.ADD_BELIEF, self._astify(goal, achieved=True)))
-        if len(body) == 0:
-            self.logger.warning("Goal with no plan detected: %s", goal.name)
-            body.append(AstStatement(StatementType.EMPTY, AstLiteral("true")))
         self._asp.plans.append(AstPlan(TriggerType.ADDED_GOAL, self._astify(goal), context, body))
         self._asp.plans.append(
@@ -570,10 +564,10 @@
             )
         )
         for step in trigger.plan.steps:
+            if isinstance(step, Goal):
+                new_step = step.model_copy(update={"can_fail": False})  # triggers are sequence
+                subgoals.append(new_step)
             body.append(self._step_to_statement(step))
-            if isinstance(step, Goal):
-                step.can_fail = False  # triggers are continuous sequence
-                subgoals.append(step)

         # Arbitrary wait for UI to display nicely
         body.append(
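
Review note: besides the reordering, this hunk trades in-place mutation for copying. `step.can_fail = False` stops working once `ProgramElement` is frozen (see the schema diff below, where `model_config = {"frozen": True}` makes elements hashable), so the new code builds a modified copy instead. A minimal sketch of that pydantic v2 pattern, using an illustrative `Goal` model rather than the project's real one:

    from pydantic import BaseModel

    class Goal(BaseModel):
        model_config = {"frozen": True}  # hashable, but attribute assignment is rejected
        name: str
        can_fail: bool = True

    g = Goal(name="greet_user")
    # g.can_fail = False  # would raise a ValidationError: instance is frozen
    g2 = g.model_copy(update={"can_fail": False})  # copy with one field overridden
    print(g.can_fail, g2.can_fail)  # True False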
@@ -694,6 +688,10 @@
     def _(self, eb: EmotionBelief) -> AstExpression:
         return AstLiteral("emotion_detected", [AstAtom(eb.emotion)])

+    @_astify.register
+    def _(self, fb: FaceBelief) -> AstExpression:
+        return AstLiteral("face_present")
+
     @_astify.register
     def _(self, ib: InferredBelief) -> AstExpression:
         """

View File (VisualEmotionRecognitionAgent)

@@ -14,7 +14,7 @@ from control_backend.agents.perception.visual_emotion_recognition_agent.visual_e
 )
 from control_backend.core.agent_system import InternalMessage
 from control_backend.core.config import settings
-from control_backend.schemas.belief_message import Belief
+from control_backend.schemas.belief_message import Belief, BeliefMessage


 class VisualEmotionRecognitionAgent(BaseAgent):
@@ -44,6 +44,7 @@
         self.timeout_ms = timeout_ms
         self.window_duration = window_duration
         self.min_frames_required = min_frames_required
+        self._face_detected = False

         # Pause functionality
         # NOTE: flag is set when running, cleared when paused
@@ -89,6 +90,9 @@
         # Tracks counts of detected emotions per face index
         face_stats = defaultdict(Counter)

+        # How many times a face has been detected
+        face_detection_yes_no = [0, 0]
+
         prev_dominant_emotions = set()

         while self._running:
@@ -97,8 +101,8 @@
                 width, height, image_bytes = await self.video_in_socket.recv_multipart()
-                width = int.from_bytes(width, 'little')
-                height = int.from_bytes(height, 'little')
+                width = int.from_bytes(width, "little")
+                height = int.from_bytes(height, "little")

                 # Convert bytes to a numpy buffer
                 image_array = np.frombuffer(image_bytes, np.uint8)
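
Review note: aside from the quote-style normalization, this hunk shows the wire format the agent consumes: a multipart message of (width, height, raw pixel bytes) with the dimensions encoded little-endian. A hedged sketch of both directions (socket plumbing omitted; the 4-byte dimension size is an assumption, since the sender is not part of this diff):

    import numpy as np

    # Sender side (illustrative): pack dimensions and pixels into multipart frames.
    height_px, width_px = 2, 3
    frame = np.zeros((height_px, width_px), dtype=np.uint8)
    parts = [width_px.to_bytes(4, "little"), height_px.to_bytes(4, "little"), frame.tobytes()]

    # Receiver side, mirroring the diff: decode dimensions, then rebuild the array.
    width = int.from_bytes(parts[0], "little")
    height = int.from_bytes(parts[1], "little")
    image_array = np.frombuffer(parts[2], np.uint8).reshape(height, width)
    print(width, height, image_array.shape)  # 3 2 (2, 3)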
@@ -107,6 +111,13 @@
                 # Get the dominant emotion from each face
                 current_emotions = self.emotion_recognizer.sorted_dominant_emotions(frame)

+                # Update face face_detection_yes_no
+                if len(current_emotions) > 0:
+                    face_detection_yes_no[0] += 1
+                else:
+                    face_detection_yes_no[1] += 1
+
                 # Update emotion counts for each detected face
                 for i, emotion in enumerate(current_emotions):
                     face_stats[i][emotion] += 1
@@ -122,6 +133,20 @@
                     dominant_emotion = counter.most_common(1)[0][0]
                     window_dominant_emotions.add(dominant_emotion)

+                if (
+                    face_detection_yes_no[0] > face_detection_yes_no[1]
+                    and not self._face_detected
+                ):
+                    self._face_detected = True
+                    await self._inform_face_detected()
+                elif (
+                    face_detection_yes_no[0] <= face_detection_yes_no[1] and self._face_detected
+                ):
+                    self._face_detected = False
+                    await self._inform_face_detected()
+
+                face_detection_yes_no = [0, 0]
+
                 await self.update_emotions(prev_dominant_emotions, window_dominant_emotions)
                 prev_dominant_emotions = window_dominant_emotions
                 face_stats.clear()
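
Review note: taken together, the counter and the flag implement a per-window majority vote with edge-triggered notification: a belief update is sent only when the majority flips, and the "reset counter after each loop" commit is the `face_detection_yes_no = [0, 0]` line above. A distilled sketch of the pattern (names and the callback are illustrative, not the project's API):

    def make_presence_tracker(on_change):
        present = False
        counts = [0, 0]  # [frames with a face, frames without]

        def observe(face_seen: bool):
            counts[0 if face_seen else 1] += 1

        def end_window():
            nonlocal present
            majority = counts[0] > counts[1]
            if majority != present:  # notify only on a state change
                present = majority
                on_change(present)
            counts[0] = counts[1] = 0  # reset per window

        return observe, end_window

    observe, end_window = make_presence_tracker(lambda p: print("face_present:", p))
    for seen in (True, True, False):
        observe(seen)
    end_window()  # face_present: True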
@@ -133,7 +158,6 @@
             except Exception as e:
                 self.logger.error(f"Error in emotion recognition loop: {e}")
-

     async def update_emotions(self, prev_emotions: set[str], emotions: set[str]):
         """
         Compare emotions from previous window and current emotions,
@@ -149,9 +173,7 @@
             for emotion in emotions_to_remove:
                 self.logger.info(f"Emotion '{emotion}' has disappeared.")
                 try:
-                    emotion_beliefs_remove.append(
-                        Belief(name="emotion_detected", arguments=[emotion], remove=True)
-                    )
+                    emotion_beliefs_remove.append(Belief(name="emotion_detected", arguments=[emotion]))
                 except ValidationError:
                     self.logger.warning("Invalid belief for emotion removal: %s", emotion)
@@ -175,6 +197,20 @@
             )
             await self.send(message)

+    async def _inform_face_detected(self):
+        if self._face_detected:
+            belief_message = BeliefMessage(create=[Belief(name="face_present")])
+        else:
+            belief_message = BeliefMessage(delete=[Belief(name="face_present")])
+
+        msg = InternalMessage(
+            to=settings.agent_settings.bdi_core_name,
+            thread="beliefs",
+            body=belief_message.model_dump_json(),
+        )
+        await self.send(msg)
+
     async def handle_message(self, msg: InternalMessage):
         """
         Handle incoming messages.
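
Review note: `_inform_face_detected` reuses the existing beliefs thread, creating the `face_present` belief when a face appears and deleting it when it disappears. A hedged sketch of the message shape implied by the diff (field names beyond `name`, `arguments`, `create`, and `delete` are assumptions):

    from pydantic import BaseModel

    class Belief(BaseModel):
        name: str
        arguments: list[str] = []

    class BeliefMessage(BaseModel):
        create: list[Belief] = []
        delete: list[Belief] = []

    appeared = BeliefMessage(create=[Belief(name="face_present")])
    gone = BeliefMessage(delete=[Belief(name="face_present")])
    print(appeared.model_dump_json())
    # {"create":[{"name":"face_present","arguments":[]}],"delete":[]}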
@@ -204,4 +240,3 @@
         """
         self.video_in_socket.close()
         await super().stop()
-

View File (BehaviourSettings)

@@ -112,7 +112,7 @@ class BehaviourSettings(BaseModel):
     conversation_history_length_limit: int = 10

     # Visual Emotion Recognition settings
-    visual_emotion_recognition_window_duration_s: int = 5
+    visual_emotion_recognition_window_duration_s: int = 3
     visual_emotion_recognition_min_frames_per_face: int = 3

     # AgentSpeak related settings
     trigger_time_to_wait: int = 2000

View File (program schemas: ProgramElement and belief types)

@@ -7,7 +7,7 @@ University within the Software Project course.
 from enum import Enum
 from typing import Literal

-from pydantic import UUID4, BaseModel, field_validator
+from pydantic import UUID4, BaseModel


 class ProgramElement(BaseModel):
@@ -24,13 +24,6 @@
     # To make program elements hashable
     model_config = {"frozen": True}

-    @field_validator("name")
-    @classmethod
-    def name_must_not_start_with_number(cls, v: str) -> str:
-        if v and v[0].isdigit():
-            raise ValueError('Field "name" must not start with a number.')
-        return v
-

 class LogicalOperator(Enum):
     """
@@ -48,8 +41,8 @@
     OR = "OR"


-type Belief = KeywordBelief | SemanticBelief | InferredBelief | EmotionBelief
-type BasicBelief = KeywordBelief | SemanticBelief | EmotionBelief
+type Belief = KeywordBelief | SemanticBelief | InferredBelief | EmotionBelief | FaceBelief
+type BasicBelief = KeywordBelief | SemanticBelief | EmotionBelief | FaceBelief


 class KeywordBelief(ProgramElement):
@@ -124,6 +117,15 @@
     emotion: str


+class FaceBelief(ProgramElement):
+    """
+    Represents the belief that at least one face is currently in view.
+    """
+
+    name: str = ""
+    face_present: bool
+
+
 class Norm(ProgramElement):
     """
     Base class for behavioral norms that guide the robot's interactions.
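
Review note: `FaceBelief` slots into the PEP 695 `type` aliases above, and the frozen base keeps every element hashable so beliefs can live in sets. A sketch that mirrors (but does not reproduce) the schema, runnable on Python 3.12+ with pydantic v2:

    from pydantic import BaseModel

    class ProgramElement(BaseModel):
        model_config = {"frozen": True}  # frozen => instances are hashable
        name: str = ""

    class EmotionBelief(ProgramElement):
        emotion: str

    class FaceBelief(ProgramElement):
        face_present: bool

    type BasicBelief = EmotionBelief | FaceBelief  # extend the union here

    beliefs: set[BasicBelief] = {EmotionBelief(emotion="happy"), FaceBelief(face_present=True)}
    print(len(beliefs))  # 2 -- frozen models support set membership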