Compare commits

4 Commits

feat/face-...2e717ec277

| Author | SHA1 | Date |
|---|---|---|
| | 2e717ec277 | |
| | b53bf872a5 | |
| | 1337b1f06b | |
| | f79b65a6fa | |
Changes to the AgentSpeak generator module:

```diff
@@ -4,6 +4,7 @@ University within the Software Project course.
 © Copyright Utrecht University (Department of Information and Computing Sciences)
 """
 
+import logging
 from functools import singledispatchmethod
 
 from slugify import slugify
```
```diff
@@ -30,7 +31,6 @@ from control_backend.schemas.program import (
     BasicNorm,
     ConditionalNorm,
     EmotionBelief,
-    FaceBelief,
     GestureAction,
     Goal,
     InferredBelief,
```
```diff
@@ -67,6 +67,7 @@ class AgentSpeakGenerator:
     """
 
     _asp: AstProgram
+    logger = logging.getLogger(__name__)
 
     def generate(self, program: Program) -> str:
         """
```
```diff
@@ -480,7 +481,8 @@ class AgentSpeakGenerator:
         :param main_goal: Whether this is a main goal (for UI notification purposes).
         """
         context: list[AstExpression] = [self._astify(phase)]
-        context.append(~self._astify(goal, achieved=True))
+        if goal.can_fail:
+            context.append(~self._astify(goal, achieved=True))
         if previous_goal and previous_goal.can_fail:
             context.append(self._astify(previous_goal, achieved=True))
         if not continues_response:
```
```diff
@@ -504,6 +506,10 @@ class AgentSpeakGenerator:
         if not goal.can_fail and not continues_response:
             body.append(AstStatement(StatementType.ADD_BELIEF, self._astify(goal, achieved=True)))
 
+        if len(body) == 0:
+            self.logger.warning("Goal with no plan detected: %s", goal.name)
+            body.append(AstStatement(StatementType.EMPTY, AstLiteral("true")))
+
         self._asp.plans.append(AstPlan(TriggerType.ADDED_GOAL, self._astify(goal), context, body))
 
         self._asp.plans.append(
```
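The new fallback presumably exists because the generator always renders a plan body: a goal with no steps would otherwise produce syntactically invalid AgentSpeak, so a no-op `true` is substituted and the authoring problem is logged. A hedged illustration of the resulting output (the goal and context names are invented):

```python
# With the fallback, a plan-less goal still renders as a valid AgentSpeak
# plan whose body is the no-op literal, roughly:
#
#   +!greet_user : phase(intro) <- true.
```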
```diff
@@ -564,10 +570,10 @@ class AgentSpeakGenerator:
             )
         )
         for step in trigger.plan.steps:
-            body.append(self._step_to_statement(step))
             if isinstance(step, Goal):
-                step.can_fail = False  # triggers are continuous sequence
-                subgoals.append(step)
+                new_step = step.model_copy(update={"can_fail": False})  # triggers are sequence
+                subgoals.append(new_step)
+            body.append(self._step_to_statement(step))
 
         # Arbitrary wait for UI to display nicely
         body.append(
```
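The switch from in-place mutation to `model_copy` matters because `ProgramElement` models are declared frozen to stay hashable (see the `model_config = {"frozen": True}` line in the program-schema hunks below). A minimal sketch, assuming `Goal` inherits that config; the real class carries more fields:

```python
from pydantic import BaseModel, ValidationError

class Goal(BaseModel):
    model_config = {"frozen": True}  # frozen models are hashable but immutable
    name: str
    can_fail: bool = True

goal = Goal(name="greet_user")
try:
    goal.can_fail = False  # assignment raises on a frozen model
except ValidationError as err:
    print(err)

# model_copy(update=...) returns a modified copy instead of mutating in place
new_goal = goal.model_copy(update={"can_fail": False})
print(goal.can_fail, new_goal.can_fail)  # True False
```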
```diff
@@ -688,10 +694,6 @@ class AgentSpeakGenerator:
     def _(self, eb: EmotionBelief) -> AstExpression:
         return AstLiteral("emotion_detected", [AstAtom(eb.emotion)])
 
-    @_astify.register
-    def _(self, fb: FaceBelief) -> AstExpression:
-        return AstLiteral("face_present")
-
     @_astify.register
     def _(self, ib: InferredBelief) -> AstExpression:
         """
```
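For context, these `_astify` overloads rely on `functools.singledispatchmethod` (imported at the top of the module), which dispatches on the runtime type of the first argument after `self`. A self-contained sketch of the pattern; the types and return values are illustrative, not the real generator:

```python
from functools import singledispatchmethod

class Demo:
    @singledispatchmethod
    def astify(self, obj) -> str:
        # fallback when no overload matches the argument type
        raise NotImplementedError(f"no overload for {type(obj).__name__}")

    @astify.register
    def _(self, text: str) -> str:
        return f"atom({text})"

    @astify.register
    def _(self, number: int) -> str:
        return f"number({number})"

demo = Demo()
print(demo.astify("happy"))  # atom(happy)
print(demo.astify(3))        # number(3)
```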
Changes to the visual emotion recognition agent:

```diff
@@ -14,7 +14,7 @@ from control_backend.agents.perception.visual_emotion_recognition_agent.visual_e
 )
 from control_backend.core.agent_system import InternalMessage
 from control_backend.core.config import settings
-from control_backend.schemas.belief_message import Belief, BeliefMessage
+from control_backend.schemas.belief_message import Belief
 
 
 class VisualEmotionRecognitionAgent(BaseAgent):
```
```diff
@@ -44,7 +44,6 @@ class VisualEmotionRecognitionAgent(BaseAgent):
         self.timeout_ms = timeout_ms
         self.window_duration = window_duration
         self.min_frames_required = min_frames_required
-        self._face_detected = False
 
         # Pause functionality
         # NOTE: flag is set when running, cleared when paused
```
```diff
@@ -90,9 +89,6 @@ class VisualEmotionRecognitionAgent(BaseAgent):
         # Tracks counts of detected emotions per face index
         face_stats = defaultdict(Counter)
 
-        # How many times a face has been detected
-        face_detection_yes_no = [0, 0]
-
         prev_dominant_emotions = set()
 
         while self._running:
```
```diff
@@ -101,8 +97,8 @@ class VisualEmotionRecognitionAgent(BaseAgent):
 
                 width, height, image_bytes = await self.video_in_socket.recv_multipart()
 
-                width = int.from_bytes(width, "little")
-                height = int.from_bytes(height, "little")
+                width = int.from_bytes(width, 'little')
+                height = int.from_bytes(height, 'little')
 
                 # Convert bytes to a numpy buffer
                 image_array = np.frombuffer(image_bytes, np.uint8)
```
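Only the quote style changes here; behaviour is identical. For reference, `int.from_bytes` decodes the little-endian frame dimensions sent ahead of the raw image bytes (the 4-byte header width below is an assumption for illustration):

```python
# 640 (0x280) and 480 (0x1E0) encoded as 4-byte little-endian values
width = int.from_bytes(b"\x80\x02\x00\x00", "little")   # 640
height = int.from_bytes(b"\xe0\x01\x00\x00", "little")  # 480
```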
```diff
@@ -111,13 +107,6 @@ class VisualEmotionRecognitionAgent(BaseAgent):
 
                 # Get the dominant emotion from each face
                 current_emotions = self.emotion_recognizer.sorted_dominant_emotions(frame)
-
-                # Update face face_detection_yes_no
-                if len(current_emotions) > 0:
-                    face_detection_yes_no[0] += 1
-                else:
-                    face_detection_yes_no[1] += 1
-
                 # Update emotion counts for each detected face
                 for i, emotion in enumerate(current_emotions):
                     face_stats[i][emotion] += 1
```
```diff
@@ -133,20 +122,6 @@ class VisualEmotionRecognitionAgent(BaseAgent):
                     dominant_emotion = counter.most_common(1)[0][0]
                     window_dominant_emotions.add(dominant_emotion)
 
-                if (
-                    face_detection_yes_no[0] > face_detection_yes_no[1]
-                    and not self._face_detected
-                ):
-                    self._face_detected = True
-                    await self._inform_face_detected()
-                elif (
-                    face_detection_yes_no[0] <= face_detection_yes_no[1] and self._face_detected
-                ):
-                    self._face_detected = False
-                    await self._inform_face_detected()
-
-                face_detection_yes_no = [0, 0]
-
                 await self.update_emotions(prev_dominant_emotions, window_dominant_emotions)
                 prev_dominant_emotions = window_dominant_emotions
                 face_stats.clear()
```
```diff
@@ -158,6 +133,7 @@ class VisualEmotionRecognitionAgent(BaseAgent):
             except Exception as e:
                 self.logger.error(f"Error in emotion recognition loop: {e}")
 
+
     async def update_emotions(self, prev_emotions: set[str], emotions: set[str]):
         """
         Compare emotions from previous window and current emotions,
```
```diff
@@ -173,7 +149,9 @@ class VisualEmotionRecognitionAgent(BaseAgent):
         for emotion in emotions_to_remove:
             self.logger.info(f"Emotion '{emotion}' has disappeared.")
             try:
-                emotion_beliefs_remove.append(Belief(name="emotion_detected", arguments=[emotion]))
+                emotion_beliefs_remove.append(
+                    Belief(name="emotion_detected", arguments=[emotion], remove=True)
+                )
             except ValidationError:
                 self.logger.warning("Invalid belief for emotion removal: %s", emotion)
 
```
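Judging by the diff, a belief built for removal previously looked identical to one built for creation; the new `remove=True` flag makes the retraction explicit. A hedged sketch (the `Belief` schema beyond these fields is assumed):

```python
from control_backend.schemas.belief_message import Belief

# Before: indistinguishable from a belief being created
old_style = Belief(name="emotion_detected", arguments=["happy"])

# After: explicitly marked for deletion downstream
new_style = Belief(name="emotion_detected", arguments=["happy"], remove=True)
```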
```diff
@@ -197,25 +175,11 @@ class VisualEmotionRecognitionAgent(BaseAgent):
             )
             await self.send(message)
 
-    async def _inform_face_detected(self):
-        if self._face_detected:
-            belief_message = BeliefMessage(create=[Belief(name="face_present")])
-        else:
-            belief_message = BeliefMessage(delete=[Belief(name="face_present")])
-
-        msg = InternalMessage(
-            to=settings.agent_settings.bdi_core_name,
-            thread="beliefs",
-            body=belief_message.model_dump_json(),
-        )
-
-        await self.send(msg)
-
     async def handle_message(self, msg: InternalMessage):
         """
         Handle incoming messages.
 
         Expects messages to pause or resume the Visual Emotion Recognition
         processing from User Interrupt Agent.
 
         :param msg: The received internal message.
```
```diff
@@ -240,3 +204,4 @@ class VisualEmotionRecognitionAgent(BaseAgent):
         """
         self.video_in_socket.close()
         await super().stop()
+
```
Changes to the behaviour settings:

```diff
@@ -82,7 +82,7 @@ class BehaviourSettings(BaseModel):
     :ivar transcription_words_per_token: Estimated words per token for transcription timing.
     :ivar transcription_token_buffer: Buffer for transcription tokens.
     :ivar conversation_history_length_limit: The maximum amount of messages to extract beliefs from.
     :ivar visual_emotion_recognition_window_duration_s: Duration in seconds over which to aggregate
         emotions and update emotion beliefs.
     :ivar visual_emotion_recognition_min_frames_per_face: Minimum number of frames per face required
         to consider a face valid.
```
```diff
@@ -112,7 +112,7 @@ class BehaviourSettings(BaseModel):
     conversation_history_length_limit: int = 10
 
     # Visual Emotion Recognition settings
-    visual_emotion_recognition_window_duration_s: int = 3
+    visual_emotion_recognition_window_duration_s: int = 5
     visual_emotion_recognition_min_frames_per_face: int = 3
     # AgentSpeak related settings
     trigger_time_to_wait: int = 2000
```
Changes to the program schema (control_backend/schemas/program.py):

```diff
@@ -7,7 +7,7 @@ University within the Software Project course.
 from enum import Enum
 from typing import Literal
 
-from pydantic import UUID4, BaseModel
+from pydantic import UUID4, BaseModel, field_validator
 
 
 class ProgramElement(BaseModel):
```
```diff
@@ -24,6 +24,13 @@ class ProgramElement(BaseModel):
     # To make program elements hashable
     model_config = {"frozen": True}
 
+    @field_validator("name")
+    @classmethod
+    def name_must_not_start_with_number(cls, v: str) -> str:
+        if v and v[0].isdigit():
+            raise ValueError('Field "name" must not start with a number.')
+        return v
+
 
 class LogicalOperator(Enum):
     """
```
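How the new validator behaves, sketched with a trimmed-down `ProgramElement` (the real class carries more fields and configuration):

```python
from pydantic import BaseModel, ValidationError, field_validator

class ProgramElement(BaseModel):
    model_config = {"frozen": True}
    name: str

    @field_validator("name")
    @classmethod
    def name_must_not_start_with_number(cls, v: str) -> str:
        if v and v[0].isdigit():
            raise ValueError('Field "name" must not start with a number.')
        return v

ProgramElement(name="greet_user")    # accepted
try:
    ProgramElement(name="1st_goal")  # rejected: starts with a digit
except ValidationError as err:
    print(err)
```

Presumably this protects the AgentSpeak generator, where element names are slugified into atoms that must not begin with a digit.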
```diff
@@ -41,8 +48,8 @@ class LogicalOperator(Enum):
     OR = "OR"
 
 
-type Belief = KeywordBelief | SemanticBelief | InferredBelief | EmotionBelief | FaceBelief
-type BasicBelief = KeywordBelief | SemanticBelief | EmotionBelief | FaceBelief
+type Belief = KeywordBelief | SemanticBelief | InferredBelief | EmotionBelief
+type BasicBelief = KeywordBelief | SemanticBelief | EmotionBelief
 
 
 class KeywordBelief(ProgramElement):
```
```diff
@@ -117,15 +124,6 @@ class EmotionBelief(ProgramElement):
     emotion: str
 
 
-class FaceBelief(ProgramElement):
-    """
-    Represents the belief that at least one face is currently in view.
-    """
-
-    name: str = ""
-    face_present: bool
-
-
 class Norm(ProgramElement):
     """
     Base class for behavioral norms that guide the robot's interactions.
```