fix: default beliefs to false, user-interrupt chat role, VAD initial silence, unused import
ref: N25B-453
@@ -150,6 +150,9 @@ class TextBeliefExtractorAgent(BaseAgent):
             return

         available_beliefs = [b for b in belief_list.beliefs if isinstance(b, SemanticBelief)]
+        self._current_beliefs = BeliefState(
+            false={InternalBelief(name=b.name, arguments=None) for b in available_beliefs},
+        )
         self.belief_inferrer.available_beliefs = available_beliefs
         self.logger.debug(
             "Received %d semantic beliefs from the program manager: %s",
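What this hunk does: every semantic belief announced by the program manager is now seeded as explicitly false, so the extractor starts from "known false" rather than "unknown". A minimal runnable sketch of that behaviour, using stand-in definitions for InternalBelief and BeliefState (the real classes in control_backend may differ):

from dataclasses import dataclass, field


@dataclass(frozen=True)
class InternalBelief:
    name: str
    arguments: tuple | None = None


@dataclass
class BeliefState:
    true: set[InternalBelief] = field(default_factory=set)
    false: set[InternalBelief] = field(default_factory=set)


# Seed every available belief as false, mirroring the added lines above.
names = ["user_is_sad", "user_wants_help"]
state = BeliefState(false={InternalBelief(name=n) for n in names})
assert all(InternalBelief(name=n) in state.false for n in names)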
@@ -170,6 +173,9 @@ class TextBeliefExtractorAgent(BaseAgent):
         available_goals = {g for g in goals_list.goals if g.can_fail}
         available_goals -= self._force_completed_goals
         self.goal_inferrer.goals = available_goals
+        self._current_goal_completions = {
+            f"achieved_{AgentSpeakGenerator.slugify(goal)}": False for goal in available_goals
+        }
         self.logger.debug(
             "Received %d failable goals from the program manager: %s",
             len(available_goals),
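The goal-completion map added here gives each failable goal an "achieved_<slug>" flag that starts False. AgentSpeakGenerator.slugify is not shown in this diff, so the sketch below uses a hypothetical slugify with typical behaviour (lowercase, non-alphanumerics collapsed to underscores):

import re


def slugify(goal: str) -> str:
    # Hypothetical stand-in for AgentSpeakGenerator.slugify: make the goal
    # name a valid lowercase atom.
    return re.sub(r"[^a-z0-9]+", "_", goal.lower()).strip("_")


goals = {"Greet the user", "Ask about mood"}
completions = {f"achieved_{slugify(g)}": False for g in goals}
# -> {"achieved_greet_the_user": False, "achieved_ask_about_mood": False}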
@@ -285,9 +285,10 @@ class VADAgent(BaseAgent):
             assert self.audio_out_socket is not None
             await self.audio_out_socket.send(self.audio_buffer[: -2 * len(chunk)].tobytes())

-        # At this point, we know that the speech has ended.
-        # Prepend the last chunk that had no speech, for a more fluent boundary
-        self.audio_buffer = chunk
+        # At this point, we know that there is no speech.
+        # Prepend the last few chunks that had no speech, for a more fluent boundary.
+        self.audio_buffer = np.append(self.audio_buffer, chunk)
+        self.audio_buffer = self.audio_buffer[-begin_silence_length * len(chunk) :]

     async def handle_message(self, msg: InternalMessage):
         """
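The VAD change turns the single "last silent chunk" into a rolling pre-roll: silent chunks are appended and the buffer is trimmed to the last begin_silence_length chunks, so when speech starts there is already a short lead-in of silence in the buffer instead of a hard cut. A self-contained sketch of that trimming (chunk size and begin_silence_length are illustrative values, not the project's settings):

import numpy as np

begin_silence_length = 3  # keep the last 3 silent chunks (assumed value)
chunk_size = 4            # samples per chunk, tiny for illustration
audio_buffer = np.array([], dtype=np.int16)

for i in range(5):  # five silent chunks arrive
    chunk = np.full(chunk_size, i, dtype=np.int16)
    audio_buffer = np.append(audio_buffer, chunk)
    audio_buffer = audio_buffer[-begin_silence_length * len(chunk) :]

# Only the last three chunks (2, 3, 4) survive: 3 chunks * 4 samples.
assert len(audio_buffer) == begin_silence_length * chunk_size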
@@ -7,7 +7,6 @@ import numpy as np
 import zmq
 import zmq.asyncio as azmq
 from pydantic_core import ValidationError
-import struct

 from control_backend.agents import BaseAgent
 from control_backend.agents.perception.visual_emotion_recognition_agent.visual_emotion_recognizer import (  # noqa
@@ -89,7 +88,7 @@ class VisualEmotionRecognitionAgent(BaseAgent):
         while self._running:
             try:
                 frame_bytes = await self.video_in_socket.recv()

                 # Convert bytes to a numpy buffer
                 nparr = np.frombuffer(frame_bytes, np.uint8)

@@ -126,7 +125,6 @@ class VisualEmotionRecognitionAgent(BaseAgent):
             except zmq.Again:
                 self.logger.warning("No video frame received within timeout.")

-
     async def update_emotions(self, prev_emotions: set[str], emotions: set[str]):
         """
         Compare emotions from previous window and current emotions,
|
|||||||
@@ -300,7 +300,7 @@ class UserInterruptAgent(BaseAgent):
|
|||||||
|
|
||||||
:param text_to_say: The string that the robot has to say.
|
:param text_to_say: The string that the robot has to say.
|
||||||
"""
|
"""
|
||||||
experiment_logger.chat(text_to_say, extra={"role": "user"})
|
experiment_logger.chat(text_to_say, extra={"role": "assistant"})
|
||||||
cmd = SpeechCommand(data=text_to_say, is_priority=True)
|
cmd = SpeechCommand(data=text_to_say, is_priority=True)
|
||||||
out_msg = InternalMessage(
|
out_msg = InternalMessage(
|
||||||
to=settings.agent_settings.robot_speech_name,
|
to=settings.agent_settings.robot_speech_name,
|
||||||
|
|||||||
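The UserInterruptAgent fix is attribution, not behaviour: text_to_say is something the robot is about to speak, so the chat log should record it under the "assistant" role; "user" mislabelled robot speech as user speech in transcripts. A minimal sketch of role-tagged chat logging, with the logger shape assumed from the call site above (experiment_logger.chat itself is not shown in this diff):

import logging

logging.basicConfig(level=logging.INFO, format="%(role)s: %(message)s")
logger = logging.getLogger("experiment")


def chat(text: str, *, role: str) -> None:
    # Tag each utterance with who said it, as the extra={"role": ...}
    # argument does in the call above.
    logger.info(text, extra={"role": role})


chat("Let me stop and listen.", role="assistant")  # robot speech
chat("Wait, stop!", role="user")                   # a user interrupt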