Compare commits

21 Commits: feat/agent...feat/pause

| SHA1 |
|---|
| cdb7fac53f |
| d1ad2c1549 |
| 612a96940d |
| 4c20656c75 |
| 6ca86e4b81 |
| 867837dcc4 |
| 9adeb1efff |
| 42ee5c76d8 |
| 7d798f2e77 |
| 5282c2471f |
| 57b1276cb5 |
| 200bd27d9b |
| 7e0dc9ce1c |
| 71cefdfef3 |
| 33501093a1 |
| adbb7ffd5c |
| 0501a9fba3 |
| 539e814c5a |
| 0c682d6440 |
| 32d8f20dc9 |
| 9cc0e39955 |
.gitlab/merge_request_templates/default.md (new file, 9 lines)
@@ -0,0 +1,9 @@
%{first_multiline_commit_description}

To verify:

- [ ] Style checks pass
- [ ] Pipeline (tests) pass
- [ ] Documentation is up to date
- [ ] Tests are up to date (new code is covered)
- [ ] ...
@@ -28,6 +28,7 @@ class RobotGestureAgent(BaseAgent):
|
||||
address = ""
|
||||
bind = False
|
||||
gesture_data = []
|
||||
single_gesture_data = []
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
@@ -35,8 +36,10 @@ class RobotGestureAgent(BaseAgent):
|
||||
address=settings.zmq_settings.ri_command_address,
|
||||
bind=False,
|
||||
gesture_data=None,
|
||||
single_gesture_data=None,
|
||||
):
|
||||
self.gesture_data = gesture_data or []
|
||||
self.single_gesture_data = single_gesture_data or []
|
||||
super().__init__(name)
|
||||
self.address = address
|
||||
self.bind = bind
|
||||
@@ -99,7 +102,13 @@ class RobotGestureAgent(BaseAgent):
|
||||
gesture_command.data,
|
||||
)
|
||||
return
|
||||
|
||||
elif gesture_command.endpoint == RIEndpoint.GESTURE_SINGLE:
|
||||
if gesture_command.data not in self.single_gesture_data:
|
||||
self.logger.warning(
|
||||
"Received gesture '%s' which is not in available gestures. Early returning",
|
||||
gesture_command.data,
|
||||
)
|
||||
return
|
||||
await self.pubsocket.send_json(gesture_command.model_dump())
|
||||
except Exception:
|
||||
self.logger.exception("Error processing internal message.")
|
||||
|
||||
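The GESTURE_SINGLE guard above only forwards gestures that the robot actually advertised in `single_gesture_data`. A minimal sketch of the command it expects (the gesture name "wave" is a hypothetical placeholder, not taken from this diff):

```python
from control_backend.schemas.ri_message import GestureCommand, RIEndpoint

# Hypothetical single-gesture command; handle_message forwards cmd.model_dump()
# over the PUB socket only when cmd.data is present in self.single_gesture_data.
cmd = GestureCommand(endpoint=RIEndpoint.GESTURE_SINGLE, data="wave", is_priority=True)
payload = cmd.model_dump()
```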
@@ -11,7 +11,7 @@ from pydantic import ValidationError
|
||||
from control_backend.agents.base import BaseAgent
|
||||
from control_backend.core.agent_system import InternalMessage
|
||||
from control_backend.core.config import settings
|
||||
from control_backend.schemas.belief_message import Belief, BeliefMessage
|
||||
from control_backend.schemas.belief_message import BeliefMessage
|
||||
from control_backend.schemas.llm_prompt_message import LLMPromptMessage
|
||||
from control_backend.schemas.ri_message import SpeechCommand
|
||||
|
||||
@@ -89,9 +89,9 @@ class BDICoreAgent(BaseAgent):
|
||||
the agent has deferred intentions (deadlines).
|
||||
"""
|
||||
while self._running:
|
||||
# await (
|
||||
# self._wake_bdi_loop.wait()
|
||||
# ) # gets set whenever there's an update to the belief base
|
||||
await (
|
||||
self._wake_bdi_loop.wait()
|
||||
) # gets set whenever there's an update to the belief base
|
||||
|
||||
# Agent knows when it's expected to have to do its next thing
|
||||
maybe_more_work = True
|
||||
@@ -124,8 +124,8 @@ class BDICoreAgent(BaseAgent):
|
||||
|
||||
if msg.thread == "beliefs":
|
||||
try:
|
||||
beliefs = BeliefMessage.model_validate_json(msg.body).beliefs
|
||||
self._apply_beliefs(beliefs)
|
||||
belief_changes = BeliefMessage.model_validate_json(msg.body)
|
||||
self._apply_belief_changes(belief_changes)
|
||||
except ValidationError:
|
||||
self.logger.exception("Error processing belief.")
|
||||
return
|
||||
@@ -145,21 +145,28 @@ class BDICoreAgent(BaseAgent):
|
||||
)
|
||||
await self.send(out_msg)
|
||||
|
||||
def _apply_beliefs(self, beliefs: list[Belief]):
|
||||
def _apply_belief_changes(self, belief_changes: BeliefMessage):
|
||||
"""
|
||||
Update the belief base with a list of new beliefs.
|
||||
|
||||
If ``replace=True`` is set on a belief, it removes all existing beliefs with that name
|
||||
before adding the new one.
|
||||
For beliefs in ``belief_changes.replace``, it removes all existing beliefs with that name
|
||||
before adding the one new belief.
|
||||
|
||||
:param belief_changes: The changes in beliefs to apply.
|
||||
"""
|
||||
if not beliefs:
|
||||
if not belief_changes.create and not belief_changes.replace and not belief_changes.delete:
|
||||
return
|
||||
|
||||
for belief in beliefs:
|
||||
if belief.replace:
|
||||
self._remove_all_with_name(belief.name)
|
||||
for belief in belief_changes.create:
|
||||
self._add_belief(belief.name, belief.arguments)
|
||||
|
||||
for belief in belief_changes.replace:
|
||||
self._remove_all_with_name(belief.name)
|
||||
self._add_belief(belief.name, belief.arguments)
|
||||
|
||||
for belief in belief_changes.delete:
|
||||
self._remove_belief(belief.name, belief.arguments)
|
||||
|
||||
def _add_belief(self, name: str, args: list[str] = None):
|
||||
"""
|
||||
Add a single belief to the BDI agent.
|
||||
|
||||
@@ -144,7 +144,7 @@ class BDIBeliefCollectorAgent(BaseAgent):
|
||||
msg = InternalMessage(
|
||||
to=settings.agent_settings.bdi_core_name,
|
||||
sender=self.name,
|
||||
body=BeliefMessage(beliefs=beliefs).model_dump_json(),
|
||||
body=BeliefMessage(create=beliefs).model_dump_json(),
|
||||
thread="beliefs",
|
||||
)
|
||||
|
||||
|
||||
@@ -1,8 +1,23 @@
|
||||
import asyncio
|
||||
import json
|
||||
|
||||
import httpx
|
||||
from pydantic import ValidationError
|
||||
from slugify import slugify
|
||||
|
||||
from control_backend.agents.base import BaseAgent
|
||||
from control_backend.core.agent_system import InternalMessage
|
||||
from control_backend.core.config import settings
|
||||
from control_backend.schemas.belief_message import Belief as InternalBelief
|
||||
from control_backend.schemas.belief_message import BeliefMessage
|
||||
from control_backend.schemas.chat_history import ChatHistory, ChatMessage
|
||||
from control_backend.schemas.program import (
|
||||
Belief,
|
||||
ConditionalNorm,
|
||||
InferredBelief,
|
||||
Program,
|
||||
SemanticBelief,
|
||||
)
|
||||
|
||||
|
||||
class TextBeliefExtractorAgent(BaseAgent):
|
||||
@@ -12,46 +27,110 @@ class TextBeliefExtractorAgent(BaseAgent):
|
||||
This agent is responsible for processing raw text (e.g., from speech transcription) and
|
||||
extracting semantic beliefs from it.
|
||||
|
||||
In the current demonstration version, it performs a simple wrapping of the user's input
|
||||
into a ``user_said`` belief. In a full implementation, this agent would likely interact
|
||||
with an LLM or NLU engine to extract intent, entities, and other structured information.
|
||||
It uses the available beliefs received from the program manager to try to extract beliefs from a
|
||||
user's message, sends any updated beliefs to the BDI core, and forms a ``user_said`` belief from
|
||||
the message itself.
|
||||
"""
|
||||
|
||||
def __init__(self, name: str):
|
||||
super().__init__(name)
|
||||
self.beliefs: dict[str, bool] = {}
|
||||
self.available_beliefs: list[SemanticBelief] = []
|
||||
self.conversation = ChatHistory(messages=[])
|
||||
|
||||
async def setup(self):
|
||||
"""
|
||||
Initialize the agent and its resources.
|
||||
"""
|
||||
self.logger.info("Settting up %s.", self.name)
|
||||
# Setup LLM belief context if needed (currently demo is just passthrough)
|
||||
self.beliefs = {"mood": ["X"], "car": ["Y"]}
|
||||
self.logger.info("Setting up %s.", self.name)
|
||||
|
||||
async def handle_message(self, msg: InternalMessage):
|
||||
"""
|
||||
Handle incoming messages, primarily from the Transcription Agent.
|
||||
Handle incoming messages. Expect messages from the Transcriber agent, LLM agent, and the
|
||||
Program manager agent.
|
||||
|
||||
:param msg: The received message containing transcribed text.
|
||||
:param msg: The received message.
|
||||
"""
|
||||
sender = msg.sender
|
||||
if sender == settings.agent_settings.transcription_name:
|
||||
self.logger.debug("Received text from transcriber: %s", msg.body)
|
||||
await self._process_transcription_demo(msg.body)
|
||||
else:
|
||||
self.logger.info("Discarding message from %s", sender)
|
||||
|
||||
async def _process_transcription_demo(self, txt: str):
|
||||
match sender:
|
||||
case settings.agent_settings.transcription_name:
|
||||
self.logger.debug("Received text from transcriber: %s", msg.body)
|
||||
self._apply_conversation_message(ChatMessage(role="user", content=msg.body))
|
||||
await self._infer_new_beliefs()
|
||||
await self._user_said(msg.body)
|
||||
case settings.agent_settings.llm_name:
|
||||
self.logger.debug("Received text from LLM: %s", msg.body)
|
||||
self._apply_conversation_message(ChatMessage(role="assistant", content=msg.body))
|
||||
case settings.agent_settings.bdi_program_manager_name:
|
||||
self._handle_program_manager_message(msg)
|
||||
case _:
|
||||
self.logger.info("Discarding message from %s", sender)
|
||||
return
|
||||
|
||||
def _apply_conversation_message(self, message: ChatMessage):
|
||||
"""
|
||||
Process the transcribed text and generate beliefs.
|
||||
Save the chat message to our conversation history, taking into account the conversation
|
||||
length limit.
|
||||
|
||||
**Demo Implementation:**
|
||||
Currently, this method takes the raw text ``txt`` and wraps it into a belief structure:
|
||||
``user_said("txt")``.
|
||||
|
||||
This belief is then sent to the :class:`BDIBeliefCollectorAgent`.
|
||||
|
||||
:param txt: The raw transcribed text string.
|
||||
:param message: The chat message to add to the conversation history.
|
||||
"""
|
||||
# For demo, just wrapping user text as user_said belief
|
||||
belief = {"beliefs": {"user_said": [txt]}, "type": "belief_extraction_text"}
|
||||
length_limit = settings.behaviour_settings.conversation_history_length_limit
|
||||
self.conversation.messages = (self.conversation.messages + [message])[-length_limit:]
|
||||
|
||||
def _handle_program_manager_message(self, msg: InternalMessage):
|
||||
"""
|
||||
Handle a message from the program manager: extract available beliefs from it.
|
||||
|
||||
:param msg: The received message from the program manager.
|
||||
"""
|
||||
try:
|
||||
program = Program.model_validate_json(msg.body)
|
||||
except ValidationError:
|
||||
self.logger.warning(
|
||||
"Received message from program manager but it is not a valid program."
|
||||
)
|
||||
return
|
||||
|
||||
self.logger.debug("Received a program from the program manager.")
|
||||
|
||||
self.available_beliefs = self._extract_basic_beliefs_from_program(program)
|
||||
|
||||
# TODO Copied from an incomplete version of the program manager. Use that one instead.
|
||||
@staticmethod
|
||||
def _extract_basic_beliefs_from_program(program: Program) -> list[SemanticBelief]:
|
||||
beliefs = []
|
||||
|
||||
for phase in program.phases:
|
||||
for norm in phase.norms:
|
||||
if isinstance(norm, ConditionalNorm):
|
||||
beliefs += TextBeliefExtractorAgent._extract_basic_beliefs_from_belief(
|
||||
norm.condition
|
||||
)
|
||||
|
||||
for trigger in phase.triggers:
|
||||
beliefs += TextBeliefExtractorAgent._extract_basic_beliefs_from_belief(
|
||||
trigger.condition
|
||||
)
|
||||
|
||||
return beliefs
|
||||
|
||||
# TODO Copied from an incomplete version of the program manager. Use that one instead.
|
||||
@staticmethod
|
||||
def _extract_basic_beliefs_from_belief(belief: Belief) -> list[SemanticBelief]:
|
||||
if isinstance(belief, InferredBelief):
|
||||
return TextBeliefExtractorAgent._extract_basic_beliefs_from_belief(
|
||||
belief.left
|
||||
) + TextBeliefExtractorAgent._extract_basic_beliefs_from_belief(belief.right)
|
||||
return [belief]
|
||||
|
||||
async def _user_said(self, text: str):
|
||||
"""
|
||||
Create a belief for the user's full speech.
|
||||
|
||||
:param text: User's transcribed text.
|
||||
"""
|
||||
belief = {"beliefs": {"user_said": [text]}, "type": "belief_extraction_text"}
|
||||
payload = json.dumps(belief)
|
||||
|
||||
belief_msg = InternalMessage(
|
||||
@@ -60,6 +139,207 @@ class TextBeliefExtractorAgent(BaseAgent):
|
||||
body=payload,
|
||||
thread="beliefs",
|
||||
)
|
||||
|
||||
await self.send(belief_msg)
|
||||
self.logger.info("Sent %d beliefs to the belief collector.", len(belief["beliefs"]))
|
||||
|
||||
async def _infer_new_beliefs(self):
|
||||
"""
|
||||
Process conversation history to extract beliefs, semantically. Any changed beliefs are sent
|
||||
to the BDI core.
|
||||
"""
|
||||
# Return instantly if there are no beliefs to infer
|
||||
if not self.available_beliefs:
|
||||
return
|
||||
|
||||
candidate_beliefs = await self._infer_turn()
|
||||
belief_changes = BeliefMessage()
|
||||
for belief_key, belief_value in candidate_beliefs.items():
|
||||
if belief_value is None:
|
||||
continue
|
||||
old_belief_value = self.beliefs.get(belief_key)
|
||||
if belief_value == old_belief_value:
|
||||
continue
|
||||
|
||||
self.beliefs[belief_key] = belief_value
|
||||
|
||||
belief = InternalBelief(name=belief_key, arguments=None)
|
||||
if belief_value:
|
||||
belief_changes.create.append(belief)
|
||||
else:
|
||||
belief_changes.delete.append(belief)
|
||||
|
||||
# Return if there were no changes in beliefs
|
||||
if not belief_changes.has_values():
|
||||
return
|
||||
|
||||
beliefs_message = InternalMessage(
|
||||
to=settings.agent_settings.bdi_core_name,
|
||||
sender=self.name,
|
||||
body=belief_changes.model_dump_json(),
|
||||
thread="beliefs",
|
||||
)
|
||||
await self.send(beliefs_message)
|
||||
|
||||
@staticmethod
|
||||
def _split_into_chunks[T](items: list[T], n: int) -> list[list[T]]:
|
||||
k, m = divmod(len(items), n)
|
||||
return [items[i * k + min(i, m) : (i + 1) * k + min(i + 1, m)] for i in range(n)]
|
||||
|
||||
async def _infer_turn(self) -> dict:
|
||||
"""
|
||||
Process the stored conversation history to extract semantic beliefs. Returns a dict of
|
||||
beliefs that have been set to ``True``, ``False`` or ``None``.
|
||||
|
||||
:return: A dict mapping belief names to a value ``True``, ``False`` or ``None``.
|
||||
"""
|
||||
n_parallel = max(1, min(settings.llm_settings.n_parallel - 1, len(self.available_beliefs)))
|
||||
all_beliefs = await asyncio.gather(
|
||||
*[
|
||||
self._infer_beliefs(self.conversation, beliefs)
|
||||
for beliefs in self._split_into_chunks(self.available_beliefs, n_parallel)
|
||||
]
|
||||
)
|
||||
retval = {}
|
||||
for beliefs in all_beliefs:
|
||||
if beliefs is None:
|
||||
continue
|
||||
retval.update(beliefs)
|
||||
return retval
|
||||
|
||||
@staticmethod
|
||||
def _create_belief_schema(belief: SemanticBelief) -> tuple[str, dict]:
|
||||
# TODO: use real belief names
|
||||
return belief.name or slugify(belief.description), {
|
||||
"type": ["boolean", "null"],
|
||||
"description": belief.description,
|
||||
}
|
||||
|
||||
@staticmethod
|
||||
def _create_beliefs_schema(beliefs: list[SemanticBelief]) -> dict:
|
||||
belief_schemas = [
|
||||
TextBeliefExtractorAgent._create_belief_schema(belief) for belief in beliefs
|
||||
]
|
||||
|
||||
return {
|
||||
"type": "object",
|
||||
"properties": dict(belief_schemas),
|
||||
"required": [name for name, _ in belief_schemas],
|
||||
}
|
||||
|
||||
@staticmethod
|
||||
def _format_message(message: ChatMessage):
|
||||
return f"{message.role.upper()}:\n{message.content}"
|
||||
|
||||
@staticmethod
|
||||
def _format_conversation(conversation: ChatHistory):
|
||||
return "\n\n".join(
|
||||
[TextBeliefExtractorAgent._format_message(message) for message in conversation.messages]
|
||||
)
|
||||
|
||||
@staticmethod
|
||||
def _format_beliefs(beliefs: list[SemanticBelief]):
|
||||
# TODO: use real belief names
|
||||
return "\n".join(
|
||||
[
|
||||
f"- {belief.name or slugify(belief.description)}: {belief.description}"
|
||||
for belief in beliefs
|
||||
]
|
||||
)
|
||||
|
||||
async def _infer_beliefs(
|
||||
self,
|
||||
conversation: ChatHistory,
|
||||
beliefs: list[SemanticBelief],
|
||||
) -> dict | None:
|
||||
"""
|
||||
Infer given beliefs based on the given conversation.
|
||||
:param conversation: The conversation to infer beliefs from.
|
||||
:param beliefs: The beliefs to infer.
|
||||
:return: A dict containing belief names and a boolean whether they hold, or None if the
|
||||
belief cannot be inferred based on the given conversation.
|
||||
"""
|
||||
example = {
|
||||
"example_belief": True,
|
||||
}
|
||||
|
||||
prompt = f"""{self._format_conversation(conversation)}
|
||||
|
||||
Given the above conversation, what beliefs can be inferred?
|
||||
If there is no relevant information about a belief, give null.
|
||||
In case messages conflict, prefer using the most recent messages for inference.
|
||||
|
||||
Choose from the following list of beliefs, formatted as (belief_name, description):
|
||||
{self._format_beliefs(beliefs)}
|
||||
|
||||
Respond with a JSON similar to the following, but with the property names as given above:
|
||||
{json.dumps(example, indent=2)}
|
||||
"""
|
||||
|
||||
schema = self._create_beliefs_schema(beliefs)
|
||||
|
||||
return await self._retry_query_llm(prompt, schema)
|
||||
|
||||
async def _retry_query_llm(self, prompt: str, schema: dict, tries: int = 3) -> dict | None:
|
||||
"""
|
||||
Query the LLM with the given prompt and schema, return an instance of a dict conforming
|
||||
to this schema. Try ``tries`` times, or return None.
|
||||
|
||||
:param prompt: Prompt to be queried.
|
||||
:param schema: Schema to be queried.
|
||||
:return: An instance of a dict conforming to this schema, or None if failed.
|
||||
"""
|
||||
try_count = 0
|
||||
while try_count < tries:
|
||||
try_count += 1
|
||||
|
||||
try:
|
||||
return await self._query_llm(prompt, schema)
|
||||
except (httpx.HTTPError, json.JSONDecodeError, KeyError) as e:
|
||||
if try_count < tries:
|
||||
continue
|
||||
self.logger.exception(
|
||||
"Failed to get LLM response after %d tries.",
|
||||
try_count,
|
||||
exc_info=e,
|
||||
)
|
||||
|
||||
return None
|
||||
|
||||
@staticmethod
|
||||
async def _query_llm(prompt: str, schema: dict) -> dict:
|
||||
"""
|
||||
Query an LLM with the given prompt and schema, return an instance of a dict conforming to
|
||||
that schema.
|
||||
|
||||
:param prompt: The prompt to be queried.
|
||||
:param schema: Schema to use during response.
|
||||
:return: A dict conforming to this schema.
|
||||
:raises httpx.HTTPStatusError: If the LLM server responded with an error.
|
||||
:raises json.JSONDecodeError: If the LLM response was not valid JSON. May happen if the
|
||||
response was cut off early due to length limitations.
|
||||
:raises KeyError: If the LLM server responded with no error, but the response was invalid.
|
||||
"""
|
||||
async with httpx.AsyncClient() as client:
|
||||
response = await client.post(
|
||||
settings.llm_settings.local_llm_url,
|
||||
json={
|
||||
"model": settings.llm_settings.local_llm_model,
|
||||
"messages": [{"role": "user", "content": prompt}],
|
||||
"response_format": {
|
||||
"type": "json_schema",
|
||||
"json_schema": {
|
||||
"name": "Beliefs",
|
||||
"strict": True,
|
||||
"schema": schema,
|
||||
},
|
||||
},
|
||||
"reasoning_effort": "low",
|
||||
"temperature": settings.llm_settings.code_temperature,
|
||||
"stream": False,
|
||||
},
|
||||
timeout=None,
|
||||
)
|
||||
response.raise_for_status()
|
||||
|
||||
response_json = response.json()
|
||||
json_message = response_json["choices"][0]["message"]["content"]
|
||||
return json.loads(json_message)
|
||||
|
||||
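To make the schema construction above concrete, here is a sketch of what `_create_beliefs_schema` would produce for the two semantic beliefs used in the tests later in this diff (`is_pirate`, `no_more_booze`); the descriptions are abbreviated:

```python
# Sketch of the response_format schema built for two beliefs (descriptions shortened).
beliefs_schema = {
    "type": "object",
    "properties": {
        "is_pirate": {
            "type": ["boolean", "null"],
            "description": "The user is a pirate.",
        },
        "no_more_booze": {
            "type": ["boolean", "null"],
            "description": "There is no more alcohol.",
        },
    },
    "required": ["is_pirate", "no_more_booze"],
}
```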
@@ -3,11 +3,14 @@ import json
|
||||
|
||||
import zmq
|
||||
import zmq.asyncio as azmq
|
||||
from pydantic import ValidationError
|
||||
from zmq.asyncio import Context
|
||||
|
||||
from control_backend.agents import BaseAgent
|
||||
from control_backend.agents.actuation.robot_gesture_agent import RobotGestureAgent
|
||||
from control_backend.core.agent_system import InternalMessage
|
||||
from control_backend.core.config import settings
|
||||
from control_backend.schemas.ri_message import PauseCommand
|
||||
|
||||
from ..actuation.robot_speech_agent import RobotSpeechAgent
|
||||
from ..perception import VADAgent
|
||||
@@ -182,6 +185,7 @@ class RICommunicationAgent(BaseAgent):
|
||||
self._req_socket.bind(addr)
|
||||
case "actuation":
|
||||
gesture_data = port_data.get("gestures", [])
|
||||
single_gesture_data = port_data.get("single_gestures", [])
|
||||
robot_speech_agent = RobotSpeechAgent(
|
||||
settings.agent_settings.robot_speech_name,
|
||||
address=addr,
|
||||
@@ -192,6 +196,7 @@ class RICommunicationAgent(BaseAgent):
|
||||
address=addr,
|
||||
bind=bind,
|
||||
gesture_data=gesture_data,
|
||||
single_gesture_data=single_gesture_data,
|
||||
)
|
||||
await robot_speech_agent.start()
|
||||
await asyncio.sleep(0.1) # Small delay
|
||||
@@ -296,3 +301,11 @@ class RICommunicationAgent(BaseAgent):
|
||||
self.logger.debug("Restarting communication negotiation.")
|
||||
if await self._negotiate_connection(max_retries=1):
|
||||
self.connected = True
|
||||
|
||||
async def handle_message(self, msg : InternalMessage):
|
||||
try:
|
||||
pause_command = PauseCommand.model_validate_json(msg.body)
|
||||
self._req_socket.send_json(pause_command.model_dump())
|
||||
self.logger.debug(self._req_socket.recv_json())
|
||||
except ValidationError:
|
||||
self.logger.warning("Incorrect message format for PauseCommand.")
|
||||
|
||||
@@ -64,11 +64,12 @@ class LLMAgent(BaseAgent):
|
||||
|
||||
:param message: The parsed prompt message containing text, norms, and goals.
|
||||
"""
|
||||
full_message = ""
|
||||
async for chunk in self._query_llm(message.text, message.norms, message.goals):
|
||||
await self._send_reply(chunk)
|
||||
self.logger.debug(
|
||||
"Finished processing BDI message. Response sent in chunks to BDI core."
|
||||
)
|
||||
full_message += chunk
|
||||
self.logger.debug("Finished processing BDI message. Response sent in chunks to BDI core.")
|
||||
await self._send_full_reply(full_message)
|
||||
|
||||
async def _send_reply(self, msg: str):
|
||||
"""
|
||||
@@ -83,6 +84,19 @@ class LLMAgent(BaseAgent):
|
||||
)
|
||||
await self.send(reply)
|
||||
|
||||
async def _send_full_reply(self, msg: str):
|
||||
"""
|
||||
Sends a response message (full) to agents that need it.
|
||||
|
||||
:param msg: The text content of the message.
|
||||
"""
|
||||
message = InternalMessage(
|
||||
to=settings.agent_settings.text_belief_extractor_name,
|
||||
sender=self.name,
|
||||
body=msg,
|
||||
)
|
||||
await self.send(message)
|
||||
|
||||
async def _query_llm(
|
||||
self, prompt: str, norms: list[str], goals: list[str]
|
||||
) -> AsyncGenerator[str]:
|
||||
@@ -172,7 +186,7 @@ class LLMAgent(BaseAgent):
|
||||
json={
|
||||
"model": settings.llm_settings.local_llm_model,
|
||||
"messages": messages,
|
||||
"temperature": 0.3,
|
||||
"temperature": settings.llm_settings.chat_temperature,
|
||||
"stream": True,
|
||||
},
|
||||
) as response:
|
||||
|
||||
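The LLMAgent change above still streams chunks to the BDI core, but now also accumulates them and forwards the full text to the text belief extractor once streaming ends. A self-contained sketch of that accumulate-while-streaming pattern (names here are illustrative, not from the codebase):

```python
import asyncio
from collections.abc import AsyncGenerator


async def fake_stream() -> AsyncGenerator[str, None]:
    # Stand-in for the chunks yielded by _query_llm.
    for chunk in ["Hello", ", ", "world"]:
        yield chunk


async def main() -> None:
    full_message = ""
    async for chunk in fake_stream():
        # each chunk would be forwarded here (_send_reply in the diff)...
        full_message += chunk
    # ...and the assembled text is sent once after the stream ends (_send_full_reply).
    print(full_message)  # Hello, world


asyncio.run(main())
```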
src/control_backend/agents/mock_agents/test_pause_ri.py (new file, 68 lines)
@@ -0,0 +1,68 @@
|
||||
import asyncio
|
||||
import json
|
||||
|
||||
import zmq
|
||||
from zmq.asyncio import Context
|
||||
|
||||
from control_backend.agents.base import BaseAgent
|
||||
from control_backend.core.agent_system import InternalMessage
|
||||
from control_backend.core.config import settings
|
||||
|
||||
|
||||
class TestPauseAgent(BaseAgent):
|
||||
def __init__(self, name: str):
|
||||
super().__init__(name)
|
||||
|
||||
async def setup(self):
|
||||
context = Context.instance()
|
||||
self.pub_socket = context.socket(zmq.PUB)
|
||||
self.pub_socket.connect(settings.zmq_settings.internal_pub_address)
|
||||
|
||||
self.add_behavior(self._pause_command_loop())
|
||||
self.logger.debug("TestPauseAgent setup complete.")
|
||||
|
||||
async def _pause_command_loop(self):
|
||||
print("Starting Pause command test loop.")
|
||||
while True:
|
||||
pause_command = {
|
||||
"endpoint": "pause",
|
||||
"data": True,
|
||||
}
|
||||
|
||||
message = InternalMessage(
|
||||
to="ri_communication_agent",
|
||||
sender=self.name,
|
||||
body=json.dumps(pause_command),
|
||||
)
|
||||
await self.send(message)
|
||||
|
||||
# User interrupt message
|
||||
data = {
|
||||
"type": "pause",
|
||||
"context": True,
|
||||
}
|
||||
await self.pub_socket.send_multipart([b"button_pressed", json.dumps(data).encode()])
|
||||
|
||||
self.logger.info("Pausing robot actions.")
|
||||
await asyncio.sleep(15) # Simulate delay between messages
|
||||
|
||||
pause_command = {
|
||||
"endpoint": "pause",
|
||||
"data": False,
|
||||
}
|
||||
message = InternalMessage(
|
||||
to="ri_communication_agent",
|
||||
sender=self.name,
|
||||
body=json.dumps(pause_command),
|
||||
)
|
||||
await self.send(message)
|
||||
|
||||
# User interrupt message
|
||||
data = {
|
||||
"type": "pause",
|
||||
"context": False,
|
||||
}
|
||||
await self.pub_socket.send_multipart([b"button_pressed", json.dumps(data).encode()])
|
||||
|
||||
self.logger.info("Resuming robot actions.")
|
||||
await asyncio.sleep(15) # Simulate delay between messages
|
||||
@@ -7,6 +7,7 @@ import zmq.asyncio as azmq
|
||||
|
||||
from control_backend.agents import BaseAgent
|
||||
from control_backend.core.config import settings
|
||||
from control_backend.schemas.internal_message import InternalMessage
|
||||
|
||||
from ...schemas.program_status import PROGRAM_STATUS, ProgramStatus
|
||||
from .transcription_agent.transcription_agent import TranscriptionAgent
|
||||
@@ -86,6 +87,12 @@ class VADAgent(BaseAgent):
|
||||
self.audio_buffer = np.array([], dtype=np.float32)
|
||||
self.i_since_speech = settings.behaviour_settings.vad_initial_since_speech
|
||||
self._ready = asyncio.Event()
|
||||
|
||||
# Pause control
|
||||
self._reset_needed = False
|
||||
self._paused = asyncio.Event()
|
||||
self._paused.set() # Not paused at start
|
||||
|
||||
self.model = None
|
||||
|
||||
async def setup(self):
|
||||
@@ -213,6 +220,16 @@ class VADAgent(BaseAgent):
|
||||
"""
|
||||
await self._ready.wait()
|
||||
while self._running:
|
||||
await self._paused.wait()
|
||||
|
||||
# After being unpaused, reset stream and buffers
|
||||
if self._reset_needed:
|
||||
self.logger.debug("Resuming: resetting stream and buffers.")
|
||||
await self._reset_stream()
|
||||
self.audio_buffer = np.array([], dtype=np.float32)
|
||||
self.i_since_speech = settings.behaviour_settings.vad_initial_since_speech
|
||||
self._reset_needed = False
|
||||
|
||||
assert self.audio_in_poller is not None
|
||||
data = await self.audio_in_poller.poll()
|
||||
if data is None:
|
||||
@@ -254,3 +271,27 @@ class VADAgent(BaseAgent):
|
||||
# At this point, we know that the speech has ended.
|
||||
# Prepend the last chunk that had no speech, for a more fluent boundary
|
||||
self.audio_buffer = chunk
|
||||
|
||||
async def handle_message(self, msg: InternalMessage):
|
||||
"""
|
||||
Handle incoming messages.
|
||||
|
||||
Expects messages to pause or resume the VAD processing from User Interrupt Agent.
|
||||
|
||||
:param msg: The received internal message.
|
||||
"""
|
||||
sender = msg.sender
|
||||
|
||||
if sender == settings.agent_settings.user_interrupt_name:
|
||||
if msg.body == "PAUSE":
|
||||
self.logger.info("Pausing VAD processing.")
|
||||
self._paused.clear()
|
||||
# If the robot needs to pick up speaking where it left off, do not set _reset_needed
|
||||
self._reset_needed = True
|
||||
elif msg.body == "RESUME":
|
||||
self.logger.info("Resuming VAD processing.")
|
||||
self._paused.set()
|
||||
else:
|
||||
self.logger.warning(f"Unknown command from User Interrupt Agent: {msg.body}")
|
||||
else:
|
||||
self.logger.debug(f"Ignoring message from unknown sender: {sender}")
|
||||
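The VADAgent pause handling above relies on the asyncio.Event idiom where a *set* event means "not paused" and `wait()` only blocks while the event is cleared. A stand-alone sketch of that gate (class and names are illustrative, not from the codebase):

```python
import asyncio


class PausableLoop:
    """Illustrative reduction of the pause gate VADAgent uses."""

    def __init__(self) -> None:
        self._paused = asyncio.Event()
        self._paused.set()  # set == running, clear == paused

    def pause(self) -> None:
        self._paused.clear()

    def resume(self) -> None:
        self._paused.set()

    async def run(self, ticks: int = 5) -> None:
        for i in range(ticks):
            await self._paused.wait()  # blocks only while paused
            print("processing chunk", i)
            await asyncio.sleep(0.1)


# Usage: asyncio.run(PausableLoop().run())
```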
@@ -0,0 +1,189 @@
|
||||
import json
|
||||
|
||||
import zmq
|
||||
from zmq.asyncio import Context
|
||||
|
||||
from control_backend.agents import BaseAgent
|
||||
from control_backend.core.agent_system import InternalMessage
|
||||
from control_backend.core.config import settings
|
||||
from control_backend.schemas.ri_message import (
|
||||
GestureCommand,
|
||||
PauseCommand,
|
||||
RIEndpoint,
|
||||
SpeechCommand,
|
||||
)
|
||||
|
||||
|
||||
class UserInterruptAgent(BaseAgent):
|
||||
"""
|
||||
User Interrupt Agent.
|
||||
|
||||
This agent receives button_pressed events from the external HTTP API
|
||||
(via ZMQ) and uses the associated context to trigger one of the following actions:
|
||||
|
||||
- Send a prioritized message to the `RobotSpeechAgent`
|
||||
- Send a prioritized gesture to the `RobotGestureAgent`
|
||||
- Send a belief override to the `BDIProgramManager` in order to activate a
|
||||
trigger/conditional norm or complete a goal.
|
||||
|
||||
Prioritized actions clear the current RI queue before inserting the new item,
|
||||
ensuring they are executed immediately after Pepper's current action has been fulfilled.
|
||||
|
||||
:ivar sub_socket: The ZMQ SUB socket used to receive user interrupts.
|
||||
"""
|
||||
|
||||
def __init__(self, **kwargs):
|
||||
super().__init__(**kwargs)
|
||||
self.sub_socket = None
|
||||
|
||||
async def _receive_button_event(self):
|
||||
"""
|
||||
The behaviour of the UserInterruptAgent.
|
||||
Continuous loop that receives button_pressed events from the button_pressed HTTP endpoint.
|
||||
These events contain a type and a context.
|
||||
|
||||
These are the different types and contexts:
|
||||
- type: "speech", context: string that the robot has to say.
|
||||
- type: "gesture", context: single gesture name that the robot has to perform.
|
||||
- type: "override", context: belief_id that overrides the goal/trigger/conditional norm.
|
||||
"""
|
||||
while True:
|
||||
topic, body = await self.sub_socket.recv_multipart()
|
||||
|
||||
try:
|
||||
event_data = json.loads(body)
|
||||
event_type = event_data.get("type") # e.g., "speech", "gesture"
|
||||
event_context = event_data.get("context") # e.g., "Hello, I am Pepper!"
|
||||
except json.JSONDecodeError:
|
||||
self.logger.error("Received invalid JSON payload on topic %s", topic)
|
||||
continue
|
||||
|
||||
if event_type == "speech":
|
||||
await self._send_to_speech_agent(event_context)
|
||||
self.logger.info(
|
||||
"Forwarded button press (speech) with context '%s' to RobotSpeechAgent.",
|
||||
event_context,
|
||||
)
|
||||
elif event_type == "gesture":
|
||||
await self._send_to_gesture_agent(event_context)
|
||||
self.logger.info(
|
||||
"Forwarded button press (gesture) with context '%s' to RobotGestureAgent.",
|
||||
event_context,
|
||||
)
|
||||
elif event_type == "override":
|
||||
await self._send_to_program_manager(event_context)
|
||||
self.logger.info(
|
||||
"Forwarded button press (override) with context '%s' to BDIProgramManager.",
|
||||
event_context,
|
||||
)
|
||||
elif event_type == "pause":
|
||||
await self._send_pause_command(event_context)
|
||||
if event_context:
|
||||
self.logger.info("Sent pause command.")
|
||||
else:
|
||||
self.logger.info("Sent resume command.")
|
||||
else:
|
||||
self.logger.warning(
|
||||
"Received button press with unknown type '%s' (context: '%s').",
|
||||
event_type,
|
||||
event_context,
|
||||
)
|
||||
|
||||
async def _send_to_speech_agent(self, text_to_say: str):
|
||||
"""
|
||||
Method to send a prioritized speech command to the RobotSpeechAgent.
|
||||
|
||||
:param text_to_say: The string that the robot has to say.
|
||||
"""
|
||||
cmd = SpeechCommand(data=text_to_say, is_priority=True)
|
||||
out_msg = InternalMessage(
|
||||
to=settings.agent_settings.robot_speech_name,
|
||||
sender=self.name,
|
||||
body=cmd.model_dump_json(),
|
||||
)
|
||||
await self.send(out_msg)
|
||||
|
||||
async def _send_to_gesture_agent(self, single_gesture_name: str):
|
||||
"""
|
||||
Method to send a prioritized gesture command to the RobotGestureAgent.
|
||||
|
||||
:param single_gesture_name: The gesture tag that the robot has to perform.
|
||||
"""
|
||||
# the endpoint is set to always be GESTURE_SINGLE for user interrupts
|
||||
cmd = GestureCommand(
|
||||
endpoint=RIEndpoint.GESTURE_SINGLE, data=single_gesture_name, is_priority=True
|
||||
)
|
||||
out_msg = InternalMessage(
|
||||
to=settings.agent_settings.robot_gesture_name,
|
||||
sender=self.name,
|
||||
body=cmd.model_dump_json(),
|
||||
)
|
||||
await self.send(out_msg)
|
||||
|
||||
async def _send_to_program_manager(self, belief_id: str):
|
||||
"""
|
||||
Send a button_override belief to the BDIProgramManager.
|
||||
|
||||
:param belief_id: The belief_id that overrides the goal/trigger/conditional norm.
|
||||
this id can belong to a basic belief or an inferred belief.
|
||||
See also: https://utrechtuniversity.youtrack.cloud/articles/N25B-A-27/UI-components
|
||||
"""
|
||||
data = {"belief": belief_id}
|
||||
message = InternalMessage(
|
||||
to=settings.agent_settings.bdi_program_manager_name,
|
||||
sender=self.name,
|
||||
body=json.dumps(data),
|
||||
thread="belief_override_id",
|
||||
)
|
||||
await self.send(message)
|
||||
self.logger.info(
|
||||
"Sent button_override belief with id '%s' to Program manager.",
|
||||
belief_id,
|
||||
)
|
||||
|
||||
async def _send_pause_command(self, pause : bool):
|
||||
"""
|
||||
Send a pause command to the Robot Interface via the RI Communication Agent.
|
||||
Send a pause command to the other internal agents; for now just VAD agent.
|
||||
"""
|
||||
cmd = PauseCommand(data=pause)
|
||||
message = InternalMessage(
|
||||
to=settings.agent_settings.ri_communication_name,
|
||||
sender=self.name,
|
||||
body=cmd.model_dump_json(),
|
||||
)
|
||||
await self.send(message)
|
||||
|
||||
if pause:
|
||||
# Send pause to VAD agent
|
||||
vad_message = InternalMessage(
|
||||
to=settings.agent_settings.vad_name,
|
||||
sender=self.name,
|
||||
body="PAUSE",
|
||||
)
|
||||
await self.send(vad_message)
|
||||
self.logger.info("Sent pause command to VAD Agent and RI Communication Agent.")
|
||||
else:
|
||||
# Send resume to VAD agent
|
||||
vad_message = InternalMessage(
|
||||
to=settings.agent_settings.vad_name,
|
||||
sender=self.name,
|
||||
body="RESUME",
|
||||
)
|
||||
await self.send(vad_message)
|
||||
self.logger.info("Sent resume command to VAD Agent and RI Communication Agent.")
|
||||
|
||||
async def setup(self):
|
||||
"""
|
||||
Initialize the agent.
|
||||
|
||||
Connects the internal ZMQ SUB socket and subscribes to the 'button_pressed' topic.
|
||||
Starts the background behavior to receive the user interrupts.
|
||||
"""
|
||||
context = Context.instance()
|
||||
|
||||
self.sub_socket = context.socket(zmq.SUB)
|
||||
self.sub_socket.connect(settings.zmq_settings.internal_sub_address)
|
||||
self.sub_socket.subscribe("button_pressed")
|
||||
|
||||
self.add_behavior(self._receive_button_event())
|
||||
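For reference, example payloads for the four event types this agent handles (the speech text comes from the inline comment above; the gesture name and belief id are hypothetical placeholders):

```python
speech_event = {"type": "speech", "context": "Hello, I am Pepper!"}
gesture_event = {"type": "gesture", "context": "wave"}        # hypothetical gesture name
override_event = {"type": "override", "context": "belief-1"}  # hypothetical belief_id
pause_event = {"type": "pause", "context": True}              # True pauses, False resumes
```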
src/control_backend/api/v1/endpoints/button_pressed.py (new file, 31 lines)
@@ -0,0 +1,31 @@
import logging

from fastapi import APIRouter, Request

from control_backend.schemas.events import ButtonPressedEvent

logger = logging.getLogger(__name__)
router = APIRouter()


@router.post("/button_pressed", status_code=202)
async def receive_button_event(event: ButtonPressedEvent, request: Request):
    """
    Endpoint to handle external button press events.

    Validates the event payload and publishes it to the internal 'button_pressed' topic.
    Subscribers (in this case user_interrupt_agent) will pick this up to trigger
    specific behaviors or state changes.

    :param event: The parsed ButtonPressedEvent object.
    :param request: The FastAPI request object.
    """
    logger.debug("Received button event: %s | %s", event.type, event.context)

    topic = b"button_pressed"
    body = event.model_dump_json().encode()

    pub_socket = request.app.state.endpoints_pub_socket
    await pub_socket.send_multipart([topic, body])

    return {"status": "Event received"}
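A usage sketch for this endpoint: posting a button event with httpx. The host, port, and `/api/v1` prefix are assumptions about how the router is mounted, not something the diff confirms:

```python
import httpx

# Assumed base URL; adjust host/port/prefix to the actual deployment.
response = httpx.post(
    "http://localhost:8000/api/v1/button_pressed",
    json={"type": "speech", "context": "Hello, I am Pepper!"},
)
assert response.status_code == 202  # the route is declared with status_code=202
```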
@@ -1,6 +1,6 @@
from fastapi.routing import APIRouter

from control_backend.api.v1.endpoints import logs, message, program, robot, sse
from control_backend.api.v1.endpoints import button_pressed, logs, message, program, robot, sse

api_router = APIRouter()

@@ -13,3 +13,5 @@ api_router.include_router(robot.router, prefix="/robot", tags=["Pings", "Command
api_router.include_router(logs.router, tags=["Logs"])

api_router.include_router(program.router, tags=["Program"])

api_router.include_router(button_pressed.router, tags=["Button Pressed Events"])
@@ -48,6 +48,7 @@ class AgentSettings(BaseModel):
|
||||
ri_communication_name: str = "ri_communication_agent"
|
||||
robot_speech_name: str = "robot_speech_agent"
|
||||
robot_gesture_name: str = "robot_gesture_agent"
|
||||
user_interrupt_name: str = "user_interrupt_agent"
|
||||
|
||||
|
||||
class BehaviourSettings(BaseModel):
|
||||
@@ -64,6 +65,7 @@ class BehaviourSettings(BaseModel):
|
||||
:ivar transcription_words_per_minute: Estimated words per minute for transcription timing.
|
||||
:ivar transcription_words_per_token: Estimated words per token for transcription timing.
|
||||
:ivar transcription_token_buffer: Buffer for transcription tokens.
|
||||
:ivar conversation_history_length_limit: The maximum number of messages to extract beliefs from.
|
||||
"""
|
||||
|
||||
sleep_s: float = 1.0
|
||||
@@ -81,6 +83,9 @@ class BehaviourSettings(BaseModel):
|
||||
transcription_words_per_token: float = 0.75 # (3 words = 4 tokens)
|
||||
transcription_token_buffer: int = 10
|
||||
|
||||
# Text belief extractor settings
|
||||
conversation_history_length_limit: int = 10
|
||||
|
||||
|
||||
class LLMSettings(BaseModel):
|
||||
"""
|
||||
@@ -88,10 +93,17 @@ class LLMSettings(BaseModel):
|
||||
|
||||
:ivar local_llm_url: URL for the local LLM API.
|
||||
:ivar local_llm_model: Name of the local LLM model to use.
|
||||
:ivar chat_temperature: The temperature to use while generating chat responses.
|
||||
:ivar code_temperature: The temperature to use while generating code-like responses like during
|
||||
belief inference.
|
||||
:ivar n_parallel: The number of parallel calls allowed to be made to the LLM.
|
||||
"""
|
||||
|
||||
local_llm_url: str = "http://localhost:1234/v1/chat/completions"
|
||||
local_llm_model: str = "gpt-oss"
|
||||
chat_temperature: float = 1.0
|
||||
code_temperature: float = 0.3
|
||||
n_parallel: int = 4
|
||||
|
||||
|
||||
class VADSettings(BaseModel):
|
||||
|
||||
@@ -40,6 +40,10 @@ from control_backend.agents.communication import RICommunicationAgent
|
||||
from control_backend.agents.llm import LLMAgent
|
||||
|
||||
# Other backend imports
|
||||
from control_backend.agents.mock_agents.test_pause_ri import TestPauseAgent
|
||||
|
||||
# User Interrupt Agent
|
||||
from control_backend.agents.user_interrupt.user_interrupt_agent import UserInterruptAgent
|
||||
from control_backend.api.v1.router import api_router
|
||||
from control_backend.core.config import settings
|
||||
from control_backend.logging import setup_logging
|
||||
@@ -138,6 +142,18 @@ async def lifespan(app: FastAPI):
|
||||
"name": settings.agent_settings.bdi_program_manager_name,
|
||||
},
|
||||
),
|
||||
"TestPauseAgent": (
|
||||
TestPauseAgent,
|
||||
{
|
||||
"name": "pause_test_agent",
|
||||
},
|
||||
),
|
||||
"UserInterruptAgent": (
|
||||
UserInterruptAgent,
|
||||
{
|
||||
"name": settings.agent_settings.user_interrupt_name,
|
||||
},
|
||||
),
|
||||
}
|
||||
|
||||
agents = []
|
||||
|
||||
@@ -6,18 +6,27 @@ class Belief(BaseModel):
    Represents a single belief in the BDI system.

    :ivar name: The functor or name of the belief (e.g., 'user_said').
    :ivar arguments: A list of string arguments for the belief.
    :ivar replace: If True, existing beliefs with this name should be replaced by this one.
    :ivar arguments: A list of string arguments for the belief, or None if the belief has no
        arguments.
    """

    name: str
    arguments: list[str]
    replace: bool = False
    arguments: list[str] | None


class BeliefMessage(BaseModel):
    """
    A container for transporting a list of beliefs between agents.
    A container for communicating beliefs between agents.

    :ivar create: Beliefs to create.
    :ivar delete: Beliefs to delete.
    :ivar replace: Beliefs to replace. Deletes all beliefs with the same name, replacing them with
        one new belief.
    """

    beliefs: list[Belief]
    create: list[Belief] = []
    delete: list[Belief] = []
    replace: list[Belief] = []

    def has_values(self) -> bool:
        return len(self.create) > 0 or len(self.delete) > 0 or len(self.replace) > 0
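A small usage sketch of the new message shape, mirroring how the belief collector and text belief extractor build it elsewhere in this diff:

```python
from control_backend.schemas.belief_message import Belief, BeliefMessage

# Create one belief and delete another in a single change set.
changes = BeliefMessage(
    create=[Belief(name="user_said", arguments=["Hello"])],
    delete=[Belief(name="no_more_booze", arguments=None)],
)
assert changes.has_values()
payload = changes.model_dump_json()  # sent as the InternalMessage body on thread="beliefs"
```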
src/control_backend/schemas/chat_history.py (new file, 10 lines)
@@ -0,0 +1,10 @@
from pydantic import BaseModel


class ChatMessage(BaseModel):
    role: str
    content: str


class ChatHistory(BaseModel):
    messages: list[ChatMessage]
src/control_backend/schemas/events.py (new file, 6 lines)
@@ -0,0 +1,6 @@
from pydantic import BaseModel


class ButtonPressedEvent(BaseModel):
    type: str
    context: str
@@ -194,7 +194,7 @@ class Phase(ProgramElement):
    """

    name: str = ""
    norms: list[Norm]
    norms: list[BasicNorm | ConditionalNorm]
    goals: list[Goal]
    triggers: list[Trigger]
@@ -14,6 +14,7 @@ class RIEndpoint(str, Enum):
    GESTURE_TAG = "actuate/gesture/tag"
    PING = "ping"
    NEGOTIATE_PORTS = "negotiate/ports"
    PAUSE = "pause"


class RIMessage(BaseModel):
@@ -38,6 +39,7 @@ class SpeechCommand(RIMessage):

    endpoint: RIEndpoint = RIEndpoint(RIEndpoint.SPEECH)
    data: str
    is_priority: bool = False


class GestureCommand(RIMessage):
@@ -52,6 +54,7 @@ class GestureCommand(RIMessage):
        RIEndpoint.GESTURE_SINGLE, RIEndpoint.GESTURE_TAG
    ]
    data: str
    is_priority: bool = False

    @model_validator(mode="after")
    def check_endpoint(self):
@@ -62,3 +65,14 @@ class GestureCommand(RIMessage):
        if self.endpoint not in allowed:
            raise ValueError("endpoint must be GESTURE_SINGLE or GESTURE_TAG")
        return self

class PauseCommand(RIMessage):
    """
    A specific command to pause or unpause the robot's actions.

    :ivar endpoint: Fixed to ``RIEndpoint.PAUSE``.
    :ivar data: A boolean indicating whether to pause (True) or unpause (False).
    """

    endpoint: RIEndpoint = RIEndpoint(RIEndpoint.PAUSE)
    data: bool
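And a minimal round-trip sketch for the new PauseCommand, matching how UserInterruptAgent serializes it and RICommunicationAgent validates it:

```python
from control_backend.schemas.ri_message import PauseCommand, RIEndpoint

cmd = PauseCommand(data=True)  # True = pause, False = unpause
body = cmd.model_dump_json()   # roughly '{"endpoint": "pause", "data": true}'
restored = PauseCommand.model_validate_json(body)
assert restored.endpoint == RIEndpoint.PAUSE and restored.data is True
```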
@@ -64,7 +64,7 @@ async def test_handle_message_sends_command():
|
||||
agent = mock_speech_agent()
|
||||
agent.pubsocket = pubsocket
|
||||
|
||||
payload = {"endpoint": "actuate/speech", "data": "hello"}
|
||||
payload = {"endpoint": "actuate/speech", "data": "hello", "is_priority": False}
|
||||
msg = InternalMessage(to="robot", sender="tester", body=json.dumps(payload))
|
||||
|
||||
await agent.handle_message(msg)
|
||||
@@ -75,7 +75,7 @@ async def test_handle_message_sends_command():
|
||||
@pytest.mark.asyncio
|
||||
async def test_zmq_command_loop_valid_payload(zmq_context):
|
||||
"""UI command is read from SUB and published."""
|
||||
command = {"endpoint": "actuate/speech", "data": "hello"}
|
||||
command = {"endpoint": "actuate/speech", "data": "hello", "is_priority": False}
|
||||
fake_socket = AsyncMock()
|
||||
|
||||
async def recv_once():
|
||||
|
||||
@@ -51,7 +51,7 @@ async def test_handle_belief_collector_message(agent, mock_settings):
|
||||
msg = InternalMessage(
|
||||
to="bdi_agent",
|
||||
sender=mock_settings.agent_settings.bdi_belief_collector_name,
|
||||
body=BeliefMessage(beliefs=beliefs).model_dump_json(),
|
||||
body=BeliefMessage(create=beliefs).model_dump_json(),
|
||||
thread="beliefs",
|
||||
)
|
||||
|
||||
@@ -64,6 +64,26 @@ async def test_handle_belief_collector_message(agent, mock_settings):
|
||||
assert args[2] == agentspeak.Literal("user_said", (agentspeak.Literal("Hello"),))
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_handle_delete_belief_message(agent, mock_settings):
|
||||
"""Test that incoming beliefs to be deleted are removed from the BDI agent"""
|
||||
beliefs = [Belief(name="user_said", arguments=["Hello"])]
|
||||
|
||||
msg = InternalMessage(
|
||||
to="bdi_agent",
|
||||
sender=mock_settings.agent_settings.bdi_belief_collector_name,
|
||||
body=BeliefMessage(delete=beliefs).model_dump_json(),
|
||||
thread="beliefs",
|
||||
)
|
||||
await agent.handle_message(msg)
|
||||
|
||||
# Expect bdi_agent.call to be triggered to remove belief
|
||||
args = agent.bdi_agent.call.call_args.args
|
||||
assert args[0] == agentspeak.Trigger.removal
|
||||
assert args[1] == agentspeak.GoalType.belief
|
||||
assert args[2] == agentspeak.Literal("user_said", (agentspeak.Literal("Hello"),))
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_incorrect_belief_collector_message(agent, mock_settings):
|
||||
"""Test that incorrect message format triggers an exception."""
|
||||
@@ -128,7 +148,8 @@ def test_add_belief_sets_event(agent):
|
||||
agent._wake_bdi_loop = MagicMock()
|
||||
|
||||
belief = Belief(name="test_belief", arguments=["a", "b"])
|
||||
agent._apply_beliefs([belief])
|
||||
belief_changes = BeliefMessage(replace=[belief])
|
||||
agent._apply_belief_changes(belief_changes)
|
||||
|
||||
assert agent.bdi_agent.call.called
|
||||
agent._wake_bdi_loop.set.assert_called()
|
||||
@@ -137,7 +158,7 @@ def test_add_belief_sets_event(agent):
|
||||
def test_apply_beliefs_empty_returns(agent):
|
||||
"""Line: if not beliefs: return"""
|
||||
agent._wake_bdi_loop = MagicMock()
|
||||
agent._apply_beliefs([])
|
||||
agent._apply_belief_changes(BeliefMessage())
|
||||
agent.bdi_agent.call.assert_not_called()
|
||||
agent._wake_bdi_loop.set.assert_not_called()
|
||||
|
||||
@@ -220,8 +241,9 @@ def test_replace_belief_calls_remove_all(agent):
|
||||
agent._remove_all_with_name = MagicMock()
|
||||
agent._wake_bdi_loop = MagicMock()
|
||||
|
||||
belief = Belief(name="user_said", arguments=["Hello"], replace=True)
|
||||
agent._apply_beliefs([belief])
|
||||
belief = Belief(name="user_said", arguments=["Hello"])
|
||||
belief_changes = BeliefMessage(replace=[belief])
|
||||
agent._apply_belief_changes(belief_changes)
|
||||
|
||||
agent._remove_all_with_name.assert_called_with("user_said")
|
||||
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
import asyncio
|
||||
import json
|
||||
import sys
|
||||
import uuid
|
||||
from unittest.mock import AsyncMock
|
||||
|
||||
import pytest
|
||||
@@ -8,31 +8,45 @@ import pytest
|
||||
from control_backend.agents.bdi.bdi_program_manager import BDIProgramManager
|
||||
from control_backend.core.agent_system import InternalMessage
|
||||
from control_backend.schemas.belief_message import BeliefMessage
|
||||
from control_backend.schemas.program import Program
|
||||
from control_backend.schemas.program import BasicNorm, Goal, Phase, Plan, Program
|
||||
|
||||
# Fix Windows Proactor loop for zmq
|
||||
if sys.platform.startswith("win"):
|
||||
asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())
|
||||
|
||||
|
||||
def make_valid_program_json(norm="N1", goal="G1"):
|
||||
return json.dumps(
|
||||
{
|
||||
"phases": [
|
||||
{
|
||||
"id": "phase1",
|
||||
"label": "Phase 1",
|
||||
"triggers": [],
|
||||
"norms": [{"id": "n1", "label": "Norm 1", "norm": norm}],
|
||||
"goals": [
|
||||
{"id": "g1", "label": "Goal 1", "description": goal, "achieved": False}
|
||||
],
|
||||
}
|
||||
]
|
||||
}
|
||||
)
|
||||
def make_valid_program_json(norm="N1", goal="G1") -> str:
|
||||
return Program(
|
||||
phases=[
|
||||
Phase(
|
||||
id=uuid.uuid4(),
|
||||
name="Basic Phase",
|
||||
norms=[
|
||||
BasicNorm(
|
||||
id=uuid.uuid4(),
|
||||
name=norm,
|
||||
norm=norm,
|
||||
),
|
||||
],
|
||||
goals=[
|
||||
Goal(
|
||||
id=uuid.uuid4(),
|
||||
name=goal,
|
||||
plan=Plan(
|
||||
id=uuid.uuid4(),
|
||||
name="Goal Plan",
|
||||
steps=[],
|
||||
),
|
||||
can_fail=False,
|
||||
),
|
||||
],
|
||||
triggers=[],
|
||||
),
|
||||
],
|
||||
).model_dump_json()
|
||||
|
||||
|
||||
@pytest.mark.skip(reason="Functionality being rebuilt.")
|
||||
@pytest.mark.asyncio
|
||||
async def test_send_to_bdi():
|
||||
manager = BDIProgramManager(name="program_manager_test")
|
||||
@@ -73,5 +87,5 @@ async def test_receive_programs_valid_and_invalid():
|
||||
# Only valid Program should have triggered _send_to_bdi
|
||||
assert manager._send_to_bdi.await_count == 1
|
||||
forwarded: Program = manager._send_to_bdi.await_args[0][0]
|
||||
assert forwarded.phases[0].norms[0].norm == "N1"
|
||||
assert forwarded.phases[0].goals[0].description == "G1"
|
||||
assert forwarded.phases[0].norms[0].name == "N1"
|
||||
assert forwarded.phases[0].goals[0].name == "G1"
|
||||
|
||||
@@ -86,7 +86,7 @@ async def test_send_beliefs_to_bdi(agent):
|
||||
sent: InternalMessage = agent.send.call_args.args[0]
|
||||
assert sent.to == settings.agent_settings.bdi_core_name
|
||||
assert sent.thread == "beliefs"
|
||||
assert json.loads(sent.body)["beliefs"] == [belief.model_dump() for belief in beliefs]
|
||||
assert json.loads(sent.body)["create"] == [belief.model_dump() for belief in beliefs]
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
|
||||
test/unit/agents/bdi/test_text_belief_extractor.py (new file, 346 lines)
@@ -0,0 +1,346 @@
|
||||
import json
|
||||
import uuid
|
||||
from unittest.mock import AsyncMock, MagicMock, patch
|
||||
|
||||
import httpx
|
||||
import pytest
|
||||
|
||||
from control_backend.agents.bdi import TextBeliefExtractorAgent
|
||||
from control_backend.core.agent_system import InternalMessage
|
||||
from control_backend.core.config import settings
|
||||
from control_backend.schemas.belief_message import BeliefMessage
|
||||
from control_backend.schemas.program import (
|
||||
ConditionalNorm,
|
||||
LLMAction,
|
||||
Phase,
|
||||
Plan,
|
||||
Program,
|
||||
SemanticBelief,
|
||||
Trigger,
|
||||
)
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def agent():
|
||||
agent = TextBeliefExtractorAgent("text_belief_agent")
|
||||
agent.send = AsyncMock()
|
||||
agent._query_llm = AsyncMock()
|
||||
return agent
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def sample_program():
|
||||
return Program(
|
||||
phases=[
|
||||
Phase(
|
||||
name="Some phase",
|
||||
id=uuid.uuid4(),
|
||||
norms=[
|
||||
ConditionalNorm(
|
||||
name="Some norm",
|
||||
id=uuid.uuid4(),
|
||||
norm="Use nautical terms.",
|
||||
critical=False,
|
||||
condition=SemanticBelief(
|
||||
name="is_pirate",
|
||||
id=uuid.uuid4(),
|
||||
description="The user is a pirate. Perhaps because they say "
|
||||
"they are, or because they speak like a pirate "
|
||||
'with terms like "arr".',
|
||||
),
|
||||
),
|
||||
],
|
||||
goals=[],
|
||||
triggers=[
|
||||
Trigger(
|
||||
name="Some trigger",
|
||||
id=uuid.uuid4(),
|
||||
condition=SemanticBelief(
|
||||
name="no_more_booze",
|
||||
id=uuid.uuid4(),
|
||||
description="There is no more alcohol.",
|
||||
),
|
||||
plan=Plan(
|
||||
name="Some plan",
|
||||
id=uuid.uuid4(),
|
||||
steps=[
|
||||
LLMAction(
|
||||
name="Some action",
|
||||
id=uuid.uuid4(),
|
||||
goal="Suggest eating chocolate instead.",
|
||||
),
|
||||
],
|
||||
),
|
||||
),
|
||||
],
|
||||
),
|
||||
],
|
||||
)
|
||||
|
||||
|
||||
def make_msg(sender: str, body: str, thread: str | None = None) -> InternalMessage:
|
||||
return InternalMessage(to="unused", sender=sender, body=body, thread=thread)
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_handle_message_ignores_other_agents(agent):
|
||||
msg = make_msg("unknown", "some data", None)
|
||||
|
||||
await agent.handle_message(msg)
|
||||
|
||||
agent.send.assert_not_called() # noqa # `agent.send` has no such property, but we mock it.
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_handle_message_from_transcriber(agent, mock_settings):
|
||||
transcription = "hello world"
|
||||
msg = make_msg(mock_settings.agent_settings.transcription_name, transcription, None)
|
||||
|
||||
await agent.handle_message(msg)
|
||||
|
||||
agent.send.assert_awaited_once() # noqa # `agent.send` has no such property, but we mock it.
|
||||
sent: InternalMessage = agent.send.call_args.args[0] # noqa
|
||||
assert sent.to == mock_settings.agent_settings.bdi_belief_collector_name
|
||||
assert sent.thread == "beliefs"
|
||||
parsed = json.loads(sent.body)
|
||||
assert parsed == {"beliefs": {"user_said": [transcription]}, "type": "belief_extraction_text"}
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_process_user_said(agent, mock_settings):
|
||||
transcription = "this is a test"
|
||||
|
||||
await agent._user_said(transcription)
|
||||
|
||||
agent.send.assert_awaited_once() # noqa # `agent.send` has no such property, but we mock it.
|
||||
sent: InternalMessage = agent.send.call_args.args[0] # noqa
|
||||
assert sent.to == mock_settings.agent_settings.bdi_belief_collector_name
|
||||
assert sent.thread == "beliefs"
|
||||
parsed = json.loads(sent.body)
|
||||
assert parsed["beliefs"]["user_said"] == [transcription]
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_query_llm():
|
||||
mock_response = MagicMock()
|
||||
mock_response.json.return_value = {
|
||||
"choices": [
|
||||
{
|
||||
"message": {
|
||||
"content": "null",
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
mock_client = AsyncMock()
|
||||
mock_client.post.return_value = mock_response
|
||||
mock_async_client = MagicMock()
|
||||
mock_async_client.__aenter__.return_value = mock_client
|
||||
mock_async_client.__aexit__.return_value = None
|
||||
|
||||
with patch(
|
||||
"control_backend.agents.bdi.text_belief_extractor_agent.httpx.AsyncClient",
|
||||
return_value=mock_async_client,
|
||||
):
|
||||
agent = TextBeliefExtractorAgent("text_belief_agent")
|
||||
|
||||
res = await agent._query_llm("hello world", {"type": "null"})
|
||||
# Response content was set as "null", so should be deserialized as None
|
||||
assert res is None
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_retry_query_llm_success(agent):
|
||||
agent._query_llm.return_value = None
|
||||
res = await agent._retry_query_llm("hello world", {"type": "null"})
|
||||
|
||||
agent._query_llm.assert_called_once()
|
||||
assert res is None
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_retry_query_llm_success_after_failure(agent):
|
||||
agent._query_llm.side_effect = [KeyError(), "real value"]
|
||||
res = await agent._retry_query_llm("hello world", {"type": "string"})
|
||||
|
||||
assert agent._query_llm.call_count == 2
|
||||
assert res == "real value"
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_retry_query_llm_failures(agent):
|
||||
agent._query_llm.side_effect = [KeyError(), KeyError(), KeyError(), "real value"]
|
||||
res = await agent._retry_query_llm("hello world", {"type": "string"})
|
||||
|
||||
assert agent._query_llm.call_count == 3
|
||||
assert res is None
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_retry_query_llm_fail_immediately(agent):
|
||||
agent._query_llm.side_effect = [KeyError(), "real value"]
|
||||
res = await agent._retry_query_llm("hello world", {"type": "string"}, tries=1)
|
||||
|
||||
assert agent._query_llm.call_count == 1
|
||||
assert res is None
|
||||
|
||||
|
||||
@pytest.mark.asyncio
async def test_extracting_beliefs_from_program(agent, sample_program):
    assert len(agent.available_beliefs) == 0
    await agent.handle_message(
        InternalMessage(
            to=settings.agent_settings.text_belief_extractor_name,
            sender=settings.agent_settings.bdi_program_manager_name,
            body=sample_program.model_dump_json(),
        ),
    )
    assert len(agent.available_beliefs) == 2


@pytest.mark.asyncio
async def test_handle_invalid_program(agent, sample_program):
    agent.available_beliefs.append(sample_program.phases[0].norms[0].condition)
    agent.available_beliefs.append(sample_program.phases[0].triggers[0].condition)
    assert len(agent.available_beliefs) == 2

    await agent.handle_message(
        InternalMessage(
            to=settings.agent_settings.text_belief_extractor_name,
            sender=settings.agent_settings.bdi_program_manager_name,
            body=json.dumps({"phases": "Invalid"}),
        ),
    )

    assert len(agent.available_beliefs) == 2


@pytest.mark.asyncio
async def test_handle_robot_response(agent):
    initial_length = len(agent.conversation.messages)
    response = "Hi, I'm Pepper. What's your name?"

    await agent.handle_message(
        InternalMessage(
            to=settings.agent_settings.text_belief_extractor_name,
            sender=settings.agent_settings.llm_name,
            body=response,
        ),
    )

    assert len(agent.conversation.messages) == initial_length + 1
    assert agent.conversation.messages[-1].role == "assistant"
    assert agent.conversation.messages[-1].content == response


@pytest.mark.asyncio
async def test_simulated_real_turn_with_beliefs(agent, sample_program):
    """Test sending user message to extract beliefs from."""
    agent.available_beliefs.append(sample_program.phases[0].norms[0].condition)
    agent.available_beliefs.append(sample_program.phases[0].triggers[0].condition)

    # Send a user message with the belief that there's no more booze
    agent._query_llm.return_value = {"is_pirate": None, "no_more_booze": True}
    assert len(agent.conversation.messages) == 0
    await agent.handle_message(
        InternalMessage(
            to=settings.agent_settings.text_belief_extractor_name,
            sender=settings.agent_settings.transcription_name,
            body="We're all out of schnaps.",
        ),
    )
    assert len(agent.conversation.messages) == 1

    # There should be a belief set and sent to the BDI core, as well as the user_said belief
    assert agent.send.call_count == 2

    # First should be the beliefs message
    message: InternalMessage = agent.send.call_args_list[0].args[0]
    beliefs = BeliefMessage.model_validate_json(message.body)
    assert len(beliefs.create) == 1
    assert beliefs.create[0].name == "no_more_booze"


@pytest.mark.asyncio
async def test_simulated_real_turn_no_beliefs(agent, sample_program):
    """Test a user message to extract beliefs from, but no beliefs are formed."""
    agent.available_beliefs.append(sample_program.phases[0].norms[0].condition)
    agent.available_beliefs.append(sample_program.phases[0].triggers[0].condition)

    # Send a user message with no new beliefs
    agent._query_llm.return_value = {"is_pirate": None, "no_more_booze": None}
    await agent.handle_message(
        InternalMessage(
            to=settings.agent_settings.text_belief_extractor_name,
            sender=settings.agent_settings.transcription_name,
            body="Hello there!",
        ),
    )

    # Only the user_said belief should've been sent
    agent.send.assert_called_once()


@pytest.mark.asyncio
async def test_simulated_real_turn_no_new_beliefs(agent, sample_program):
    """
    Test a user message to extract beliefs from, but no new beliefs are formed because they already
    existed.
    """
    agent.available_beliefs.append(sample_program.phases[0].norms[0].condition)
    agent.available_beliefs.append(sample_program.phases[0].triggers[0].condition)
    agent.beliefs["is_pirate"] = True

    # Send a user message with the belief the user is a pirate, still
    agent._query_llm.return_value = {"is_pirate": True, "no_more_booze": None}
    await agent.handle_message(
        InternalMessage(
            to=settings.agent_settings.text_belief_extractor_name,
            sender=settings.agent_settings.transcription_name,
            body="Arr, nice to meet you, matey.",
        ),
    )

    # Only the user_said belief should've been sent, as no beliefs have changed
    agent.send.assert_called_once()


@pytest.mark.asyncio
async def test_simulated_real_turn_remove_belief(agent, sample_program):
    """
    Test a user message to extract beliefs from, but an existing belief is determined no longer to
    hold.
    """
    agent.available_beliefs.append(sample_program.phases[0].norms[0].condition)
    agent.available_beliefs.append(sample_program.phases[0].triggers[0].condition)
    agent.beliefs["no_more_booze"] = True

    # Send a user message indicating the existing no_more_booze belief no longer holds
    agent._query_llm.return_value = {"is_pirate": None, "no_more_booze": False}
    await agent.handle_message(
        InternalMessage(
            to=settings.agent_settings.text_belief_extractor_name,
            sender=settings.agent_settings.transcription_name,
            body="I found an untouched barrel of wine!",
        ),
    )

    # Both user_said and belief change should've been sent
    assert agent.send.call_count == 2

    # Agent's current beliefs should've changed
    assert not agent.beliefs["no_more_booze"]


@pytest.mark.asyncio
async def test_llm_failure_handling(agent, sample_program):
    """
    Check that the agent handles failures gracefully without crashing.
    """
    agent._query_llm.side_effect = httpx.HTTPError("")
    agent.available_beliefs.append(sample_program.phases[0].norms[0].condition)
    agent.available_beliefs.append(sample_program.phases[0].triggers[0].condition)

    belief_changes = await agent._infer_turn()

    assert len(belief_changes) == 0

@@ -1,65 +0,0 @@
import json
from unittest.mock import AsyncMock

import pytest

from control_backend.agents.bdi import (
    TextBeliefExtractorAgent,
)
from control_backend.core.agent_system import InternalMessage


@pytest.fixture
def agent():
    agent = TextBeliefExtractorAgent("text_belief_agent")
    agent.send = AsyncMock()
    return agent


def make_msg(sender: str, body: str, thread: str | None = None) -> InternalMessage:
    return InternalMessage(to="unused", sender=sender, body=body, thread=thread)


@pytest.mark.asyncio
async def test_handle_message_ignores_other_agents(agent):
    msg = make_msg("unknown", "some data", None)

    await agent.handle_message(msg)

    agent.send.assert_not_called() # noqa # `agent.send` has no such property, but we mock it.


@pytest.mark.asyncio
async def test_handle_message_from_transcriber(agent, mock_settings):
    transcription = "hello world"
    msg = make_msg(mock_settings.agent_settings.transcription_name, transcription, None)

    await agent.handle_message(msg)

    agent.send.assert_awaited_once() # noqa # `agent.send` has no such property, but we mock it.
    sent: InternalMessage = agent.send.call_args.args[0] # noqa
    assert sent.to == mock_settings.agent_settings.bdi_belief_collector_name
    assert sent.thread == "beliefs"
    parsed = json.loads(sent.body)
    assert parsed == {"beliefs": {"user_said": [transcription]}, "type": "belief_extraction_text"}


@pytest.mark.asyncio
async def test_process_transcription_demo(agent, mock_settings):
    transcription = "this is a test"

    await agent._process_transcription_demo(transcription)

    agent.send.assert_awaited_once() # noqa # `agent.send` has no such property, but we mock it.
    sent: InternalMessage = agent.send.call_args.args[0] # noqa
    assert sent.to == mock_settings.agent_settings.bdi_belief_collector_name
    assert sent.thread == "beliefs"
    parsed = json.loads(sent.body)
    assert parsed["beliefs"]["user_said"] == [transcription]


@pytest.mark.asyncio
async def test_setup_initializes_beliefs(agent):
    """Covers the setup method and ensures beliefs are initialized."""
    await agent.setup()
    assert agent.beliefs == {"mood": ["X"], "car": ["Y"]}

@@ -67,6 +67,7 @@ async def test_setup_success_connects_and_starts_robot(zmq_context):
        address="tcp://localhost:5556",
        bind=False,
        gesture_data=[],
        single_gesture_data=[],
    )
    agent.add_behavior.assert_called_once()


@@ -66,7 +66,7 @@ async def test_llm_processing_success(mock_httpx_client, mock_settings):
    # "Hello world." constitutes one sentence/chunk based on punctuation split
    # The agent should call send once with the full sentence
    assert agent.send.called
    args = agent.send.call_args[0][0]
    args = agent.send.call_args_list[0][0][0]
    assert args.to == mock_settings.agent_settings.bdi_core_name
    assert "Hello world." in args.body

@@ -197,6 +197,9 @@ async def test_query_llm_yields_final_tail_chunk(mock_settings):
    agent = LLMAgent("llm_agent")
    agent.send = AsyncMock()

    agent.logger = MagicMock()
    agent.logger.llm = MagicMock()

    # Patch _stream_query_llm to yield tokens that do NOT end with punctuation
    async def fake_stream(messages):
        yield "Hello"

146
test/unit/agents/user_interrupt/test_user_interrupt.py
Normal file
@@ -0,0 +1,146 @@
import asyncio
import json
from unittest.mock import AsyncMock, MagicMock

import pytest

from control_backend.agents.user_interrupt.user_interrupt_agent import UserInterruptAgent
from control_backend.core.agent_system import InternalMessage
from control_backend.core.config import settings
from control_backend.schemas.ri_message import RIEndpoint


@pytest.fixture
def agent():
    agent = UserInterruptAgent(name="user_interrupt_agent")
    agent.send = AsyncMock()
    agent.logger = MagicMock()
    agent.sub_socket = AsyncMock()
    return agent


@pytest.mark.asyncio
async def test_send_to_speech_agent(agent):
    """Verify speech command format."""
    await agent._send_to_speech_agent("Hello World")

    agent.send.assert_awaited_once()
    sent_msg: InternalMessage = agent.send.call_args.args[0]

    assert sent_msg.to == settings.agent_settings.robot_speech_name
    body = json.loads(sent_msg.body)
    assert body["data"] == "Hello World"
    assert body["is_priority"] is True


@pytest.mark.asyncio
async def test_send_to_gesture_agent(agent):
    """Verify gesture command format."""
    await agent._send_to_gesture_agent("wave_hand")

    agent.send.assert_awaited_once()
    sent_msg: InternalMessage = agent.send.call_args.args[0]

    assert sent_msg.to == settings.agent_settings.robot_gesture_name
    body = json.loads(sent_msg.body)
    assert body["data"] == "wave_hand"
    assert body["is_priority"] is True
    assert body["endpoint"] == RIEndpoint.GESTURE_SINGLE.value


@pytest.mark.asyncio
async def test_send_to_program_manager(agent):
    """Verify belief update format."""
    context_str = "2"

    await agent._send_to_program_manager(context_str)

    agent.send.assert_awaited_once()
    sent_msg: InternalMessage = agent.send.call_args.args[0]

    assert sent_msg.to == settings.agent_settings.bdi_program_manager_name
    assert sent_msg.thread == "belief_override_id"

    body = json.loads(sent_msg.body)

    assert body["belief"] == context_str


@pytest.mark.asyncio
async def test_receive_loop_routing_success(agent):
    """
    Test that the loop correctly:
    1. Receives 'button_pressed' topic from ZMQ
    2. Parses the JSON payload to find 'type' and 'context'
    3. Calls the correct handler method based on 'type'
    """
    # Prepare JSON payloads as bytes
    payload_speech = json.dumps({"type": "speech", "context": "Hello Speech"}).encode()
    payload_gesture = json.dumps({"type": "gesture", "context": "Hello Gesture"}).encode()
    payload_override = json.dumps({"type": "override", "context": "Hello Override"}).encode()

    agent.sub_socket.recv_multipart.side_effect = [
        (b"button_pressed", payload_speech),
        (b"button_pressed", payload_gesture),
        (b"button_pressed", payload_override),
        asyncio.CancelledError,  # Stop the infinite loop
    ]

    agent._send_to_speech_agent = AsyncMock()
    agent._send_to_gesture_agent = AsyncMock()
    agent._send_to_program_manager = AsyncMock()

    try:
        await agent._receive_button_event()
    except asyncio.CancelledError:
        pass

    await asyncio.sleep(0)

    # Speech
    agent._send_to_speech_agent.assert_awaited_once_with("Hello Speech")

    # Gesture
    agent._send_to_gesture_agent.assert_awaited_once_with("Hello Gesture")

    # Override
    agent._send_to_program_manager.assert_awaited_once_with("Hello Override")

    assert agent._send_to_speech_agent.await_count == 1
    assert agent._send_to_gesture_agent.await_count == 1
    assert agent._send_to_program_manager.await_count == 1


@pytest.mark.asyncio
async def test_receive_loop_unknown_type(agent):
    """Test that unknown 'type' values in the JSON log a warning and do not crash."""

    # Prepare a payload with an unknown type
    payload_unknown = json.dumps({"type": "unknown_thing", "context": "some_data"}).encode()

    agent.sub_socket.recv_multipart.side_effect = [
        (b"button_pressed", payload_unknown),
        asyncio.CancelledError,
    ]

    agent._send_to_speech_agent = AsyncMock()
    agent._send_to_gesture_agent = AsyncMock()
    agent._send_to_belief_collector = AsyncMock()

    try:
        await agent._receive_button_event()
    except asyncio.CancelledError:
        pass

    await asyncio.sleep(0)

    # Ensure no handlers were called
    agent._send_to_speech_agent.assert_not_called()
    agent._send_to_gesture_agent.assert_not_called()
    agent._send_to_belief_collector.assert_not_called()

    agent.logger.warning.assert_called_with(
        "Received button press with unknown type '%s' (context: '%s').",
        "unknown_thing",
        "some_data",
    )

@@ -1,4 +1,5 @@
import json
import uuid
from unittest.mock import AsyncMock

import pytest
@@ -6,7 +7,7 @@ from fastapi import FastAPI
from fastapi.testclient import TestClient

from control_backend.api.v1.endpoints import program
from control_backend.schemas.program import Program
from control_backend.schemas.program import BasicNorm, Goal, Phase, Plan, Program


@pytest.fixture
@@ -25,29 +26,37 @@ def client(app):

def make_valid_program_dict():
    """Helper to create a valid Program JSON structure."""
    return {
        "phases": [
            {
                "id": "phase1",
                "label": "basephase",
                "norms": [{"id": "n1", "label": "norm", "norm": "be nice"}],
                "goals": [
                    {"id": "g1", "label": "goal", "description": "test goal", "achieved": False}
    # Converting to JSON using Pydantic because it knows how to convert a UUID object
    program_json_str = Program(
        phases=[
            Phase(
                id=uuid.uuid4(),
                name="Basic Phase",
                norms=[
                    BasicNorm(
                        id=uuid.uuid4(),
                        name="Some norm",
                        norm="Do normal.",
                    ),
                ],
                "triggers": [
                    {
                        "id": "t1",
                        "label": "trigger",
                        "type": "keywords",
                        "keywords": [
                            {"id": "kw1", "keyword": "keyword1"},
                            {"id": "kw2", "keyword": "keyword2"},
                        ],
                    },
                goals=[
                    Goal(
                        id=uuid.uuid4(),
                        name="Some goal",
                        plan=Plan(
                            id=uuid.uuid4(),
                            name="Goal Plan",
                            steps=[],
                        ),
                        can_fail=False,
                    ),
                ],
            }
        ]
    }
                triggers=[],
            ),
        ],
    ).model_dump_json()
    # Converting back to a dict because that's what's expected
    return json.loads(program_json_str)


def test_receive_program_success(client):
@@ -71,7 +80,8 @@ def test_receive_program_success(client):
    sent_bytes = args[0][1]
    sent_obj = json.loads(sent_bytes.decode())

    expected_obj = Program.model_validate(program_dict).model_dump()
    # Converting to JSON using Pydantic because it knows how to handle UUIDs
    expected_obj = json.loads(Program.model_validate(program_dict).model_dump_json())
    assert sent_obj == expected_obj


@@ -1,49 +1,65 @@
import uuid

import pytest
from pydantic import ValidationError

from control_backend.schemas.program import (
    BasicNorm,
    ConditionalNorm,
    Goal,
    KeywordTrigger,
    Norm,
    InferredBelief,
    KeywordBelief,
    LogicalOperator,
    Phase,
    Plan,
    Program,
    TriggerKeyword,
    SemanticBelief,
    Trigger,
)


def base_norm() -> Norm:
    return Norm(
        id="norm1",
        label="testNorm",
def base_norm() -> BasicNorm:
    return BasicNorm(
        id=uuid.uuid4(),
        name="testNormName",
        norm="testNormNorm",
        critical=False,
    )


def base_goal() -> Goal:
    return Goal(
        id="goal1",
        label="testGoal",
        description="testGoalDescription",
        achieved=False,
        id=uuid.uuid4(),
        name="testGoalName",
        plan=Plan(
            id=uuid.uuid4(),
            name="testGoalPlanName",
            steps=[],
        ),
        can_fail=False,
    )


def base_trigger() -> KeywordTrigger:
    return KeywordTrigger(
        id="trigger1",
        label="testTrigger",
        type="keywords",
        keywords=[
            TriggerKeyword(id="keyword1", keyword="testKeyword1"),
            TriggerKeyword(id="keyword1", keyword="testKeyword2"),
        ],
def base_trigger() -> Trigger:
    return Trigger(
        id=uuid.uuid4(),
        name="testTriggerName",
        condition=KeywordBelief(
            id=uuid.uuid4(),
            name="testTriggerKeywordBeliefTriggerName",
            keyword="Keyword",
        ),
        plan=Plan(
            id=uuid.uuid4(),
            name="testTriggerPlanName",
            steps=[],
        ),
    )


def base_phase() -> Phase:
    return Phase(
        id="phase1",
        label="basephase",
        id=uuid.uuid4(),
        norms=[base_norm()],
        goals=[base_goal()],
        triggers=[base_trigger()],
@@ -58,7 +74,7 @@ def invalid_program() -> dict:
    # wrong types inside phases list (not Phase objects)
    return {
        "phases": [
            {"id": "phase1"}, # incomplete
            {"id": uuid.uuid4()}, # incomplete
            {"not_a_phase": True},
        ]
    }
@@ -77,11 +93,112 @@ def test_valid_deepprogram():
    # validate nested components directly
    phase = validated.phases[0]
    assert isinstance(phase.goals[0], Goal)
    assert isinstance(phase.triggers[0], KeywordTrigger)
    assert isinstance(phase.norms[0], Norm)
    assert isinstance(phase.triggers[0], Trigger)
    assert isinstance(phase.norms[0], BasicNorm)


def test_invalid_program():
    bad = invalid_program()
    with pytest.raises(ValidationError):
        Program.model_validate(bad)


def test_conditional_norm_parsing():
    """
    Check that pydantic is able to preserve the type of the norm, that it doesn't lose its
    "condition" field when serializing and deserializing.
    """
    norm = ConditionalNorm(
        name="testNormName",
        id=uuid.uuid4(),
        norm="testNormNorm",
        critical=False,
        condition=KeywordBelief(
            name="testKeywordBelief",
            id=uuid.uuid4(),
            keyword="testKeywordBelief",
        ),
    )
    program = Program(
        phases=[
            Phase(
                name="Some phase",
                id=uuid.uuid4(),
                norms=[norm],
                goals=[],
                triggers=[],
            ),
        ],
    )

    parsed_program = Program.model_validate_json(program.model_dump_json())
    parsed_norm = parsed_program.phases[0].norms[0]

    assert hasattr(parsed_norm, "condition")
    assert isinstance(parsed_norm, ConditionalNorm)


def test_belief_type_parsing():
    """
    Check that pydantic is able to discern between the different types of beliefs when serializing
    and deserializing.
    """
    keyword_belief = KeywordBelief(
        name="testKeywordBelief",
        id=uuid.uuid4(),
        keyword="something",
    )
    semantic_belief = SemanticBelief(
        name="testSemanticBelief",
        id=uuid.uuid4(),
        description="something",
    )
    inferred_belief = InferredBelief(
        name="testInferredBelief",
        id=uuid.uuid4(),
        operator=LogicalOperator.OR,
        left=keyword_belief,
        right=semantic_belief,
    )

    program = Program(
        phases=[
            Phase(
                name="Some phase",
                id=uuid.uuid4(),
                norms=[],
                goals=[],
                triggers=[
                    Trigger(
                        name="testTriggerKeywordTrigger",
                        id=uuid.uuid4(),
                        condition=keyword_belief,
                        plan=Plan(name="testTriggerPlanName", id=uuid.uuid4(), steps=[]),
                    ),
                    Trigger(
                        name="testTriggerSemanticTrigger",
                        id=uuid.uuid4(),
                        condition=semantic_belief,
                        plan=Plan(name="testTriggerPlanName", id=uuid.uuid4(), steps=[]),
                    ),
                    Trigger(
                        name="testTriggerInferredTrigger",
                        id=uuid.uuid4(),
                        condition=inferred_belief,
                        plan=Plan(name="testTriggerPlanName", id=uuid.uuid4(), steps=[]),
                    ),
                ],
            ),
        ],
    )

    parsed_program = Program.model_validate_json(program.model_dump_json())

    parsed_keyword_belief = parsed_program.phases[0].triggers[0].condition
    assert isinstance(parsed_keyword_belief, KeywordBelief)

    parsed_semantic_belief = parsed_program.phases[0].triggers[1].condition
    assert isinstance(parsed_semantic_belief, SemanticBelief)

    parsed_inferred_belief = parsed_program.phases[0].triggers[2].condition
    assert isinstance(parsed_inferred_belief, InferredBelief)