Compare commits


1 Commits

Author SHA1 Message Date
Twirre Meulenbelt
b785493b97 fix: messages are None when no message is received
ref: N25B-265
2025-11-12 11:47:59 +01:00
114 changed files with 3701 additions and 11539 deletions

View File

@@ -1,20 +0,0 @@
# Example .env file. To use, make a copy named ".env" (i.e. drop the ".example" suffix), then edit the values.
# The hostname of the Robot Interface. Change if the Control Backend and Robot Interface are running on different computers.
RI_HOST="localhost"
# URL for the local LLM API. Must be an API that implements the OpenAI Chat Completions API; most local LLM servers do.
LLM_SETTINGS__LOCAL_LLM_URL="http://localhost:1234/v1/chat/completions"
# Name of the local LLM model to use.
LLM_SETTINGS__LOCAL_LLM_MODEL="gpt-oss"
# Number of non-speech chunks to wait before speech is considered ended. A chunk is approximately 31 ms. Increasing this number allows longer pauses in speech, but also increases response time.
BEHAVIOUR_SETTINGS__VAD_NON_SPEECH_PATIENCE_CHUNKS=15
# Timeout in milliseconds for socket polling. Increase this number (e.g. to 500 ms) if network latency/jitter is high, which is often the case on Wi-Fi. A symptom of this issue is transcriptions getting cut off.
BEHAVIOUR_SETTINGS__SOCKET_POLLER_TIMEOUT_MS=100
# For an exhaustive list of options, see the control_backend.core.config module in the docs.
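
For orientation, here is a minimal sketch (the class and field names are assumptions inferred from the variable names above) of how the double-underscore entries map onto nested pydantic-settings models in the `control_backend.core.config` module:

```python
# Hedged sketch: "__" acts as the nested delimiter, so LLM_SETTINGS__LOCAL_LLM_URL
# ends up as settings.llm_settings.local_llm_url. All names here are illustrative.
from pydantic import BaseModel
from pydantic_settings import BaseSettings, SettingsConfigDict


class LLMSettings(BaseModel):
    local_llm_url: str = "http://localhost:1234/v1/chat/completions"
    local_llm_model: str = "gpt-oss"


class Settings(BaseSettings):
    model_config = SettingsConfigDict(env_file=".env", env_nested_delimiter="__")

    ri_host: str = "localhost"
    llm_settings: LLMSettings = LLMSettings()


settings = Settings()  # reads .env; e.g. settings.llm_settings.local_llm_model
```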

View File

@@ -30,7 +30,7 @@ HEADER=$(head -n 1 "$COMMIT_MSG_FILE")
# Check for Merge commits (covers 'git merge' and PR merges from GitHub/GitLab)
# Examples: "Merge branch 'main' into ...", "Merge pull request #123 from ..."
MERGE_PATTERN="^Merge (remote-tracking )?(branch|pull request|tag) .*"
MERGE_PATTERN="^Merge (branch|pull request|tag) .*"
if [[ "$HEADER" =~ $MERGE_PATTERN ]]; then
echo -e "${GREEN}Merge commit detected by message content. Skipping validation.${NC}"
exit 0

8
.gitignore vendored
View File

@@ -218,12 +218,8 @@ __marimo__/
# MacOS
.DS_Store
# Docs
docs/*
!docs/conf.py
# Generated files
agentspeak.asl

View File

@@ -22,4 +22,6 @@ test:
tags:
- test
script:
- uv run --only-group test pytest test
# - uv run --group integration-test pytest test/integration
- uv run --only-group test pytest test/unit

View File

@@ -1,9 +0,0 @@
%{first_multiline_commit_description}
To verify:
- [ ] Style checks pass
- [ ] Pipeline (tests) pass
- [ ] Documentation is up to date
- [ ] Tests are up to date (new code is covered)
- [ ] ...

View File

@@ -3,13 +3,12 @@ version: 1
custom_levels:
OBSERVATION: 25
ACTION: 26
LLM: 9
formatters:
# Console output
colored:
(): "colorlog.ColoredFormatter"
format: "{log_color}{asctime}.{msecs:03.0f} | {levelname:11} | {name:70} | {message}"
format: "{log_color}{asctime} | {levelname:11} | {name:70} | {message}"
style: "{"
datefmt: "%H:%M:%S"
@@ -27,7 +26,7 @@ handlers:
stream: ext://sys.stdout
ui:
class: zmq.log.handlers.PUBHandler
level: LLM
level: DEBUG
formatter: json_experiment
# Level of external libraries
@@ -37,5 +36,5 @@ root:
loggers:
control_backend:
level: LLM
level: DEBUG
handlers: [ui]

View File

@@ -27,7 +27,6 @@ This + part might differ based on what model you choose.
copy the name of the loaded model and use it to replace `local_llm_model` in the settings.
## Running
To run the project (development server), execute the following command from the repository root:
@@ -35,14 +34,6 @@ To run the project (development server), execute the following command (while in
uv run fastapi dev src/control_backend/main.py
```
### Environment Variables
You can use environment variables to change settings. Make a copy of the [`.env.example`](.env.example) file, name it `.env` and put it in the root directory. The file itself describes how to do the configuration.
For an exhaustive list of environment options, see the `control_backend.core.config` module in the docs.
## Testing
Testing happens automatically when opening a merge request to any branch. If you want to manually run the test suite, you can do so by running the following for unit tests:
@@ -72,30 +63,3 @@ git config --local --unset core.hooksPath
```
Then run the pre-commit install commands again.
## Documentation
Generate documentation web pages using:
### Linux & macOS
```bash
PYTHONPATH=src sphinx-apidoc -F -o docs src/control_backend
```
### Windows
```powershell
$env:PYTHONPATH="src"; sphinx-apidoc -F -o docs src/control_backend
```
Optionally, in the `conf.py` file in the `docs` folder, change preferences.
In the `docs` folder:
### Linux & macOS
```bash
make html
```
### Windows
```powershell
.\make.bat html
```

View File

@@ -1,40 +0,0 @@
# Configuration file for the Sphinx documentation builder.
#
# For the full list of built-in configuration values, see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
import os
import sys
sys.path.insert(0, os.path.abspath("../src"))
# -- Project information -----------------------------------------------------
# https://www.sphinx-doc.org/en/master/usage/configuration.html#project-information
project = "control_backend"
copyright = "2025, Author"
author = "Author"
# -- General configuration ---------------------------------------------------
# https://www.sphinx-doc.org/en/master/usage/configuration.html#general-configuration
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.viewcode",
"sphinx.ext.todo",
]
templates_path = ["_templates"]
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]
language = "en"
# -- Options for HTML output -------------------------------------------------
# https://www.sphinx-doc.org/en/master/usage/configuration.html#options-for-html-output
html_theme = "sphinx_rtd_theme"
html_static_path = ["_static"]
# -- Options for todo extension ----------------------------------------------
# https://www.sphinx-doc.org/en/master/usage/extensions/todo.html#configuration
todo_include_todos = True

View File

@@ -5,52 +5,43 @@ description = "Add your description here"
readme = "README.md"
requires-python = ">=3.13"
dependencies = [
"agentspeak>=0.2.2",
"colorlog>=6.10.1",
"fastapi[all]>=0.115.6",
"mlx-whisper>=0.4.3 ; sys_platform == 'darwin'",
"numpy>=2.3.3",
"openai-whisper>=20250625",
"pyaudio>=0.2.14",
"pydantic>=2.12.0",
"pydantic-settings>=2.11.0",
"python-json-logger>=4.0.0",
"python-slugify>=8.0.4",
"pyyaml>=6.0.3",
"pyzmq>=27.1.0",
"silero-vad>=6.0.0",
"sphinx>=7.3.7",
"sphinx-rtd-theme>=3.0.2",
"torch>=2.8.0",
"uvicorn>=0.37.0",
"colorlog>=6.10.1",
"fastapi[all]>=0.115.6",
"mlx-whisper>=0.4.3 ; sys_platform == 'darwin'",
"numpy>=2.3.3",
"openai-whisper>=20250625",
"pyaudio>=0.2.14",
"pydantic>=2.12.0",
"pydantic-settings>=2.11.0",
"pytest>=8.4.2",
"pytest-asyncio>=1.2.0",
"pytest-cov>=7.0.0",
"pytest-mock>=3.15.1",
"python-json-logger>=4.0.0",
"pyyaml>=6.0.3",
"pyzmq>=27.1.0",
"silero-vad>=6.0.0",
"spade>=4.1.0",
"spade-bdi>=0.3.2",
"torch>=2.8.0",
"uvicorn>=0.37.0",
]
[dependency-groups]
dev = [
"pre-commit>=4.3.0",
"pytest>=8.4.2",
"pytest-asyncio>=1.2.0",
"pytest-cov>=7.0.0",
"pytest-mock>=3.15.1",
"soundfile>=0.13.1",
"ruff>=0.14.2",
"ruff-format>=0.3.0",
"pre-commit>=4.3.0",
"ruff>=0.14.2",
"ruff-format>=0.3.0",
]
integration-test = [
"soundfile>=0.13.1",
]
test = [
"agentspeak>=0.2.2",
"fastapi>=0.115.6",
"httpx>=0.28.1",
"mlx-whisper>=0.4.3 ; sys_platform == 'darwin'",
"openai-whisper>=20250625",
"pydantic>=2.12.0",
"pydantic-settings>=2.11.0",
"pytest>=8.4.2",
"pytest-asyncio>=1.2.0",
"pytest-cov>=7.0.0",
"pytest-mock>=3.15.1",
"pyyaml>=6.0.3",
"pyzmq>=27.1.0",
"soundfile>=0.13.1",
"numpy>=2.3.3",
"pytest>=8.4.2",
"pytest-asyncio>=1.2.0",
"pytest-cov>=7.0.0",
"pytest-mock>=3.15.1",
]
[tool.pytest.ini_options]
@@ -61,15 +52,15 @@ line-length = 100
[tool.ruff.lint]
extend-select = [
"E", # pycodestyle
"F", # pyflakes
"I", # isort (import sorting)
"UP", # pyupgrade (modernize code)
"B", # flake8-bugbear (common bugs)
"C4", # flake8-comprehensions (unnecessary comprehensions)
"E", # pycodestyle
"F", # pyflakes
"I", # isort (import sorting)
"UP", # pyupgrade (modernize code)
"B", # flake8-bugbear (common bugs)
"C4", # flake8-comprehensions (unnecessary comprehensions)
]
ignore = [
"E226", # spaces around operators
"E701", # multiple statements on a single line
"E226", # spaces around operators
"E701", # multiple statements on a single line
]

View File

@@ -1 +1,7 @@
from .base import BaseAgent as BaseAgent
from .belief_collector.belief_collector import BeliefCollectorAgent as BeliefCollectorAgent
from .llm.llm import LLMAgent as LLMAgent
from .ri_command_agent import RICommandAgent as RICommandAgent
from .ri_communication_agent import RICommunicationAgent as RICommunicationAgent
from .transcription.transcription_agent import TranscriptionAgent as TranscriptionAgent
from .vad_agent import VADAgent as VADAgent

View File

@@ -1,2 +0,0 @@
from .robot_gesture_agent import RobotGestureAgent as RobotGestureAgent
from .robot_speech_agent import RobotSpeechAgent as RobotSpeechAgent

View File

@@ -1,173 +0,0 @@
import json
import zmq
import zmq.asyncio as azmq
from control_backend.agents import BaseAgent
from control_backend.core.agent_system import InternalMessage
from control_backend.core.config import settings
from control_backend.schemas.ri_message import GestureCommand, RIEndpoint
class RobotGestureAgent(BaseAgent):
"""
This agent acts as a bridge between the control backend and the Robot Interface (RI).
It receives gesture commands from other agents or from the UI,
and forwards them to the robot via a ZMQ PUB socket.
:ivar subsocket: ZMQ SUB socket for receiving external commands (e.g., from UI).
:ivar pubsocket: ZMQ PUB socket for sending commands to the Robot Interface.
:ivar address: Address to bind/connect the PUB socket.
:ivar bind: Whether to bind or connect the PUB socket.
:ivar repsocket: ZMQ REP socket for answering gesture-list requests.
:ivar gesture_data: A list of strings of available gesture tags.
:ivar single_gesture_data: A list of strings of available single gestures.
"""
subsocket: azmq.Socket
repsocket: azmq.Socket
pubsocket: azmq.Socket
address = ""
bind = False
gesture_data = []
single_gesture_data = []
def __init__(
self,
name: str,
address: str,
bind=False,
gesture_data=None,
single_gesture_data=None,
):
self.gesture_data = gesture_data or []
self.single_gesture_data = single_gesture_data or []
super().__init__(name)
self.address = address
self.bind = bind
async def setup(self):
"""
Initialize the agent.
1. Sets up the PUB socket to talk to the robot.
2. Sets up the SUB socket to listen for "command" topics (from UI/External).
3. Starts the loop for handling ZMQ commands.
"""
self.logger.info("Setting up %s", self.name)
context = azmq.Context.instance()
# To the robot
self.pubsocket = context.socket(zmq.PUB)
if self.bind:
self.pubsocket.bind(self.address)
else:
self.pubsocket.connect(self.address)
# Receive internal topics regarding commands
self.subsocket = context.socket(zmq.SUB)
self.subsocket.connect(settings.zmq_settings.internal_sub_address)
self.subsocket.setsockopt(zmq.SUBSCRIBE, b"command")
self.subsocket.setsockopt(zmq.SUBSCRIBE, b"send_gestures")
# REP socket for replying to gesture requests
self.repsocket = context.socket(zmq.REP)
self.repsocket.bind(settings.zmq_settings.internal_gesture_rep_adress)
self.add_behavior(self._zmq_command_loop())
self.add_behavior(self._fetch_gestures_loop())
self.logger.info("Finished setting up %s", self.name)
async def stop(self):
if self.subsocket:
self.subsocket.close()
if self.pubsocket:
self.pubsocket.close()
if self.repsocket:
self.repsocket.close()
await super().stop()
async def handle_message(self, msg: InternalMessage):
"""
Handle commands received from other internal Python agents.
Validates the message as a :class:`GestureCommand` and forwards it to the robot.
:param msg: The internal message containing the command.
"""
try:
gesture_command = GestureCommand.model_validate_json(msg.body)
if gesture_command.endpoint == RIEndpoint.GESTURE_TAG:
if gesture_command.data not in self.gesture_data:
self.logger.warning(
"Received gesture tag '%s' which is not in available tags. Early returning",
gesture_command.data,
)
return
elif gesture_command.endpoint == RIEndpoint.GESTURE_SINGLE:
if gesture_command.data not in self.single_gesture_data:
self.logger.warning(
"Received gesture '%s' which is not in available gestures. Early returning",
gesture_command.data,
)
return
await self.pubsocket.send_json(gesture_command.model_dump())
except Exception:
self.logger.exception("Error processing internal message.")
async def _zmq_command_loop(self):
"""
Loop to handle commands received via ZMQ (e.g., from the UI).
Listens on the 'command' topic, validates the JSON and forwards it to the robot.
"""
while self._running:
try:
topic, body = await self.subsocket.recv_multipart()
# Don't process send_gestures here
if topic != b"command":
continue
body = json.loads(body)
gesture_command = GestureCommand.model_validate(body)
if gesture_command.endpoint == RIEndpoint.GESTURE_TAG:
if gesture_command.data not in self.gesture_data:
self.logger.warning(
"Received gesture tag '%s' which is not in available tags.\
Early returning",
gesture_command.data,
)
continue
await self.pubsocket.send_json(gesture_command.model_dump())
except Exception:
self.logger.exception("Error processing ZMQ message.")
async def _fetch_gestures_loop(self):
"""
Loop to handle fetching gestures received via ZMQ (e.g., from the UI).
Listens on the 'send_gestures' topic, and returns a list on the get_gestures topic.
"""
while self._running:
try:
# Get a request
body = await self.repsocket.recv()
# Figure out amount, if specified
try:
body = json.loads(body)
except json.JSONDecodeError:
body = None
amount = None
if isinstance(body, int):
amount = body
# Fetch tags from gesture data and respond
tags = self.gesture_data[:amount] if amount else self.gesture_data
response = json.dumps({"tags": tags}).encode()
await self.repsocket.send(response)
except Exception:
self.logger.exception("Error fetching gesture tags.")
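
A hypothetical client for the REP socket above might look like the following (a hedged sketch; the settings attribute is the one the agent binds to, and the optional integer request caps how many tags come back):

```python
# Hedged sketch: request the available gesture tags from RobotGestureAgent's REP socket.
import json

import zmq

from control_backend.core.config import settings

ctx = zmq.Context.instance()
req = ctx.socket(zmq.REQ)
req.connect(settings.zmq_settings.internal_gesture_rep_adress)  # attribute name as defined in settings

req.send(json.dumps(5).encode())       # ask for at most 5 tags; any non-integer body returns all of them
tags = json.loads(req.recv())["tags"]
```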

View File

@@ -1,103 +0,0 @@
import json
import zmq
import zmq.asyncio as azmq
from control_backend.agents import BaseAgent
from control_backend.core.agent_system import InternalMessage
from control_backend.core.config import settings
from control_backend.schemas.ri_message import SpeechCommand
class RobotSpeechAgent(BaseAgent):
"""
This agent acts as a bridge between the control backend and the Robot Interface (RI).
It receives speech commands from other agents or from the UI,
and forwards them to the robot via a ZMQ PUB socket.
:ivar subsocket: ZMQ SUB socket for receiving external commands (e.g., from UI).
:ivar pubsocket: ZMQ PUB socket for sending commands to the Robot Interface.
:ivar address: Address to bind/connect the PUB socket.
:ivar bind: Whether to bind or connect the PUB socket.
"""
subsocket: azmq.Socket
pubsocket: azmq.Socket
address = ""
bind = False
def __init__(
self,
name: str,
address: str,
bind=False,
):
super().__init__(name)
self.address = address
self.bind = bind
async def setup(self):
"""
Initialize the agent.
1. Sets up the PUB socket to talk to the robot.
2. Sets up the SUB socket to listen for "command" topics (from UI/External).
3. Starts the loop for handling ZMQ commands.
"""
self.logger.info("Setting up %s", self.name)
context = azmq.Context.instance()
# To the robot
self.pubsocket = context.socket(zmq.PUB)
if self.bind: # TODO: Should this ever be the case?
self.pubsocket.bind(self.address)
else:
self.pubsocket.connect(self.address)
# Receive internal topics regarding commands
self.subsocket = context.socket(zmq.SUB)
self.subsocket.connect(settings.zmq_settings.internal_sub_address)
self.subsocket.setsockopt(zmq.SUBSCRIBE, b"command")
self.add_behavior(self._zmq_command_loop())
self.logger.info("Finished setting up %s", self.name)
async def stop(self):
if self.subsocket:
self.subsocket.close()
if self.pubsocket:
self.pubsocket.close()
await super().stop()
async def handle_message(self, msg: InternalMessage):
"""
Handle commands received from other internal Python agents.
Validates the message as a :class:`SpeechCommand` and forwards it to the robot.
:param msg: The internal message containing the command.
"""
try:
speech_command = SpeechCommand.model_validate_json(msg.body)
await self.pubsocket.send_json(speech_command.model_dump())
except Exception:
self.logger.exception("Error processing internal message.")
async def _zmq_command_loop(self):
"""
Loop to handle commands received via ZMQ (e.g., from the UI).
Listens on the 'command' topic, validates the JSON, and forwards it to the robot.
"""
while self._running:
try:
_, body = await self.subsocket.recv_multipart()
body = json.loads(body)
message = SpeechCommand.model_validate(body)
await self.pubsocket.send_json(message.model_dump())
except Exception:
self.logger.exception("Error processing ZMQ message.")
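
On the Robot Interface side, a plain SUB socket is enough to pick up the JSON speech commands this agent publishes. A hedged sketch (the endpoint is an assumption and must mirror the agent's bind/connect configuration):

```python
# Hedged sketch of the receiving side: each SpeechCommand is published as a single JSON
# frame without a topic prefix, so subscribing to everything ("") captures it.
import zmq

ctx = zmq.Context.instance()
sub = ctx.socket(zmq.SUB)
sub.bind("tcp://*:5555")             # assumed endpoint; the agent connect()s by default
sub.setsockopt(zmq.SUBSCRIBE, b"")

command = sub.recv_json()            # mirrors SpeechCommand.model_dump(), e.g. its "data" field holds the text
```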

View File

@@ -1,26 +1,18 @@
import logging
from control_backend.core.agent_system import BaseAgent as CoreBaseAgent
from spade.agent import Agent
class BaseAgent(CoreBaseAgent):
class BaseAgent(Agent):
"""
The primary base class for all implementation agents.
Inherits from :class:`control_backend.core.agent_system.BaseAgent`.
This class ensures that every agent instance is automatically equipped with a
properly configured ``logger``.
:ivar logger: A logger instance named after the agent's package and class.
Base agent class for our agents to inherit from.
This ensures that all agents have a logger.
"""
logger: logging.Logger
# Whenever a subclass is defined, give it the correct logger
def __init_subclass__(cls, **kwargs) -> None:
"""
Whenever a subclass is defined, give it the correct logger.
:param kwargs: Keyword arguments for the subclass.
"""
super().__init_subclass__(**kwargs)
cls.logger = logging.getLogger(__package__).getChild(cls.__name__)
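
A minimal sketch of what this buys a subclass (the agent class is made up for illustration):

```python
# Hedged sketch: any subclass of BaseAgent automatically receives a namespaced logger
# via __init_subclass__, without any boilerplate in the subclass itself.
from control_backend.agents.base import BaseAgent


class GreeterAgent(BaseAgent):
    async def setup(self) -> None:
        # Logger name is roughly "control_backend.agents.GreeterAgent".
        self.logger.info("GreeterAgent is ready.")
```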

View File

@@ -1,5 +1,2 @@
from control_backend.agents.bdi.bdi_core_agent import BDICoreAgent as BDICoreAgent
from .text_belief_extractor_agent import (
TextBeliefExtractorAgent as TextBeliefExtractorAgent,
)
from .bdi_core import BDICoreAgent as BDICoreAgent
from .text_extractor import TBeliefExtractorAgent as TBeliefExtractorAgent

View File

@@ -1,273 +0,0 @@
from __future__ import annotations
from abc import ABC, abstractmethod
from dataclasses import dataclass, field
from enum import StrEnum
class AstNode(ABC):
"""
Abstract base class for all elements of an AgentSpeak program.
"""
@abstractmethod
def _to_agentspeak(self) -> str:
"""
Generates the AgentSpeak code string.
"""
pass
def __str__(self) -> str:
return self._to_agentspeak()
class AstExpression(AstNode, ABC):
"""
Intermediate class for anything that can be used in a logical expression.
"""
def __and__(self, other: ExprCoalescible) -> AstBinaryOp:
return AstBinaryOp(self, BinaryOperatorType.AND, _coalesce_expr(other))
def __or__(self, other: ExprCoalescible) -> AstBinaryOp:
return AstBinaryOp(self, BinaryOperatorType.OR, _coalesce_expr(other))
def __invert__(self) -> AstLogicalExpression:
if isinstance(self, AstLogicalExpression):
self.negated = not self.negated
return self
return AstLogicalExpression(self, negated=True)
type ExprCoalescible = AstExpression | str | int | float
def _coalesce_expr(value: ExprCoalescible) -> AstExpression:
if isinstance(value, AstExpression):
return value
if isinstance(value, str):
return AstString(value)
if isinstance(value, (int, float)):
return AstNumber(value)
raise TypeError(f"Cannot coalesce type {type(value)} into an AstTerm.")
@dataclass
class AstTerm(AstExpression, ABC):
"""
Base class for terms appearing inside literals.
"""
def __ge__(self, other: ExprCoalescible) -> AstBinaryOp:
return AstBinaryOp(self, BinaryOperatorType.GREATER_EQUALS, _coalesce_expr(other))
def __gt__(self, other: ExprCoalescible) -> AstBinaryOp:
return AstBinaryOp(self, BinaryOperatorType.GREATER_THAN, _coalesce_expr(other))
def __le__(self, other: ExprCoalescible) -> AstBinaryOp:
return AstBinaryOp(self, BinaryOperatorType.LESS_EQUALS, _coalesce_expr(other))
def __lt__(self, other: ExprCoalescible) -> AstBinaryOp:
return AstBinaryOp(self, BinaryOperatorType.LESS_THAN, _coalesce_expr(other))
def __eq__(self, other: ExprCoalescible) -> AstBinaryOp:
return AstBinaryOp(self, BinaryOperatorType.EQUALS, _coalesce_expr(other))
def __ne__(self, other: ExprCoalescible) -> AstBinaryOp:
return AstBinaryOp(self, BinaryOperatorType.NOT_EQUALS, _coalesce_expr(other))
@dataclass(eq=False)
class AstAtom(AstTerm):
"""
Grounded expression in all lowercase.
"""
value: str
def _to_agentspeak(self) -> str:
return self.value.lower()
@dataclass(eq=False)
class AstVar(AstTerm):
"""
Ungrounded variable expression. First letter capitalized.
"""
name: str
def _to_agentspeak(self) -> str:
return self.name.capitalize()
@dataclass(eq=False)
class AstNumber(AstTerm):
value: int | float
def _to_agentspeak(self) -> str:
return str(self.value)
@dataclass(eq=False)
class AstString(AstTerm):
value: str
def _to_agentspeak(self) -> str:
return f'"{self.value}"'
@dataclass(eq=False)
class AstLiteral(AstTerm):
functor: str
terms: list[AstTerm] = field(default_factory=list)
def _to_agentspeak(self) -> str:
if not self.terms:
return self.functor
args = ", ".join(map(str, self.terms))
return f"{self.functor}({args})"
class BinaryOperatorType(StrEnum):
AND = "&"
OR = "|"
GREATER_THAN = ">"
LESS_THAN = "<"
EQUALS = "=="
NOT_EQUALS = "\\=="
GREATER_EQUALS = ">="
LESS_EQUALS = "<="
@dataclass
class AstBinaryOp(AstExpression):
left: AstExpression
operator: BinaryOperatorType
right: AstExpression
def __post_init__(self):
self.left = _as_logical(self.left)
self.right = _as_logical(self.right)
def _to_agentspeak(self) -> str:
l_str = str(self.left)
r_str = str(self.right)
assert isinstance(self.left, AstLogicalExpression)
assert isinstance(self.right, AstLogicalExpression)
if isinstance(self.left.expression, AstBinaryOp) or self.left.negated:
l_str = f"({l_str})"
if isinstance(self.right.expression, AstBinaryOp) or self.right.negated:
r_str = f"({r_str})"
return f"{l_str} {self.operator.value} {r_str}"
@dataclass
class AstLogicalExpression(AstExpression):
expression: AstExpression
negated: bool = False
def _to_agentspeak(self) -> str:
expr_str = str(self.expression)
if isinstance(self.expression, AstBinaryOp) and self.negated:
expr_str = f"({expr_str})"
return f"{'not ' if self.negated else ''}{expr_str}"
def _as_logical(expr: AstExpression) -> AstLogicalExpression:
if isinstance(expr, AstLogicalExpression):
return expr
return AstLogicalExpression(expr)
class StatementType(StrEnum):
EMPTY = ""
DO_ACTION = "."
ACHIEVE_GOAL = "!"
TEST_GOAL = "?"
ADD_BELIEF = "+"
REMOVE_BELIEF = "-"
REPLACE_BELIEF = "-+"
@dataclass
class AstStatement(AstNode):
"""
A statement that can appear inside a plan.
"""
type: StatementType
expression: AstExpression
def _to_agentspeak(self) -> str:
return f"{self.type.value}{self.expression}"
@dataclass
class AstRule(AstNode):
result: AstExpression
condition: AstExpression | None = None
def __post_init__(self):
if self.condition is not None:
self.condition = _as_logical(self.condition)
def _to_agentspeak(self) -> str:
if not self.condition:
return f"{self.result}."
return f"{self.result} :- {self.condition}."
class TriggerType(StrEnum):
ADDED_BELIEF = "+"
# REMOVED_BELIEF = "-" # TODO
# MODIFIED_BELIEF = "^" # TODO
ADDED_GOAL = "+!"
# REMOVED_GOAL = "-!" # TODO
@dataclass
class AstPlan(AstNode):
type: TriggerType
trigger_literal: AstExpression
context: list[AstExpression]
body: list[AstStatement]
def _to_agentspeak(self) -> str:
assert isinstance(self.trigger_literal, AstLiteral)
indent = " " * 6
colon = " : "
arrow = " <- "
lines = []
lines.append(f"{self.type.value}{self.trigger_literal}")
if self.context:
lines.append(colon + f" &\n{indent}".join(str(c) for c in self.context))
if self.body:
lines.append(arrow + f";\n{indent}".join(str(s) for s in self.body) + ".")
lines.append("")
return "\n".join(lines)
@dataclass
class AstProgram(AstNode):
rules: list[AstRule] = field(default_factory=list)
plans: list[AstPlan] = field(default_factory=list)
def _to_agentspeak(self) -> str:
lines = []
lines.extend(map(str, self.rules))
lines.extend(["", ""])
lines.extend(map(str, self.plans))
return "\n".join(lines)
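
As a quick, hedged illustration of how these dataclasses compose (the literals are made up):

```python
# Build and render a rule using the helpers above.
condition = AstLiteral("phase", [AstString("1")]) & AstLiteral("user_said", [AstVar("Message")])
print(condition)  # phase("1") & user_said(Message)

rule = AstRule(AstLiteral("greeting_allowed"), condition)
print(rule)       # greeting_allowed :- phase("1") & user_said(Message).
```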

View File

@@ -1,504 +0,0 @@
from functools import singledispatchmethod
from slugify import slugify
from control_backend.agents.bdi.agentspeak_ast import (
AstAtom,
AstBinaryOp,
AstExpression,
AstLiteral,
AstNumber,
AstPlan,
AstProgram,
AstRule,
AstStatement,
AstString,
AstVar,
BinaryOperatorType,
StatementType,
TriggerType,
)
from control_backend.schemas.program import (
BaseGoal,
BasicNorm,
ConditionalNorm,
GestureAction,
Goal,
InferredBelief,
KeywordBelief,
LLMAction,
LogicalOperator,
Norm,
Phase,
PlanElement,
Program,
ProgramElement,
SemanticBelief,
SpeechAction,
Trigger,
)
class AgentSpeakGenerator:
_asp: AstProgram
def generate(self, program: Program) -> str:
self._asp = AstProgram()
if program.phases:
self._asp.rules.append(AstRule(self._astify(program.phases[0])))
else:
self._asp.rules.append(AstRule(AstLiteral("phase", [AstString("end")])))
self._asp.rules.append(AstRule(AstLiteral("!notify_cycle")))
self._add_keyword_inference()
self._add_default_plans()
self._process_phases(program.phases)
self._add_fallbacks()
return str(self._asp)
def _add_keyword_inference(self) -> None:
keyword = AstVar("Keyword")
message = AstVar("Message")
position = AstVar("Pos")
self._asp.rules.append(
AstRule(
AstLiteral("keyword_said", [keyword]),
AstLiteral("user_said", [message])
& AstLiteral(".substring", [keyword, message, position])
& (position >= 0),
)
)
def _add_default_plans(self):
self._add_reply_with_goal_plan()
self._add_say_plan()
self._add_reply_plan()
self._add_notify_cycle_plan()
def _add_reply_with_goal_plan(self):
self._asp.plans.append(
AstPlan(
TriggerType.ADDED_GOAL,
AstLiteral("reply_with_goal", [AstVar("Goal")]),
[AstLiteral("user_said", [AstVar("Message")])],
[
AstStatement(StatementType.ADD_BELIEF, AstLiteral("responded_this_turn")),
AstStatement(
StatementType.DO_ACTION,
AstLiteral(
"findall",
[AstVar("Norm"), AstLiteral("norm", [AstVar("Norm")]), AstVar("Norms")],
),
),
AstStatement(
StatementType.DO_ACTION,
AstLiteral(
"reply_with_goal", [AstVar("Message"), AstVar("Norms"), AstVar("Goal")]
),
),
],
)
)
def _add_say_plan(self):
self._asp.plans.append(
AstPlan(
TriggerType.ADDED_GOAL,
AstLiteral("say", [AstVar("Text")]),
[],
[
AstStatement(StatementType.ADD_BELIEF, AstLiteral("responded_this_turn")),
AstStatement(StatementType.DO_ACTION, AstLiteral("say", [AstVar("Text")])),
],
)
)
def _add_reply_plan(self):
self._asp.plans.append(
AstPlan(
TriggerType.ADDED_GOAL,
AstLiteral("reply"),
[AstLiteral("user_said", [AstVar("Message")])],
[
AstStatement(StatementType.ADD_BELIEF, AstLiteral("responded_this_turn")),
AstStatement(
StatementType.DO_ACTION,
AstLiteral(
"findall",
[AstVar("Norm"), AstLiteral("norm", [AstVar("Norm")]), AstVar("Norms")],
),
),
AstStatement(
StatementType.DO_ACTION,
AstLiteral("reply", [AstVar("Message"), AstVar("Norms")]),
),
],
)
)
def _add_notify_cycle_plan(self):
self._asp.plans.append(
AstPlan(
TriggerType.ADDED_GOAL,
AstLiteral("notify_cycle"),
[],
[
AstStatement(
StatementType.DO_ACTION,
AstLiteral(
"findall",
[AstVar("Norm"), AstLiteral("norm", [AstVar("Norm")]), AstVar("Norms")],
),
),
AstStatement(
StatementType.DO_ACTION, AstLiteral("notify_norms", [AstVar("Norms")])
),
AstStatement(StatementType.DO_ACTION, AstLiteral("wait", [AstNumber(100)])),
AstStatement(StatementType.ACHIEVE_GOAL, AstLiteral("notify_cycle")),
],
)
)
def _process_phases(self, phases: list[Phase]) -> None:
for curr_phase, next_phase in zip([None] + phases, phases + [None], strict=True):
if curr_phase:
self._process_phase(curr_phase)
self._add_phase_transition(curr_phase, next_phase)
# End phase behavior
# When deleting this, the entire `reply` plan and action can be deleted
self._asp.plans.append(
AstPlan(
type=TriggerType.ADDED_BELIEF,
trigger_literal=AstLiteral("user_said", [AstVar("Message")]),
context=[AstLiteral("phase", [AstString("end")])],
body=[
AstStatement(
StatementType.DO_ACTION, AstLiteral("notify_user_said", [AstVar("Message")])
),
AstStatement(StatementType.ACHIEVE_GOAL, AstLiteral("reply")),
],
)
)
def _process_phase(self, phase: Phase) -> None:
for norm in phase.norms:
self._process_norm(norm, phase)
self._add_default_loop(phase)
previous_goal = None
for goal in phase.goals:
self._process_goal(goal, phase, previous_goal, main_goal=True)
previous_goal = goal
for trigger in phase.triggers:
self._process_trigger(trigger, phase)
def _add_phase_transition(self, from_phase: Phase | None, to_phase: Phase | None) -> None:
if from_phase is None:
return
from_phase_ast = self._astify(from_phase)
to_phase_ast = (
self._astify(to_phase) if to_phase else AstLiteral("phase", [AstString("end")])
)
check_context = [from_phase_ast]
if from_phase:
for goal in from_phase.goals:
check_context.append(self._astify(goal, achieved=True))
force_context = [from_phase_ast]
body = [
AstStatement(
StatementType.DO_ACTION,
AstLiteral(
"notify_transition_phase",
[
AstString(str(from_phase.id)),
AstString(str(to_phase.id) if to_phase else "end"),
],
),
),
AstStatement(StatementType.REMOVE_BELIEF, from_phase_ast),
AstStatement(StatementType.ADD_BELIEF, to_phase_ast),
]
# if from_phase:
# body.extend(
# [
# AstStatement(
# StatementType.TEST_GOAL, AstLiteral("user_said", [AstVar("Message")])
# ),
# AstStatement(
# StatementType.REPLACE_BELIEF, AstLiteral("user_said", [AstVar("Message")])
# ),
# ]
# )
# Check
self._asp.plans.append(
AstPlan(
TriggerType.ADDED_GOAL,
AstLiteral("transition_phase"),
check_context,
[
AstStatement(StatementType.ACHIEVE_GOAL, AstLiteral("force_transition_phase")),
],
)
)
# Force
self._asp.plans.append(
AstPlan(
TriggerType.ADDED_GOAL, AstLiteral("force_transition_phase"), force_context, body
)
)
def _process_norm(self, norm: Norm, phase: Phase) -> None:
rule: AstRule | None = None
match norm:
case ConditionalNorm(condition=cond):
rule = AstRule(
self._astify(norm),
self._astify(phase) & self._astify(cond)
| AstAtom(f"force_{self.slugify(norm)}"),
)
case BasicNorm():
rule = AstRule(self._astify(norm), self._astify(phase))
if not rule:
return
self._asp.rules.append(rule)
def _add_default_loop(self, phase: Phase) -> None:
actions = []
actions.append(
AstStatement(
StatementType.DO_ACTION, AstLiteral("notify_user_said", [AstVar("Message")])
)
)
actions.append(AstStatement(StatementType.REMOVE_BELIEF, AstLiteral("responded_this_turn")))
actions.append(AstStatement(StatementType.ACHIEVE_GOAL, AstLiteral("check_triggers")))
for goal in phase.goals:
actions.append(AstStatement(StatementType.ACHIEVE_GOAL, self._astify(goal)))
actions.append(AstStatement(StatementType.ACHIEVE_GOAL, AstLiteral("transition_phase")))
self._asp.plans.append(
AstPlan(
TriggerType.ADDED_BELIEF,
AstLiteral("user_said", [AstVar("Message")]),
[self._astify(phase)],
actions,
)
)
def _process_goal(
self,
goal: Goal,
phase: Phase,
previous_goal: Goal | None = None,
continues_response: bool = False,
main_goal: bool = False,
) -> None:
context: list[AstExpression] = [self._astify(phase)]
context.append(~self._astify(goal, achieved=True))
if previous_goal and previous_goal.can_fail:
context.append(self._astify(previous_goal, achieved=True))
if not continues_response:
context.append(~AstLiteral("responded_this_turn"))
body = []
if main_goal: # UI only needs to know about the main goals
body.append(
AstStatement(
StatementType.DO_ACTION,
AstLiteral("notify_goal_start", [AstString(self.slugify(goal))]),
)
)
subgoals = []
for step in goal.plan.steps:
body.append(self._step_to_statement(step))
if isinstance(step, Goal):
subgoals.append(step)
if not goal.can_fail and not continues_response:
body.append(AstStatement(StatementType.ADD_BELIEF, self._astify(goal, achieved=True)))
self._asp.plans.append(AstPlan(TriggerType.ADDED_GOAL, self._astify(goal), context, body))
self._asp.plans.append(
AstPlan(
TriggerType.ADDED_GOAL,
self._astify(goal),
context=[],
body=[AstStatement(StatementType.EMPTY, AstLiteral("true"))],
)
)
prev_goal = None
for subgoal in subgoals:
self._process_goal(subgoal, phase, prev_goal)
prev_goal = subgoal
def _step_to_statement(self, step: PlanElement) -> AstStatement:
match step:
case Goal() | SpeechAction() | LLMAction() as a:
return AstStatement(StatementType.ACHIEVE_GOAL, self._astify(a))
case GestureAction() as a:
return AstStatement(StatementType.DO_ACTION, self._astify(a))
# TODO: separate handling of keyword and others
def _process_trigger(self, trigger: Trigger, phase: Phase) -> None:
body = []
subgoals = []
body.append(
AstStatement(
StatementType.DO_ACTION,
AstLiteral("notify_trigger_start", [AstString(self.slugify(trigger))]),
)
)
for step in trigger.plan.steps:
body.append(self._step_to_statement(step))
if isinstance(step, Goal):
step.can_fail = False # triggers are continuous sequence
subgoals.append(step)
# Arbitrary wait for UI to display nicely
body.append(AstStatement(StatementType.DO_ACTION, AstLiteral("wait", [AstNumber(2000)])))
body.append(
AstStatement(
StatementType.DO_ACTION,
AstLiteral("notify_trigger_end", [AstString(self.slugify(trigger))]),
)
)
self._asp.plans.append(
AstPlan(
TriggerType.ADDED_GOAL,
AstLiteral("check_triggers"),
[self._astify(phase), self._astify(trigger.condition)],
body,
)
)
# Force trigger (from UI)
self._asp.plans.append(AstPlan(TriggerType.ADDED_GOAL, self._astify(trigger), [], body))
for subgoal in subgoals:
self._process_goal(subgoal, phase, continues_response=True)
def _add_fallbacks(self):
# Trigger fallback
self._asp.plans.append(
AstPlan(
TriggerType.ADDED_GOAL,
AstLiteral("check_triggers"),
[],
[AstStatement(StatementType.EMPTY, AstLiteral("true"))],
)
)
# Phase transition fallback
self._asp.plans.append(
AstPlan(
TriggerType.ADDED_GOAL,
AstLiteral("transition_phase"),
[],
[AstStatement(StatementType.EMPTY, AstLiteral("true"))],
)
)
@singledispatchmethod
def _astify(self, element: ProgramElement) -> AstExpression:
raise NotImplementedError(f"Cannot convert element {element} to an AgentSpeak expression.")
@_astify.register
def _(self, kwb: KeywordBelief) -> AstExpression:
return AstLiteral("keyword_said", [AstString(kwb.keyword)])
@_astify.register
def _(self, sb: SemanticBelief) -> AstExpression:
return AstLiteral(self.slugify(sb))
@_astify.register
def _(self, ib: InferredBelief) -> AstExpression:
return AstBinaryOp(
self._astify(ib.left),
BinaryOperatorType.AND if ib.operator == LogicalOperator.AND else BinaryOperatorType.OR,
self._astify(ib.right),
)
@_astify.register
def _(self, norm: Norm) -> AstExpression:
functor = "critical_norm" if norm.critical else "norm"
return AstLiteral(functor, [AstString(norm.norm)])
@_astify.register
def _(self, phase: Phase) -> AstExpression:
return AstLiteral("phase", [AstString(str(phase.id))])
@_astify.register
def _(self, goal: Goal, achieved: bool = False) -> AstExpression:
return AstLiteral(f"{'achieved_' if achieved else ''}{self._slugify_str(goal.name)}")
@_astify.register
def _(self, trigger: Trigger) -> AstExpression:
return AstLiteral(self.slugify(trigger))
@_astify.register
def _(self, sa: SpeechAction) -> AstExpression:
return AstLiteral("say", [AstString(sa.text)])
@_astify.register
def _(self, ga: GestureAction) -> AstExpression:
gesture = ga.gesture
return AstLiteral("gesture", [AstString(gesture.type), AstString(gesture.name)])
@_astify.register
def _(self, la: LLMAction) -> AstExpression:
return AstLiteral("reply_with_goal", [AstString(la.goal)])
@singledispatchmethod
@staticmethod
def slugify(element: ProgramElement) -> str:
raise NotImplementedError(f"Cannot convert element {element} to a slug.")
@slugify.register
@staticmethod
def _(n: Norm) -> str:
return f"norm_{AgentSpeakGenerator._slugify_str(n.norm)}"
@slugify.register
@staticmethod
def _(sb: SemanticBelief) -> str:
return f"semantic_{AgentSpeakGenerator._slugify_str(sb.name)}"
@slugify.register
@staticmethod
def _(g: BaseGoal) -> str:
return AgentSpeakGenerator._slugify_str(g.name)
@slugify.register
@staticmethod
def _(t: Trigger):
return f"trigger_{AgentSpeakGenerator._slugify_str(t.name)}"
@staticmethod
def _slugify_str(text: str) -> str:
return slugify(text, separator="_", stopwords=["a", "an", "the", "we", "you", "I"])

View File

@@ -0,0 +1,67 @@
import logging
import agentspeak
from spade.behaviour import OneShotBehaviour
from spade.message import Message
from spade_bdi.bdi import BDIAgent
from control_backend.core.config import settings
from .behaviours.belief_setter import BeliefSetterBehaviour
from .behaviours.receive_llm_resp_behaviour import ReceiveLLMResponseBehaviour
class BDICoreAgent(BDIAgent):
"""
This is the Brain agent that does the belief inference with AgentSpeak.
This is a continuous process that happens automatically in the background.
This class contains all the actions that can be called from AgentSpeak plans.
It has the BeliefSetter behaviour and can send requests to and receive responses from the LLM agent.
"""
logger = logging.getLogger(__package__).getChild(__name__)
async def setup(self) -> None:
"""
Initializes belief behaviors and message routing.
"""
self.logger.info("BDICoreAgent setup started.")
self.add_behaviour(BeliefSetterBehaviour())
self.add_behaviour(ReceiveLLMResponseBehaviour())
self.logger.info("BDICoreAgent setup complete.")
def add_custom_actions(self, actions) -> None:
"""
Registers custom AgentSpeak actions callable from plans.
"""
@actions.add(".reply", 1)
def _reply(agent: "BDICoreAgent", term, intention):
"""
Sends text to the LLM (AgentSpeak action).
Example: .reply("Hello LLM!")
"""
message_text = agentspeak.grounded(term.args[0], intention.scope)
self.logger.debug("Reply action sending: %s", message_text)
self._send_to_llm(str(message_text))
yield
def _send_to_llm(self, text: str):
"""
Sends a text query to the LLM Agent asynchronously.
"""
class SendBehaviour(OneShotBehaviour):
async def run(self) -> None:
msg = Message(
to=settings.agent_settings.llm_agent_name + "@" + settings.agent_settings.host,
body=text,
)
await self.send(msg)
self.agent.logger.info("Message sent to LLM agent: %s", text)
self.add_behaviour(SendBehaviour())

View File

@@ -1,538 +0,0 @@
import asyncio
import copy
import json
import time
from collections.abc import Iterable
import agentspeak
import agentspeak.runtime
import agentspeak.stdlib
from pydantic import ValidationError
from control_backend.agents.base import BaseAgent
from control_backend.core.agent_system import InternalMessage
from control_backend.core.config import settings
from control_backend.schemas.belief_message import BeliefMessage
from control_backend.schemas.llm_prompt_message import LLMPromptMessage
from control_backend.schemas.ri_message import GestureCommand, RIEndpoint, SpeechCommand
DELIMITER = ";\n" # TODO: temporary until we support lists in AgentSpeak
class BDICoreAgent(BaseAgent):
"""
BDI Core Agent.
This is the central reasoning agent of the system, powered by the **AgentSpeak(L)** language.
It maintains a belief base (representing the state of the world) and a set of plans (rules).
It runs an internal BDI (Belief-Desire-Intention) cycle using the ``agentspeak`` library.
When beliefs change (e.g., via :meth:`_apply_belief_changes`), the agent evaluates its plans to
determine the best course of action.
**Custom Actions:**
It defines custom actions (like ``.reply``) that allow the AgentSpeak code to interact with
external Python agents (e.g., querying the LLM).
:ivar bdi_agent: The internal AgentSpeak agent instance.
:ivar asl_file: Path to the AgentSpeak source file (.asl).
:ivar env: The AgentSpeak environment.
:ivar actions: A registry of custom actions available to the AgentSpeak code.
:ivar _wake_bdi_loop: Event used to wake up the reasoning loop when new beliefs arrive.
"""
bdi_agent: agentspeak.runtime.Agent
def __init__(self, name: str):
super().__init__(name)
self.env = agentspeak.runtime.Environment()
# Deep copy because we don't actually want to modify the standard actions globally
self.actions = copy.deepcopy(agentspeak.stdlib.actions)
self._wake_bdi_loop = asyncio.Event()
self._bdi_loop_task = None
async def setup(self) -> None:
"""
Initialize the BDI agent.
1. Registers custom actions (like ``.reply``).
2. Loads the .asl source file.
3. Starts the reasoning loop (:meth:`_bdi_loop`) in the background.
"""
self.logger.debug("Setup started.")
self._add_custom_actions()
await self._load_asl()
# Start the BDI cycle loop
self._bdi_loop_task = self.add_behavior(self._bdi_loop())
self._wake_bdi_loop.set()
self.logger.debug("Setup complete.")
async def _load_asl(self, file_name: str | None = None) -> None:
"""
Load and parse the AgentSpeak source file.
"""
file_name = file_name or "src/control_backend/agents/bdi/default_behavior.asl"
try:
with open(file_name) as source:
self.bdi_agent = self.env.build_agent(source, self.actions)
self.logger.info(f"Loaded new ASL from {file_name}.")
except FileNotFoundError:
self.logger.warning(f"Could not find the specified ASL file at {file_name}.")
self.bdi_agent = agentspeak.runtime.Agent(self.env, self.name)
async def _bdi_loop(self):
"""
The main BDI reasoning loop.
It waits for the ``_wake_bdi_loop`` event (set when beliefs change or actions complete).
When awake, it steps through the AgentSpeak interpreter. It also handles sleeping if
the agent has deferred intentions (deadlines).
"""
while self._running:
await (
self._wake_bdi_loop.wait()
) # gets set whenever there's an update to the belief base
# Agent knows when it's expected to have to do its next thing
maybe_more_work = True
while maybe_more_work:
maybe_more_work = False
if self.bdi_agent.step():
maybe_more_work = True
if not maybe_more_work:
deadline = self.bdi_agent.shortest_deadline()
if deadline:
await asyncio.sleep(deadline - time.time())
maybe_more_work = True
else:
self._wake_bdi_loop.clear()
self.logger.debug("No more deadlines. Halting BDI loop.")
async def handle_message(self, msg: InternalMessage):
"""
Handle incoming messages.
- **Beliefs**: Updates the internal belief base.
- **Program**: Updates the internal agentspeak file to match the current program.
- **LLM Responses**: Forwards the generated text to the Robot Speech Agent (actuation).
:param msg: The received internal message.
"""
self.logger.debug("Processing message from %s.", msg.sender)
if msg.thread == "beliefs":
try:
belief_changes = BeliefMessage.model_validate_json(msg.body)
self._apply_belief_changes(belief_changes)
except ValidationError:
self.logger.exception("Error processing belief.")
return
# New agentspeak file
if msg.thread == "new_program":
if self._bdi_loop_task:
self._bdi_loop_task.cancel()
await self._load_asl(msg.body)
self.add_behavior(self._bdi_loop())
# The message was not a belief, handle special cases based on sender
match msg.sender:
case settings.agent_settings.llm_name:
content = msg.body
self.logger.info("Received LLM response: %s", content)
# Forward to Robot Speech Agent
cmd = SpeechCommand(data=content)
out_msg = InternalMessage(
to=settings.agent_settings.robot_speech_name,
sender=self.name,
body=cmd.model_dump_json(),
)
await self.send(out_msg)
case settings.agent_settings.user_interrupt_name:
self.logger.debug("Received user interruption: %s", msg)
match msg.thread:
case "force_phase_transition":
self._set_goal("transition_phase")
case "force_trigger":
self._force_trigger(msg.body)
case "force_norm":
self._force_norm(msg.body)
case "force_next_phase":
self._force_next_phase()
case _:
self.logger.warning("Received unknown user interruption: %s", msg)
def _apply_belief_changes(self, belief_changes: BeliefMessage):
"""
Update the belief base with a list of new beliefs.
For beliefs in ``belief_changes.replace``, it removes all existing beliefs with that name
before adding one new one.
:param belief_changes: The changes in beliefs to apply.
"""
if not belief_changes.create and not belief_changes.replace and not belief_changes.delete:
return
for belief in belief_changes.create:
self._add_belief(belief.name, belief.arguments)
for belief in belief_changes.replace:
self._remove_all_with_name(belief.name)
self._add_belief(belief.name, belief.arguments)
for belief in belief_changes.delete:
self._remove_belief(belief.name, belief.arguments)
def _add_belief(self, name: str, args: list[str] | None = None):
"""
Add a single belief to the BDI agent.
:param name: The functor/name of the belief (e.g., "user_said").
:param args: Arguments for the belief.
"""
# new_args = (agentspeak.Literal(arg) for arg in args) # TODO: Eventually support multiple
args = args or []
if args:
merged_args = DELIMITER.join(arg for arg in args)
new_args = (agentspeak.Literal(merged_args),)
term = agentspeak.Literal(name, new_args)
else:
term = agentspeak.Literal(name)
self.bdi_agent.call(
agentspeak.Trigger.addition,
agentspeak.GoalType.belief,
term,
agentspeak.runtime.Intention(),
)
# Check for transitions
self.bdi_agent.call(
agentspeak.Trigger.addition,
agentspeak.GoalType.achievement,
agentspeak.Literal("transition_phase"),
agentspeak.runtime.Intention(),
)
# Check triggers
self.bdi_agent.call(
agentspeak.Trigger.addition,
agentspeak.GoalType.achievement,
agentspeak.Literal("check_triggers"),
agentspeak.runtime.Intention(),
)
self._wake_bdi_loop.set()
self.logger.debug(f"Added belief {self.format_belief_string(name, args)}")
def _remove_belief(self, name: str, args: Iterable[str] | None):
"""
Removes a specific belief (with arguments), if it exists.
"""
if args is None:
term = agentspeak.Literal(name)
else:
new_args = (agentspeak.Literal(arg) for arg in args)
term = agentspeak.Literal(name, new_args)
result = self.bdi_agent.call(
agentspeak.Trigger.removal,
agentspeak.GoalType.belief,
term,
agentspeak.runtime.Intention(),
)
if result:
self.logger.debug(f"Removed belief {self.format_belief_string(name, args)}")
self._wake_bdi_loop.set()
else:
self.logger.debug("Failed to remove belief (it was not in the belief base).")
def _remove_all_with_name(self, name: str):
"""
Removes all beliefs that match the given `name`.
"""
relevant_groups = []
for key in self.bdi_agent.beliefs:
if key[0] == name:
relevant_groups.append(key)
removed_count = 0
for group in relevant_groups:
beliefs_to_remove = list(self.bdi_agent.beliefs[group])
for belief in beliefs_to_remove:
self.bdi_agent.call(
agentspeak.Trigger.removal,
agentspeak.GoalType.belief,
belief,
agentspeak.runtime.Intention(),
)
removed_count += 1
self._wake_bdi_loop.set()
self.logger.debug(f"Removed {removed_count} beliefs.")
def _set_goal(self, name: str, args: Iterable[str] | None = None):
args = args or []
if args:
merged_args = DELIMITER.join(arg for arg in args)
new_args = (agentspeak.Literal(merged_args),)
term = agentspeak.Literal(name, new_args)
else:
term = agentspeak.Literal(name)
self.bdi_agent.call(
agentspeak.Trigger.addition,
agentspeak.GoalType.achievement,
term,
agentspeak.runtime.Intention(),
)
self._wake_bdi_loop.set()
self.logger.debug(f"Set goal !{self.format_belief_string(name, args)}.")
def _force_trigger(self, name: str):
self._set_goal(name)
self.logger.info("Manually forced trigger %s.", name)
# TODO: make this compatible for critical norms
def _force_norm(self, name: str):
self._add_belief(f"force_{name}")
self.logger.info("Manually forced norm %s.", name)
def _force_next_phase(self):
self._set_goal("force_transition_phase")
self.logger.info("Manually forced phase transition.")
def _add_custom_actions(self) -> None:
"""
Add any custom actions here. Inside `@self.actions.add()`, the first argument is
the name of the function in the ASL file, and the second the amount of arguments
the function expects (which will be located in `term.args`).
"""
@self.actions.add(".reply", 2)
def _reply(agent, term, intention):
"""
Let the LLM generate a response to a user's utterance with the current norms and goals.
"""
message_text = agentspeak.grounded(term.args[0], intention.scope)
norms = agentspeak.grounded(term.args[1], intention.scope)
self.add_behavior(self._send_to_llm(str(message_text), str(norms), ""))
yield
@self.actions.add(".reply_with_goal", 3)
def _reply_with_goal(agent: "BDICoreAgent", term, intention):
"""
Let the LLM generate a response to a user's utterance with the current norms and a
specific goal.
"""
message_text = agentspeak.grounded(term.args[0], intention.scope)
norms = agentspeak.grounded(term.args[1], intention.scope)
goal = agentspeak.grounded(term.args[2], intention.scope)
self.add_behavior(self._send_to_llm(str(message_text), str(norms), str(goal)))
yield
@self.actions.add(".notify_norms", 1)
def _notify_norms(agent, term, intention):
norms = agentspeak.grounded(term.args[0], intention.scope)
norm_update_message = InternalMessage(
to=settings.agent_settings.user_interrupt_name,
thread="active_norms_update",
body=str(norms),
)
self.add_behavior(self.send(norm_update_message, should_log=False))
yield
@self.actions.add(".say", 1)
def _say(agent, term, intention):
"""
Make the robot say the given text instantly.
"""
message_text = agentspeak.grounded(term.args[0], intention.scope)
self.logger.debug('"say" action called with text=%s', message_text)
speech_command = SpeechCommand(data=message_text)
speech_message = InternalMessage(
to=settings.agent_settings.robot_speech_name,
sender=settings.agent_settings.bdi_core_name,
body=speech_command.model_dump_json(),
)
self.add_behavior(self.send(speech_message))
chat_history_message = InternalMessage(
to=settings.agent_settings.llm_name,
thread="assistant_message",
body=str(message_text),
)
self.add_behavior(self.send(chat_history_message))
yield
@self.actions.add(".gesture", 2)
def _gesture(agent, term, intention):
"""
Make the robot perform the given gesture instantly.
"""
gesture_type = agentspeak.grounded(term.args[0], intention.scope)
gesture_name = agentspeak.grounded(term.args[1], intention.scope)
self.logger.debug(
'"gesture" action called with type=%s, name=%s',
gesture_type,
gesture_name,
)
if str(gesture_type) == "single":
endpoint = RIEndpoint.GESTURE_SINGLE
elif str(gesture_type) == "tag":
endpoint = RIEndpoint.GESTURE_TAG
else:
self.logger.warning("Gesture type %s could not be resolved.", gesture_type)
endpoint = RIEndpoint.GESTURE_SINGLE
gesture_command = GestureCommand(endpoint=endpoint, data=gesture_name)
gesture_message = InternalMessage(
to=settings.agent_settings.robot_gesture_name,
sender=settings.agent_settings.bdi_core_name,
body=gesture_command.model_dump_json(),
)
self.add_behavior(self.send(gesture_message))
yield
@self.actions.add(".notify_user_said", 1)
def _notify_user_said(agent, term, intention):
user_said = agentspeak.grounded(term.args[0], intention.scope)
msg = InternalMessage(
to=settings.agent_settings.llm_name, thread="user_message", body=str(user_said)
)
self.add_behavior(self.send(msg))
yield
@self.actions.add(".notify_trigger_start", 1)
def _notify_trigger_start(agent, term, intention):
"""
Notify the UI about the trigger we just started doing.
"""
trigger_name = agentspeak.grounded(term.args[0], intention.scope)
self.logger.debug("Started trigger %s", trigger_name)
msg = InternalMessage(
to=settings.agent_settings.user_interrupt_name,
sender=self.name,
thread="trigger_start",
body=str(trigger_name),
)
# TODO: check with Pim
self.add_behavior(self.send(msg))
yield
@self.actions.add(".notify_trigger_end", 1)
def _notify_trigger_end(agent, term, intention):
"""
Notify the UI about the trigger we just finished.
"""
trigger_name = agentspeak.grounded(term.args[0], intention.scope)
self.logger.debug("Finished trigger %s", trigger_name)
msg = InternalMessage(
to=settings.agent_settings.user_interrupt_name,
sender=self.name,
thread="trigger_end",
body=str(trigger_name),
)
self.add_behavior(self.send(msg))
yield
@self.actions.add(".notify_goal_start", 1)
def _notify_goal_start(agent, term, intention):
"""
Notify the UI about the goal we just started chasing.
"""
goal_name = agentspeak.grounded(term.args[0], intention.scope)
self.logger.debug("Started chasing goal %s", goal_name)
msg = InternalMessage(
to=settings.agent_settings.user_interrupt_name,
sender=self.name,
thread="goal_start",
body=str(goal_name),
)
self.add_behavior(self.send(msg))
yield
@self.actions.add(".notify_transition_phase", 2)
def _notify_transition_phase(agent, term, intention):
"""
Notify the BDI program manager about a phase transition.
"""
old = agentspeak.grounded(term.args[0], intention.scope)
new = agentspeak.grounded(term.args[1], intention.scope)
msg = InternalMessage(
to=settings.agent_settings.bdi_program_manager_name,
thread="transition_phase",
body=json.dumps({"old": str(old), "new": str(new)}),
)
self.add_behavior(self.send(msg))
yield
@self.actions.add(".notify_ui", 0)
def _notify_ui(agent, term, intention):
pass
async def _send_to_llm(self, text: str, norms: str, goals: str):
"""
Sends a text query to the LLM agent asynchronously.
"""
prompt = LLMPromptMessage(text=text, norms=norms.split("\n"), goals=goals.split("\n"))
msg = InternalMessage(
to=settings.agent_settings.llm_name,
sender=self.name,
body=prompt.model_dump_json(),
thread="prompt_message",
)
await self.send(msg)
self.logger.info("Message sent to LLM agent: %s", text)
@staticmethod
def format_belief_string(name: str, args: Iterable[str] | None = None):
"""
Given a belief's name and its args, return a string of the form "name(*args)"
"""
return f"{name}{'(' if args else ''}{','.join(args or [])}{')' if args else ''}"
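
For example (values made up):

```python
BDICoreAgent.format_belief_string("user_said", ["hello"])  # 'user_said(hello)'
BDICoreAgent.format_belief_string("responded_this_turn")   # 'responded_this_turn'
```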

View File

@@ -1,301 +0,0 @@
import asyncio
import json
import zmq
from pydantic import ValidationError
from zmq.asyncio import Context
from control_backend.agents import BaseAgent
from control_backend.agents.bdi.agentspeak_generator import AgentSpeakGenerator
from control_backend.core.config import settings
from control_backend.schemas.belief_list import BeliefList, GoalList
from control_backend.schemas.internal_message import InternalMessage
from control_backend.schemas.program import (
Belief,
ConditionalNorm,
Goal,
InferredBelief,
Phase,
Program,
)
class BDIProgramManager(BaseAgent):
"""
BDI Program Manager Agent.
This agent is responsible for receiving high-level programs (sequences of instructions/goals)
from the external HTTP API (via ZMQ) and translating them into core beliefs (norms and goals)
for the BDI Core Agent. In the future, it will be responsible for determining when goals are
met, and passing on new norms and goals accordingly.
:ivar sub_socket: The ZMQ SUB socket used to receive program updates.
"""
_program: Program
_phase: Phase | None
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.sub_socket = None
def _initialize_internal_state(self, program: Program):
self._program = program
self._phase = program.phases[0] # start in first phase
self._goal_mapping: dict[str, Goal] = {}
for phase in program.phases:
for goal in phase.goals:
self._populate_goal_mapping_with_goal(goal)
def _populate_goal_mapping_with_goal(self, goal: Goal):
self._goal_mapping[str(goal.id)] = goal
for step in goal.plan.steps:
if isinstance(step, Goal):
self._populate_goal_mapping_with_goal(step)
async def _create_agentspeak_and_send_to_bdi(self, program: Program):
"""
Convert a received program into an AgentSpeak file and send it to the BDI Core Agent.
:param program: The program object received from the API.
"""
asg = AgentSpeakGenerator()
asl_str = asg.generate(program)
file_name = "src/control_backend/agents/bdi/agentspeak.asl"
with open(file_name, "w") as f:
f.write(asl_str)
msg = InternalMessage(
sender=self.name,
to=settings.agent_settings.bdi_core_name,
body=file_name,
thread="new_program",
)
await self.send(msg)
async def handle_message(self, msg: InternalMessage):
match msg.thread:
case "transition_phase":
phases = json.loads(msg.body)
await self._transition_phase(phases["old"], phases["new"])
case "achieve_goal":
goal_id = msg.body
await self._send_achieved_goal_to_semantic_belief_extractor(goal_id)
async def _transition_phase(self, old: str, new: str):
if old != str(self._phase.id):
self.logger.warning(
f"Phase transition desync detected! ASL requested move from '{old}', "
f"but Python is currently in '{self._phase.id}'. Request ignored."
)
return
if new == "end":
self._phase = None
# Notify user interaction agent
msg = InternalMessage(
to=settings.agent_settings.user_interrupt_name,
thread="transition_phase",
body="end",
)
self.logger.info("Transitioned to end phase, notifying UserInterruptAgent.")
self.add_behavior(self.send(msg))
return
for phase in self._program.phases:
if str(phase.id) == new:
self._phase = phase
await self._send_beliefs_to_semantic_belief_extractor()
await self._send_goals_to_semantic_belief_extractor()
# Notify user interaction agent
msg = InternalMessage(
to=settings.agent_settings.user_interrupt_name,
thread="transition_phase",
body=str(self._phase.id),
)
self.logger.info(f"Transitioned to phase {new}, notifying UserInterruptAgent.")
self.add_behavior(self.send(msg))
def _extract_current_beliefs(self) -> list[Belief]:
beliefs: list[Belief] = []
for norm in self._phase.norms:
if isinstance(norm, ConditionalNorm):
beliefs += self._extract_beliefs_from_belief(norm.condition)
for trigger in self._phase.triggers:
beliefs += self._extract_beliefs_from_belief(trigger.condition)
return beliefs
@staticmethod
def _extract_beliefs_from_belief(belief: Belief) -> list[Belief]:
if isinstance(belief, InferredBelief):
return BDIProgramManager._extract_beliefs_from_belief(
belief.left
) + BDIProgramManager._extract_beliefs_from_belief(belief.right)
return [belief]
async def _send_beliefs_to_semantic_belief_extractor(self):
"""
Extract beliefs from the program and send them to the Semantic Belief Extractor Agent.
"""
beliefs = BeliefList(beliefs=self._extract_current_beliefs())
message = InternalMessage(
to=settings.agent_settings.text_belief_extractor_name,
sender=self.name,
body=beliefs.model_dump_json(),
thread="beliefs",
)
await self.send(message)
@staticmethod
def _extract_goals_from_goal(goal: Goal) -> list[Goal]:
"""
Extract all goals from a given goal, that is: the goal itself and any subgoals.
:return: All goals within and including the given goal.
"""
goals: list[Goal] = [goal]
for step in goal.plan.steps:
if isinstance(step, Goal):
goals.extend(BDIProgramManager._extract_goals_from_goal(step))
return goals
def _extract_current_goals(self) -> list[Goal]:
"""
Extract all goals from the program, including subgoals.
:return: A list of Goal objects.
"""
goals: list[Goal] = []
for goal in self._phase.goals:
goals.extend(self._extract_goals_from_goal(goal))
return goals
async def _send_goals_to_semantic_belief_extractor(self):
"""
Extract goals for the current phase and send them to the Semantic Belief Extractor Agent.
"""
goals = GoalList(goals=self._extract_current_goals())
message = InternalMessage(
to=settings.agent_settings.text_belief_extractor_name,
sender=self.name,
body=goals.model_dump_json(),
thread="goals",
)
await self.send(message)
async def _send_achieved_goal_to_semantic_belief_extractor(self, achieved_goal_id: str):
"""
Inform the semantic belief extractor when a goal is marked achieved.
:param achieved_goal_id: The id of the achieved goal.
"""
goal = self._goal_mapping.get(achieved_goal_id)
if goal is None:
self.logger.debug(f"Goal with ID {achieved_goal_id} marked achieved but was not found.")
return
goals = self._extract_goals_from_goal(goal)
message = InternalMessage(
to=settings.agent_settings.text_belief_extractor_name,
body=GoalList(goals=goals).model_dump_json(),
thread="achieved_goals",
)
await self.send(message)
async def _send_clear_llm_history(self):
"""
Clear the LLM Agent's conversation history.
Sends an empty history to the LLM Agent to reset its state.
"""
message = InternalMessage(
to=settings.agent_settings.llm_name,
body="clear_history",
)
await self.send(message)
self.logger.debug("Sent message to LLM agent to clear history.")
extractor_msg = InternalMessage(
to=settings.agent_settings.text_belief_extractor_name,
thread="conversation_history",
body="reset",
)
await self.send(extractor_msg)
self.logger.debug("Sent message to extractor agent to clear history.")
async def _receive_programs(self):
"""
Continuous loop that receives program updates from the HTTP endpoint.
It listens to the ``program`` topic on the internal ZMQ SUB socket.
When a program is received, it is validated and forwarded to BDI via :meth:`_create_agentspeak_and_send_to_bdi`.
Additionally, the LLM history is cleared via :meth:`_send_clear_llm_history`.
"""
while True:
topic, body = await self.sub_socket.recv_multipart()
try:
program = Program.model_validate_json(body)
except ValidationError:
self.logger.warning("Received an invalid program.")
continue
self._initialize_internal_state(program)
await self._send_program_to_user_interrupt(program)
await self._send_clear_llm_history()
await asyncio.gather(
self._create_agentspeak_and_send_to_bdi(program),
self._send_beliefs_to_semantic_belief_extractor(),
self._send_goals_to_semantic_belief_extractor(),
)
async def _send_program_to_user_interrupt(self, program: Program):
"""
Send the received program to the User Interrupt Agent.
:param program: The program object received from the API.
"""
msg = InternalMessage(
sender=self.name,
to=settings.agent_settings.user_interrupt_name,
body=program.model_dump_json(),
thread="new_program",
)
await self.send(msg)
async def setup(self):
"""
Initialize the agent.
Connects the internal ZMQ SUB socket and subscribes to the 'program' topic.
Starts the background behavior to receive programs. Initializes a default program.
"""
await self._create_agentspeak_and_send_to_bdi(Program(phases=[]))
context = Context.instance()
self.sub_socket = context.socket(zmq.SUB)
self.sub_socket.connect(settings.zmq_settings.internal_sub_address)
self.sub_socket.subscribe("program")
self.add_behavior(self._receive_programs())
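For context, a minimal sketch of how a program could be published on the internal bus so that `_receive_programs` picks it up on the `program` topic. It reuses the `internal_pub_address` setting and an empty `Program(phases=[])` as in `setup` above; the exact broker wiring is assumed to match the rest of the system.

```python
import zmq

from control_backend.core.config import settings
from control_backend.schemas.program import Program


def publish_program(program: Program) -> None:
    # Topic frame first, then the JSON-serialized program, mirroring what the SUB socket expects.
    sock = zmq.Context.instance().socket(zmq.PUB)
    sock.connect(settings.zmq_settings.internal_pub_address)
    sock.send_multipart([b"program", program.model_dump_json().encode()])


# Usage sketch: publish_program(Program(phases=[]))
```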

View File

@@ -0,0 +1,87 @@
import json
from spade.agent import Message
from spade.behaviour import CyclicBehaviour
from spade_bdi.bdi import BDIAgent
from control_backend.core.config import settings
class BeliefSetterBehaviour(CyclicBehaviour):
"""
This is the behaviour that the BDI agent runs. It waits for incoming
messages and updates the agent's beliefs accordingly.
"""
agent: BDIAgent
async def run(self):
"""Polls for messages and processes them."""
msg = await self.receive(timeout=1)
if not msg:
return
self.agent.logger.debug(
"Received message from %s with thread '%s' and body: %s",
msg.sender,
msg.thread,
msg.body,
)
self._process_message(msg)
def _process_message(self, message: Message):
"""Routes the message to the correct processing function based on the sender."""
sender = message.sender.node # removes host from jid and converts to str
self.agent.logger.debug("Processing message from sender: %s", sender)
match sender:
case settings.agent_settings.belief_collector_agent_name:
self.agent.logger.debug(
"Message is from the belief collector agent. Processing as belief message."
)
self._process_belief_message(message)
case _:
self.agent.logger.debug("Not the belief agent, discarding message")
pass
def _process_belief_message(self, message: Message):
if not message.body:
self.agent.logger.debug("Ignoring message with empty body from %s", message.sender.node)
return
match message.thread:
case "beliefs":
try:
beliefs: dict[str, list[str]] = json.loads(message.body)
self._set_beliefs(beliefs)
except json.JSONDecodeError:
self.agent.logger.error(
"Could not decode beliefs from JSON. Message body: '%s'",
message.body,
exc_info=True,
)
case _:
pass
def _set_beliefs(self, beliefs: dict[str, list[str]]):
"""Removes previous values for beliefs and updates them with the provided values."""
if self.agent.bdi is None:
self.agent.logger.warning("Cannot set beliefs; agent's BDI is not yet initialized.")
return
if not beliefs:
self.agent.logger.debug("Received an empty set of beliefs. No beliefs were updated.")
return
# Set new beliefs (outdated beliefs are automatically removed)
for belief, arguments in beliefs.items():
self.agent.logger.debug("Setting belief %s with arguments %s", belief, arguments)
self.agent.bdi.set_belief(belief, *arguments)
# Special case: if there's a new user message, flag that we haven't responded yet
if belief == "user_said":
self.agent.bdi.set_belief("new_message")
self.agent.logger.debug(
"Detected 'user_said' belief, also setting 'new_message' belief."
)
self.agent.logger.info("Successfully updated %d beliefs.", len(beliefs))

View File

@@ -0,0 +1,39 @@
from spade.behaviour import CyclicBehaviour
from spade.message import Message
from control_backend.core.config import settings
from control_backend.schemas.ri_message import SpeechCommand
class ReceiveLLMResponseBehaviour(CyclicBehaviour):
"""
Adds behavior to receive responses from the LLM Agent.
"""
async def run(self):
msg = await self.receive(timeout=1)
if not msg:
return
sender = msg.sender.node
match sender:
case settings.agent_settings.llm_agent_name:
content = msg.body
self.agent.logger.info("Received LLM response: %s", content)
speech_command = SpeechCommand(data=content)
message = Message(
to=settings.agent_settings.ri_command_agent_name
+ "@"
+ settings.agent_settings.host,
sender=self.agent.jid,
body=speech_command.model_dump_json(),
)
self.agent.logger.debug("Sending message: %s", message)
await self.send(message)
case _:
self.agent.logger.debug("Discarding message from %s", sender)
pass
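As a rough illustration of the forwarding step above, the LLM text is wrapped in a `SpeechCommand` before being sent to the RI command agent. The sketch assumes `data` is the only field that needs to be set, as in the behaviour above; other defaults may apply.

```python
from control_backend.schemas.ri_message import SpeechCommand

cmd = SpeechCommand(data="Arr, hello there!")
body = cmd.model_dump_json()  # becomes the SPADE message body, e.g. '{"data": "Arr, hello there!"}'
```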

View File

@@ -0,0 +1,104 @@
import json
import logging
from spade.behaviour import CyclicBehaviour
from spade.message import Message
from control_backend.core.config import settings
class BeliefFromText(CyclicBehaviour):
logger = logging.getLogger(__name__)
# TODO: LLM prompt is still hardcoded
llm_instruction_prompt = """
You are an information extraction assistant for a BDI agent. Your task is to extract values \
from a user's text to bind a list of ungrounded beliefs. Rules:
You will receive a JSON object with "beliefs" (a list of ungrounded AgentSpeak beliefs) \
and "text" (user's transcript).
Analyze the text to find values that semantically match the variables (X, Y, Z) in the beliefs.
A single piece of text might contain multiple instances that match a belief.
Respond ONLY with a single JSON object.
The JSON object's keys should be the belief functors (e.g., "weather").
The value for each key must be a list of lists.
Each inner list must contain the extracted arguments (as strings) for one instance \
of that belief.
CRITICAL: If no information in the text matches a belief, DO NOT include that key \
in your response.
"""
# on_start agent receives message containing the beliefs to look out for and
# sets up the LLM with instruction prompt
# async def on_start(self):
# msg = await self.receive(timeout=0.1)
# self.beliefs = dict from message
# send instruction prompt to LLM
beliefs: dict[str, list[str]]
beliefs = {"mood": ["X"], "car": ["Y"]}
async def run(self):
msg = await self.receive(timeout=1)
if not msg:
return
sender = msg.sender.node
match sender:
case settings.agent_settings.transcription_agent_name:
self.logger.debug("Received text from transcriber: %s", msg.body)
await self._process_transcription_demo(msg.body)
case _:
self.logger.info("Discarding message from %s", sender)
pass
async def _process_transcription(self, text: str):
text_prompt = f"Text: {text}"
beliefs_prompt = "These are the beliefs to be bound:\n"
for belief, values in self.beliefs.items():
beliefs_prompt += f"{belief}({', '.join(values)})\n"
prompt = text_prompt + beliefs_prompt
self.logger.info(prompt)
# prompt_msg = Message(to="LLMAgent@whatever")
# response = self.send(prompt_msg)
# Mock response; the response is beliefs in JSON format, parsed to dict[str, list[list[str]]]
response = '{"mood": [["happy"]]}'
# Verify by trying to parse
try:
json.loads(response)
belief_message = Message()
belief_message.to = (
settings.agent_settings.belief_collector_agent_name
+ "@"
+ settings.agent_settings.host
)
belief_message.body = response
belief_message.thread = "beliefs"
await self.send(belief_message)
self.agent.logger.info("Sent beliefs to BDI.")
except json.JSONDecodeError:
# Parsing failed, so the response is in the wrong format, log warning
self.agent.logger.warning("Received LLM response in incorrect format.")
async def _process_transcription_demo(self, txt: str):
"""
Demo version to process the transcription input to beliefs. For the demo only the belief
'user_said' is relevant, so this function simply makes a dict with key: "user_said",
value: txt and passes this to the Belief Collector agent.
"""
belief = {"beliefs": {"user_said": [txt]}, "type": "belief_extraction_text"}
payload = json.dumps(belief)
belief_msg = Message()
belief_msg.to = (
settings.agent_settings.belief_collector_agent_name + "@" + settings.agent_settings.host
)
belief_msg.body = payload
belief_msg.thread = "beliefs"
await self.send(belief_msg)
self.logger.info("Sent %d beliefs to the belief collector.", len(belief["beliefs"]))

View File

@@ -1,34 +0,0 @@
phase("end").
keyword_said(Keyword) :- (user_said(Message) & .substring(Keyword, Message, Pos)) & (Pos >= 0).
+!reply_with_goal(Goal)
: user_said(Message)
<- +responded_this_turn;
.findall(Norm, norm(Norm), Norms);
.reply_with_goal(Message, Norms, Goal).
+!say(Text)
<- +responded_this_turn;
.say(Text).
+!reply
: user_said(Message)
<- +responded_this_turn;
.findall(Norm, norm(Norm), Norms);
.reply(Message, Norms).
+!notify_cycle
<- .notify_ui;
.wait(1).
+user_said(Message)
: phase("end")
<- .notify_user_said(Message);
!reply.
+!check_triggers
<- true.
+!transition_phase
<- true.

View File

@@ -0,0 +1,3 @@
+new_message : user_said(Message) <-
-new_message;
.reply(Message).

View File

@@ -1,503 +0,0 @@
import asyncio
import json
import httpx
from pydantic import BaseModel, ValidationError
from control_backend.agents.base import BaseAgent
from control_backend.agents.bdi.agentspeak_generator import AgentSpeakGenerator
from control_backend.core.agent_system import InternalMessage
from control_backend.core.config import settings
from control_backend.schemas.belief_list import BeliefList, GoalList
from control_backend.schemas.belief_message import Belief as InternalBelief
from control_backend.schemas.belief_message import BeliefMessage
from control_backend.schemas.chat_history import ChatHistory, ChatMessage
from control_backend.schemas.program import BaseGoal, SemanticBelief
type JSONLike = None | bool | int | float | str | list["JSONLike"] | dict[str, "JSONLike"]
class BeliefState(BaseModel):
true: set[InternalBelief] = set()
false: set[InternalBelief] = set()
def difference(self, other: "BeliefState") -> "BeliefState":
return BeliefState(
true=self.true - other.true,
false=self.false - other.false,
)
def union(self, other: "BeliefState") -> "BeliefState":
return BeliefState(
true=self.true | other.true,
false=self.false | other.false,
)
def __sub__(self, other):
return self.difference(other)
def __or__(self, other):
return self.union(other)
def __bool__(self):
return bool(self.true) or bool(self.false)
class TextBeliefExtractorAgent(BaseAgent):
"""
Text Belief Extractor Agent.
This agent is responsible for processing raw text (e.g., from speech transcription) and
extracting semantic beliefs from it.
It uses the available beliefs received from the program manager to try to extract beliefs from a
user's message, sends any updated beliefs to the BDI core, and forms a ``user_said`` belief from
the message itself.
"""
def __init__(self, name: str):
super().__init__(name)
self._llm = self.LLM(self, settings.llm_settings.n_parallel)
self.belief_inferrer = SemanticBeliefInferrer(self._llm)
self.goal_inferrer = GoalAchievementInferrer(self._llm)
self._current_beliefs = BeliefState()
self._current_goal_completions: dict[str, bool] = {}
self._force_completed_goals: set[BaseGoal] = set()
self.conversation = ChatHistory(messages=[])
async def setup(self):
"""
Initialize the agent and its resources.
"""
self.logger.info("Setting up %s.", self.name)
async def handle_message(self, msg: InternalMessage):
"""
Handle incoming messages. Expect messages from the Transcriber agent, LLM agent, and the
Program manager agent.
:param msg: The received message.
"""
sender = msg.sender
match sender:
case settings.agent_settings.transcription_name:
self.logger.debug("Received text from transcriber: %s", msg.body)
self._apply_conversation_message(ChatMessage(role="user", content=msg.body))
await self._user_said(msg.body)
await self._infer_new_beliefs()
await self._infer_goal_completions()
case settings.agent_settings.llm_name:
self.logger.debug("Received text from LLM: %s", msg.body)
self._apply_conversation_message(ChatMessage(role="assistant", content=msg.body))
case settings.agent_settings.bdi_program_manager_name:
await self._handle_program_manager_message(msg)
case _:
self.logger.info("Discarding message from %s", sender)
return
def _apply_conversation_message(self, message: ChatMessage):
"""
Save the chat message to our conversation history, taking into account the conversation
length limit.
:param message: The chat message to add to the conversation history.
"""
length_limit = settings.behaviour_settings.conversation_history_length_limit
self.conversation.messages = (self.conversation.messages + [message])[-length_limit:]
async def _handle_program_manager_message(self, msg: InternalMessage):
"""
Handle a message from the program manager: extract available beliefs and goals from it.
:param msg: The received message from the program manager.
"""
match msg.thread:
case "beliefs":
self._handle_beliefs_message(msg)
await self._infer_new_beliefs()
case "goals":
self._handle_goals_message(msg)
await self._infer_goal_completions()
case "achieved_goals":
self._handle_goal_achieved_message(msg)
case "conversation_history":
if msg.body == "reset":
self._reset_phase()
case _:
self.logger.warning("Received unexpected message from %s", msg.sender)
def _reset_phase(self):
self.conversation = ChatHistory(messages=[])
self.belief_inferrer.available_beliefs.clear()
self._current_beliefs = BeliefState()
self.goal_inferrer.goals.clear()
self._current_goal_completions = {}
def _handle_beliefs_message(self, msg: InternalMessage):
try:
belief_list = BeliefList.model_validate_json(msg.body)
except ValidationError:
self.logger.warning(
"Received message from program manager but it is not a valid list of beliefs."
)
return
available_beliefs = [b for b in belief_list.beliefs if isinstance(b, SemanticBelief)]
self.belief_inferrer.available_beliefs = available_beliefs
self.logger.debug(
"Received %d semantic beliefs from the program manager: %s",
len(available_beliefs),
", ".join(b.name for b in available_beliefs),
)
def _handle_goals_message(self, msg: InternalMessage):
try:
goals_list = GoalList.model_validate_json(msg.body)
except ValidationError:
self.logger.warning(
"Received message from program manager but it is not a valid list of goals."
)
return
# Use only goals that can fail, as the others are always assumed to be completed
available_goals = {g for g in goals_list.goals if g.can_fail}
available_goals -= self._force_completed_goals
self.goal_inferrer.goals = available_goals
self.logger.debug(
"Received %d failable goals from the program manager: %s",
len(available_goals),
", ".join(g.name for g in available_goals),
)
def _handle_goal_achieved_message(self, msg: InternalMessage):
# NOTE: When goals can be marked unachieved, remember to re-add them to the goal_inferrer
try:
goals_list = GoalList.model_validate_json(msg.body)
except ValidationError:
self.logger.warning(
"Received goal achieved message from the program manager, "
"but it is not a valid list of goals."
)
return
for goal in goals_list.goals:
self._force_completed_goals.add(goal)
self._current_goal_completions[f"achieved_{AgentSpeakGenerator.slugify(goal)}"] = True
self.goal_inferrer.goals -= self._force_completed_goals
async def _user_said(self, text: str):
"""
Create a belief for the user's full speech.
:param text: User's transcribed text.
"""
belief_msg = InternalMessage(
to=settings.agent_settings.bdi_core_name,
sender=self.name,
body=BeliefMessage(
replace=[InternalBelief(name="user_said", arguments=[text])],
).model_dump_json(),
thread="beliefs",
)
await self.send(belief_msg)
async def _infer_new_beliefs(self):
conversation_beliefs = await self.belief_inferrer.infer_from_conversation(self.conversation)
new_beliefs = conversation_beliefs - self._current_beliefs
if not new_beliefs:
self.logger.debug("No new beliefs detected.")
return
self._current_beliefs |= new_beliefs
belief_changes = BeliefMessage(
create=list(new_beliefs.true),
delete=list(new_beliefs.false),
)
message = InternalMessage(
to=settings.agent_settings.bdi_core_name,
sender=self.name,
body=belief_changes.model_dump_json(),
thread="beliefs",
)
await self.send(message)
async def _infer_goal_completions(self):
goal_completions = await self.goal_inferrer.infer_from_conversation(self.conversation)
new_achieved = [
InternalBelief(name=goal, arguments=None)
for goal, achieved in goal_completions.items()
if achieved and self._current_goal_completions.get(goal) != achieved
]
new_not_achieved = [
InternalBelief(name=goal, arguments=None)
for goal, achieved in goal_completions.items()
if not achieved and self._current_goal_completions.get(goal) != achieved
]
for goal, achieved in goal_completions.items():
self._current_goal_completions[goal] = achieved
if not new_achieved and not new_not_achieved:
self.logger.debug("No goal achievement changes detected.")
return
belief_changes = BeliefMessage(
create=new_achieved,
delete=new_not_achieved,
)
message = InternalMessage(
to=settings.agent_settings.bdi_core_name,
sender=self.name,
body=belief_changes.model_dump_json(),
thread="beliefs",
)
await self.send(message)
class LLM:
"""
Class that handles sending structured generation requests to an LLM.
"""
def __init__(self, agent: "TextBeliefExtractorAgent", n_parallel: int):
self._agent = agent
self._semaphore = asyncio.Semaphore(n_parallel)
async def query(self, prompt: str, schema: dict, tries: int = 3) -> JSONLike | None:
"""
Query the LLM with the given prompt and schema, return an instance of a dict conforming
to this schema. Try ``tries`` times, or return None.
:param prompt: Prompt to be queried.
:param schema: Schema to be queried.
:param tries: Number of times to try to query the LLM.
:return: An instance of a dict conforming to this schema, or None if failed.
"""
try_count = 0
while try_count < tries:
try_count += 1
try:
return await self._query_llm(prompt, schema)
except (httpx.HTTPError, json.JSONDecodeError, KeyError) as e:
if try_count < tries:
continue
self._agent.logger.exception(
"Failed to get LLM response after %d tries.",
try_count,
exc_info=e,
)
return None
async def _query_llm(self, prompt: str, schema: dict) -> JSONLike:
"""
Query an LLM with the given prompt and schema, return an instance of a dict conforming
to that schema.
:param prompt: The prompt to be queried.
:param schema: Schema to use during response.
:return: A dict conforming to this schema.
:raises httpx.HTTPStatusError: If the LLM server responded with an error.
:raises json.JSONDecodeError: If the LLM response was not valid JSON. May happen if the
response was cut off early due to length limitations.
:raises KeyError: If the LLM server responded with no error, but the response was
invalid.
"""
async with self._semaphore:
async with httpx.AsyncClient() as client:
response = await client.post(
settings.llm_settings.local_llm_url,
json={
"model": settings.llm_settings.local_llm_model,
"messages": [{"role": "user", "content": prompt}],
"response_format": {
"type": "json_schema",
"json_schema": {
"name": "Beliefs",
"strict": True,
"schema": schema,
},
},
"reasoning_effort": "low",
"temperature": settings.llm_settings.code_temperature,
"stream": False,
},
timeout=30.0,
)
response.raise_for_status()
response_json = response.json()
json_message = response_json["choices"][0]["message"]["content"]
return json.loads(json_message)
class SemanticBeliefInferrer:
"""
Class that handles only prompting an LLM for semantic beliefs.
"""
def __init__(
self,
llm: "TextBeliefExtractorAgent.LLM",
available_beliefs: list[SemanticBelief] | None = None,
):
self._llm = llm
self.available_beliefs: list[SemanticBelief] = available_beliefs or []
async def infer_from_conversation(self, conversation: ChatHistory) -> BeliefState:
"""
Process conversation history to extract beliefs, semantically. The result is an object that
describes all beliefs that hold or don't hold based on the full conversation.
:param conversation: The conversation history to be processed.
:return: An object that describes beliefs.
"""
# Return instantly if there are no beliefs to infer
if not self.available_beliefs:
return BeliefState()
n_parallel = max(1, min(settings.llm_settings.n_parallel - 1, len(self.available_beliefs)))
all_beliefs: list[dict[str, bool | None] | None] = await asyncio.gather(
*[
self._infer_beliefs(conversation, beliefs)
for beliefs in self._split_into_chunks(self.available_beliefs, n_parallel)
]
)
retval = BeliefState()
for beliefs in all_beliefs:
if beliefs is None:
continue
for belief_name, belief_holds in beliefs.items():
if belief_holds is None:
continue
belief = InternalBelief(name=belief_name, arguments=None)
if belief_holds:
retval.true.add(belief)
else:
retval.false.add(belief)
return retval
@staticmethod
def _split_into_chunks[T](items: list[T], n: int) -> list[list[T]]:
"""
Split a list into ``n`` chunks, making each chunk approximately ``len(items) / n`` long.
:param items: The list of items to split.
:param n: The number of desired chunks.
:return: A list of chunks each approximately ``len(items) / n`` long.
"""
k, m = divmod(len(items), n)
return [items[i * k + min(i, m) : (i + 1) * k + min(i + 1, m)] for i in range(n)]
async def _infer_beliefs(
self,
conversation: ChatHistory,
beliefs: list[SemanticBelief],
) -> dict[str, bool | None] | None:
"""
Infer given beliefs based on the given conversation.
:param conversation: The conversation to infer beliefs from.
:param beliefs: The beliefs to infer.
:return: A dict containing belief names and a boolean whether they hold, or None if the
belief cannot be inferred based on the given conversation.
"""
example = {
"example_belief": True,
}
prompt = f"""{self._format_conversation(conversation)}
Given the above conversation, what beliefs can be inferred?
If there is no relevant information about a belief, give null.
In case messages conflict, prefer using the most recent messages for inference.
Choose from the following list of beliefs, formatted as `- <belief_name>: <description>`:
{self._format_beliefs(beliefs)}
Respond with a JSON similar to the following, but with the property names as given above:
{json.dumps(example, indent=2)}
"""
schema = self._create_beliefs_schema(beliefs)
return await self._llm.query(prompt, schema)
@staticmethod
def _create_belief_schema(belief: SemanticBelief) -> tuple[str, dict]:
return AgentSpeakGenerator.slugify(belief), {
"type": ["boolean", "null"],
"description": belief.description,
}
@staticmethod
def _create_beliefs_schema(beliefs: list[SemanticBelief]) -> dict:
belief_schemas = [
SemanticBeliefInferrer._create_belief_schema(belief) for belief in beliefs
]
return {
"type": "object",
"properties": dict(belief_schemas),
"required": [name for name, _ in belief_schemas],
}
@staticmethod
def _format_message(message: ChatMessage):
return f"{message.role.upper()}:\n{message.content}"
@staticmethod
def _format_conversation(conversation: ChatHistory):
return "\n\n".join(
[SemanticBeliefInferrer._format_message(message) for message in conversation.messages]
)
@staticmethod
def _format_beliefs(beliefs: list[SemanticBelief]):
return "\n".join(
[f"- {AgentSpeakGenerator.slugify(belief)}: {belief.description}" for belief in beliefs]
)
class GoalAchievementInferrer(SemanticBeliefInferrer):
def __init__(self, llm: TextBeliefExtractorAgent.LLM):
super().__init__(llm)
self.goals: set[BaseGoal] = set()
async def infer_from_conversation(self, conversation: ChatHistory) -> dict[str, bool]:
"""
Determine which goals have been achieved based on the given conversation.
:param conversation: The conversation to infer goal completion from.
:return: A mapping of goals and a boolean whether they have been achieved.
"""
if not self.goals:
return {}
goals_achieved = await asyncio.gather(
*[self._infer_goal(conversation, g) for g in self.goals]
)
return {
f"achieved_{AgentSpeakGenerator.slugify(goal)}": achieved
for goal, achieved in zip(self.goals, goals_achieved, strict=True)
}
async def _infer_goal(self, conversation: ChatHistory, goal: BaseGoal) -> bool:
prompt = f"""{self._format_conversation(conversation)}
Given the above conversation, has the following goal been achieved?
The name of the goal: {goal.name}
Description of the goal: {goal.description}
Answer with literally only `true` or `false` (without backticks)."""
schema = {
"type": "boolean",
}
return await self._llm.query(prompt, schema)
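A small worked example of `_split_into_chunks`, which spreads the available beliefs over the parallel LLM calls: with `divmod(7, 3) == (2, 1)`, the first chunk gets the extra item.

```python
items = ["b1", "b2", "b3", "b4", "b5", "b6", "b7"]
chunks = SemanticBeliefInferrer._split_into_chunks(items, 3)
assert chunks == [["b1", "b2", "b3"], ["b4", "b5"], ["b6", "b7"]]
```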

View File

@@ -0,0 +1,8 @@
from control_backend.agents.base import BaseAgent
from .behaviours.text_belief_extractor import BeliefFromText
class TBeliefExtractorAgent(BaseAgent):
async def setup(self):
self.add_behaviour(BeliefFromText())

View File

@@ -0,0 +1,94 @@
import json
from json import JSONDecodeError
from spade.agent import Message
from spade.behaviour import CyclicBehaviour
from control_backend.core.config import settings
class ContinuousBeliefCollector(CyclicBehaviour):
"""
Continuously collects beliefs/emotions from extractor agents,
then sends a unified belief packet to the BDI agent.
"""
async def run(self):
msg = await self.receive(timeout=1)
if not msg:
return
await self._process_message(msg)
async def _process_message(self, msg: Message):
sender_node = msg.sender.node
# Parse JSON payload
try:
payload = json.loads(msg.body)
except JSONDecodeError as e:
self.agent.logger.warning(
"BeliefCollector: failed to parse JSON from %s. Body=%r Error=%s",
sender_node,
msg.body,
e,
)
return
msg_type = payload.get("type")
# Prefer explicit 'type' field
if msg_type == "belief_extraction_text" or sender_node == "belief_text_agent_mock":
self.agent.logger.debug(
"Message routed to _handle_belief_text (sender=%s)", sender_node
)
await self._handle_belief_text(payload, sender_node)
# This is not implemented yet, but we keep the structure for future use
elif msg_type == "emotion_extraction_text" or sender_node == "emo_text_agent_mock":
self.agent.logger.debug("Message routed to _handle_emo_text (sender=%s)", sender_node)
await self._handle_emo_text(payload, sender_node)
else:
self.agent.logger.warning(
"Unrecognized message (sender=%s, type=%r). Ignoring.", sender_node, msg_type
)
async def _handle_belief_text(self, payload: dict, origin: str):
"""
Expected payload:
{
"type": "belief_extraction_text",
"beliefs": {"user_said": ["Can you help me?"]}
}
"""
beliefs = payload.get("beliefs", {})
if not beliefs:
self.agent.logger.debug("Received empty beliefs set.")
return
self.agent.logger.debug("Forwarding %d beliefs.", len(beliefs))
for belief_name, belief_list in beliefs.items():
for belief in belief_list:
self.agent.logger.debug(" - %s %s", belief_name, str(belief))
await self._send_beliefs_to_bdi(beliefs, origin=origin)
async def _handle_emo_text(self, payload: dict, origin: str):
"""TODO: implement (after we have emotional recogntion)"""
pass
async def _send_beliefs_to_bdi(self, beliefs: dict[str, list[str]], origin: str | None = None):
"""
Sends a unified belief packet to the BDI agent.
"""
if not beliefs:
return
to_jid = f"{settings.agent_settings.bdi_core_agent_name}@{settings.agent_settings.host}"
msg = Message(to=to_jid, sender=self.agent.jid, thread="beliefs")
msg.body = json.dumps(beliefs)
await self.send(msg)
self.agent.logger.info("Sent %d belief(s) to BDI core.", len(beliefs))

View File

@@ -0,0 +1,11 @@
from control_backend.agents.base import BaseAgent
from .behaviours.continuous_collect import ContinuousBeliefCollector
class BeliefCollectorAgent(BaseAgent):
async def setup(self):
self.logger.info("BeliefCollectorAgent starting (%s)", self.jid)
# Attach the continuous collector behaviour (listens and forwards to BDI)
self.add_behaviour(ContinuousBeliefCollector())
self.logger.info("BeliefCollectorAgent ready.")

View File

@@ -1 +0,0 @@
from .ri_communication_agent import RICommunicationAgent as RICommunicationAgent

View File

@@ -1,330 +0,0 @@
import asyncio
import json
import zmq
import zmq.asyncio as azmq
from pydantic import ValidationError
from zmq.asyncio import Context
from control_backend.agents import BaseAgent
from control_backend.agents.actuation.robot_gesture_agent import RobotGestureAgent
from control_backend.core.config import settings
from control_backend.schemas.internal_message import InternalMessage
from control_backend.schemas.ri_message import PauseCommand
from ..actuation.robot_speech_agent import RobotSpeechAgent
from ..perception import VADAgent
class RICommunicationAgent(BaseAgent):
"""
Robot Interface (RI) Communication Agent.
This agent manages the high-level connection negotiation and health checking (heartbeat)
between the Control Backend and the Robot Interface (or UI).
It acts as a service discovery mechanism:
1. It initiates a handshake (negotiation) to discover where other services (like the robot
command listener) are listening.
2. It spawns specific agents
(like :class:`~control_backend.agents.actuation.robot_speech_agent.RobotSpeechAgent`)
once the connection details are established.
3. It maintains a "ping" loop to ensure the connection remains active.
:ivar _address: The ZMQ address to attempt the initial connection negotiation.
:ivar _bind: Whether to bind or connect the negotiation socket.
:ivar _req_socket: ZMQ REQ socket for negotiation and pings.
:ivar pub_socket: ZMQ PUB socket for internal notifications (e.g., ping status).
:ivar connected: Boolean flag indicating active connection status.
"""
def __init__(
self,
name: str,
address=settings.zmq_settings.ri_communication_address,
bind=False,
):
super().__init__(name)
self._address = address
self._bind = bind
self._req_socket: azmq.Socket | None = None
self.pub_socket: azmq.Socket | None = None
self.connected = False
self.gesture_agent: RobotGestureAgent | None = None
self.speech_agent: RobotSpeechAgent | None = None
async def setup(self):
"""
Initialize the agent and attempt connection.
Tries to negotiate connection up to ``behaviour_settings.comm_setup_max_retries`` times.
If successful, starts the :meth:`_listen_loop`.
"""
self.logger.info("Setting up %s", self.name)
# Bind request socket
await self._setup_sockets()
if await self._negotiate_connection():
self.connected = True
self.add_behavior(self._listen_loop())
else:
self.logger.warning("Failed to negotiate connection during setup.")
self.logger.info("Finished setting up %s", self.name)
async def _setup_sockets(self, force=False):
"""
Initialize ZMQ sockets (REQ for negotiation, PUB for internal updates).
"""
# Bind request socket
if self._req_socket is None or force:
self._req_socket = Context.instance().socket(zmq.REQ)
if self._bind:
self._req_socket.bind(self._address)
else:
self._req_socket.connect(self._address)
if self.pub_socket is None or force:
self.pub_socket = Context.instance().socket(zmq.PUB)
self.pub_socket.connect(settings.zmq_settings.internal_pub_address)
async def _negotiate_connection(
self, max_retries: int = settings.behaviour_settings.comm_setup_max_retries
):
"""
Perform the handshake protocol with the Robot Interface.
Sends a ``negotiate/ports`` request and expects a configuration response containing
port assignments for various services (e.g., actuation).
:param max_retries: Number of attempts before giving up.
:return: True if negotiation succeeded, False otherwise.
"""
retries = 0
while retries < max_retries:
if self._req_socket is None:
retries += 1
continue
# Send our message and receive one back
message = {"endpoint": "negotiate/ports", "data": {}}
await self._req_socket.send_json(message)
retry_frequency = 1.0
try:
received_message = await asyncio.wait_for(
self._req_socket.recv_json(), timeout=retry_frequency
)
except TimeoutError:
self.logger.warning(
"No connection established in %d seconds (attempt %d/%d)",
retries * retry_frequency,
retries + 1,
max_retries,
)
retries += 1
continue
except Exception as e:
self.logger.warning("Unexpected error during negotiation: %s", e)
retries += 1
continue
# Validate endpoint
endpoint = received_message.get("endpoint")
if endpoint != "negotiate/ports":
self.logger.warning(
"Invalid endpoint '%s' received (attempt %d/%d)",
endpoint,
retries + 1,
max_retries,
)
retries += 1
await asyncio.sleep(1)
continue
# At this point, we have a valid response
try:
self.logger.debug("Negotiation successful. Handling rn")
await self._handle_negotiation_response(received_message)
# Let UI know that we're connected
topic = b"ping"
data = json.dumps(True).encode()
if self.pub_socket:
await self.pub_socket.send_multipart([topic, data])
return True
except Exception as e:
self.logger.warning("Error unpacking negotiation data: %s", e)
retries += 1
await asyncio.sleep(settings.behaviour_settings.sleep_s)
continue
return False
async def _handle_negotiation_response(self, received_message):
"""
Parse the negotiation response and initialize services.
Based on the response, it might re-connect the main socket or spawn new agents
(e.g., for robot actuation).
"""
for port_data in received_message["data"]:
id = port_data["id"]
port = port_data["port"]
bind = port_data["bind"]
if not bind:
addr = f"tcp://{settings.ri_host}:{port}"
else:
addr = f"tcp://*:{port}"
match id:
case "main":
if addr != self._address:
assert self._req_socket is not None
if not bind:
self._req_socket.connect(addr)
else:
self._req_socket.bind(addr)
case "actuation":
gesture_data = port_data.get("gestures", [])
single_gesture_data = port_data.get("single_gestures", [])
robot_speech_agent = RobotSpeechAgent(
settings.agent_settings.robot_speech_name,
address=addr,
bind=bind,
)
self.speech_agent = robot_speech_agent
robot_gesture_agent = RobotGestureAgent(
settings.agent_settings.robot_gesture_name,
address=addr,
bind=bind,
gesture_data=gesture_data,
single_gesture_data=single_gesture_data,
)
self.gesture_agent = robot_gesture_agent
await robot_speech_agent.start()
await asyncio.sleep(0.1) # Small delay
await robot_gesture_agent.start()
case "audio":
vad_agent = VADAgent(audio_in_address=addr, audio_in_bind=bind)
await vad_agent.start()
case _:
self.logger.warning("Unhandled negotiation id: %s", id)
async def stop(self):
"""
Closes all sockets.
:return:
"""
if self._req_socket:
self._req_socket.close()
if self.pub_socket:
self.pub_socket.close()
await super().stop()
async def _listen_loop(self):
"""
Maintain the connection via a heartbeat (ping) loop.
Sends a ``ping`` request periodically and waits for a reply.
If pings fail repeatedly, it triggers a disconnection handler to restart negotiation.
"""
while self._running:
if not self.connected:
await asyncio.sleep(settings.behaviour_settings.sleep_s)
self.logger.debug("Not connected, skipping ping loop iteration.")
continue
# We need to listen and send pings.
message = {"endpoint": "ping", "data": {"id": "e.g. some reference id"}}
seconds_to_wait_total = settings.behaviour_settings.sleep_s
try:
assert self._req_socket is not None
await asyncio.wait_for(
self._req_socket.send_json(message), timeout=seconds_to_wait_total / 2
)
except TimeoutError:
self.logger.debug(
"Waited too long to send message - "
"we probably dont have any receivers... but let's check!"
)
# Wait up to {seconds_to_wait_total/2} seconds for a reply
try:
assert self._req_socket is not None
message = await asyncio.wait_for(
self._req_socket.recv_json(), timeout=seconds_to_wait_total / 2
)
if "endpoint" in message and message["endpoint"] != "ping":
self.logger.debug(f'Received message "{message}" from RI.')
if "endpoint" not in message:
self.logger.warning("No received endpoint in message, expected ping endpoint.")
continue
# See what endpoint we received
match message["endpoint"]:
case "ping":
topic = b"ping"
data = json.dumps(True).encode()
if self.pub_socket is not None:
await self.pub_socket.send_multipart([topic, data])
await asyncio.sleep(settings.behaviour_settings.sleep_s)
case _:
self.logger.debug(
"Received message with topic different than ping, while ping expected."
)
# We didn't get a reply
except TimeoutError:
self.logger.info(
f"No ping retrieved in {seconds_to_wait_total} seconds, "
"sending UI disconnection event and attempting to restart."
)
await self._handle_disconnection()
continue
except Exception:
self.logger.error("Error while waiting for ping message.", exc_info=True)
raise
async def _handle_disconnection(self):
"""
Handle connection loss.
Notifies the UI of disconnection (via internal PUB) and attempts to restart negotiation.
"""
self.connected = False
# Tell UI we're disconnected.
topic = b"ping"
data = json.dumps(False).encode()
self.logger.debug("1")
if self.pub_socket:
try:
self.logger.debug("2")
await asyncio.wait_for(self.pub_socket.send_multipart([topic, data]), 5)
except TimeoutError:
self.logger.debug("3")
self.logger.warning("Connection ping for router timed out.")
# Try to reboot/renegotiate
if self.gesture_agent is not None:
await self.gesture_agent.stop()
if self.speech_agent is not None:
await self.speech_agent.stop()
if self.pub_socket is not None:
self.pub_socket.close()
self.logger.debug("Restarting communication negotiation.")
if await self._negotiate_connection(max_retries=2):
self.connected = True
async def handle_message(self, msg: InternalMessage):
try:
pause_command = PauseCommand.model_validate_json(msg.body)
await self._req_socket.send_json(pause_command.model_dump())
self.logger.debug(await self._req_socket.recv_json())
except ValidationError:
self.logger.warning("Incorrect message format for PauseCommand.")

View File

@@ -1 +0,0 @@
from .llm_agent import LLMAgent as LLMAgent

View File

@@ -0,0 +1,163 @@
import json
import re
from collections.abc import AsyncGenerator
import httpx
from spade.behaviour import CyclicBehaviour
from spade.message import Message
from control_backend.agents import BaseAgent
from control_backend.core.config import settings
from .llm_instructions import LLMInstructions
class LLMAgent(BaseAgent):
"""
Agent responsible for processing user text input and querying a locally
hosted LLM for text generation. Receives messages from the BDI Core Agent
and responds with processed LLM output.
"""
class ReceiveMessageBehaviour(CyclicBehaviour):
"""
Cyclic behaviour to continuously listen for incoming messages from
the BDI Core Agent and handle them.
"""
async def run(self):
"""
Receives SPADE messages and processes only those originating from the
configured BDI agent.
"""
msg = await self.receive(timeout=1)
if not msg:
return
sender = msg.sender.node
self.agent.logger.debug(
"Received message: %s from %s",
msg.body,
sender,
)
if sender == settings.agent_settings.bdi_core_agent_name:
self.agent.logger.debug("Processing message from BDI Core Agent")
await self._process_bdi_message(msg)
else:
self.agent.logger.debug("Message ignored (not from BDI Core Agent)")
async def _process_bdi_message(self, message: Message):
"""
Forwards user text from the BDI to the LLM and replies with the generated text in chunks
separated by punctuation.
"""
user_text = message.body
# Consume the streaming generator and send a reply for every chunk
async for chunk in self._query_llm(user_text):
await self._reply(chunk)
self.agent.logger.debug(
"Finished processing BDI message. Response sent in chunks to BDI Core Agent."
)
async def _reply(self, msg: str):
"""
Sends a response message back to the BDI Core Agent.
"""
reply = Message(
to=settings.agent_settings.bdi_core_agent_name + "@" + settings.agent_settings.host,
body=msg,
)
await self.send(reply)
async def _query_llm(self, prompt: str) -> AsyncGenerator[str]:
"""
Sends a chat completion request to the local LLM service and streams the response by
yielding fragments separated by punctuation.
:param prompt: Input text prompt to pass to the LLM.
:yield: Fragments of the LLM-generated content.
"""
instructions = LLMInstructions(
"- Be friendly and respectful.\n"
"- Make the conversation feel natural and engaging.\n"
"- Speak like a pirate.\n"
"- When the user asks what you can do, tell them.",
"- Try to learn the user's name during conversation.\n"
"- Suggest playing a game of asking yes or no questions where you think of a word "
"and the user must guess it.",
)
messages = [
{
"role": "developer",
"content": instructions.build_developer_instruction(),
},
{
"role": "user",
"content": prompt,
},
]
try:
current_chunk = ""
async for token in self._stream_query_llm(messages):
current_chunk += token
# Stream the message in chunks separated by punctuation.
# We include the delimiter in the emitted chunk for natural flow.
pattern = re.compile(r".*?(?:,|;|:|—|\.{3}|…|\.|\?|!)\s*", re.DOTALL)
last_end = 0
for m in pattern.finditer(current_chunk):
chunk = m.group(0)
if chunk:
yield chunk
last_end = m.end()
current_chunk = current_chunk[last_end:]
# Yield any remaining tail
if current_chunk:
yield current_chunk
except httpx.HTTPError as err:
self.agent.logger.error("HTTP error.", exc_info=err)
yield "LLM service unavailable."
except Exception as err:
self.agent.logger.error("Unexpected error.", exc_info=err)
yield "Error processing the request."
async def _stream_query_llm(self, messages) -> AsyncGenerator[str]:
"""Raises httpx.HTTPError when the API gives an error."""
async with httpx.AsyncClient(timeout=None) as client:
async with client.stream(
"POST",
settings.llm_settings.local_llm_url,
json={
"model": settings.llm_settings.local_llm_model,
"messages": messages,
"temperature": 0.3,
"stream": True,
},
) as response:
response.raise_for_status()
async for line in response.aiter_lines():
if not line or not line.startswith("data: "):
continue
data = line[len("data: ") :]
if data.strip() == "[DONE]":
break
try:
event = json.loads(data)
delta = event.get("choices", [{}])[0].get("delta", {}).get("content")
if delta:
yield delta
except json.JSONDecodeError:
self.agent.logger.error("Failed to parse LLM response: %s", data)
async def setup(self):
"""
Sets up the SPADE behaviour to filter and process messages from the
BDI Core Agent.
"""
behaviour = self.ReceiveMessageBehaviour()
self.add_behaviour(behaviour)
self.logger.info("LLMAgent setup complete")

View File

@@ -1,211 +0,0 @@
import json
import re
import uuid
from collections.abc import AsyncGenerator
import httpx
from pydantic import ValidationError
from control_backend.agents import BaseAgent
from control_backend.core.agent_system import InternalMessage
from control_backend.core.config import settings
from ...schemas.llm_prompt_message import LLMPromptMessage
from .llm_instructions import LLMInstructions
class LLMAgent(BaseAgent):
"""
LLM Agent.
This agent is responsible for processing user text input and querying a locally
hosted LLM for text generation. It acts as the conversational brain of the system.
It receives :class:`~control_backend.schemas.llm_prompt_message.LLMPromptMessage`
payloads from the BDI Core Agent, constructs a conversation history, queries the
LLM via HTTP, and streams the response back to the BDI agent in natural chunks
(e.g., sentence by sentence).
:ivar history: A list of dictionaries representing the conversation history (Role/Content).
"""
def __init__(self, name: str):
super().__init__(name)
self.history = []
async def setup(self):
self.logger.info("Setting up %s.", self.name)
async def handle_message(self, msg: InternalMessage):
"""
Handle incoming messages.
Expects messages from :attr:`settings.agent_settings.bdi_core_name` containing
an :class:`LLMPromptMessage` in the body.
:param msg: The received internal message.
"""
if msg.sender == settings.agent_settings.bdi_core_name:
match msg.thread:
case "prompt_message":
try:
prompt_message = LLMPromptMessage.model_validate_json(msg.body)
await self._process_bdi_message(prompt_message)
except ValidationError:
self.logger.debug("Prompt message from BDI core is invalid.")
case "assistant_message":
self.history.append({"role": "assistant", "content": msg.body})
case "user_message":
self.history.append({"role": "user", "content": msg.body})
elif msg.sender == settings.agent_settings.bdi_program_manager_name:
if msg.body == "clear_history":
self.logger.debug("Clearing conversation history.")
self.history.clear()
else:
self.logger.debug("Message ignored.")
async def _process_bdi_message(self, message: LLMPromptMessage):
"""
Orchestrate the LLM query and response streaming.
Iterates over the chunks yielded by :meth:`_query_llm` and forwards them
individually to the BDI agent via :meth:`_send_reply`.
:param message: The parsed prompt message containing text, norms, and goals.
"""
full_message = ""
async for chunk in self._query_llm(message.text, message.norms, message.goals):
await self._send_reply(chunk)
full_message += chunk
self.logger.debug("Finished processing BDI message. Response sent in chunks to BDI core.")
await self._send_full_reply(full_message)
async def _send_reply(self, msg: str):
"""
Sends a response message (chunk) back to the BDI Core Agent.
:param msg: The text content of the chunk.
"""
reply = InternalMessage(
to=settings.agent_settings.bdi_core_name,
sender=self.name,
body=msg,
)
await self.send(reply)
async def _send_full_reply(self, msg: str):
"""
Sends a response message (full) to agents that need it.
:param msg: The text content of the message.
"""
message = InternalMessage(
to=settings.agent_settings.text_belief_extractor_name,
sender=self.name,
body=msg,
)
await self.send(message)
async def _query_llm(
self, prompt: str, norms: list[str], goals: list[str]
) -> AsyncGenerator[str]:
"""
Send a chat completion request to the local LLM service and stream the response.
It constructs the full prompt using
:class:`~control_backend.agents.llm.llm_instructions.LLMInstructions`.
It streams the response from the LLM and buffers tokens until a natural break (punctuation)
is reached, then yields the chunk. This ensures that the robot speaks in complete phrases
rather than individual tokens.
:param prompt: Input text prompt to pass to the LLM.
:param norms: Norms the LLM should hold itself to.
:param goals: Goals the LLM should achieve.
:yield: Fragments of the LLM-generated content (e.g., sentences/phrases).
"""
instructions = LLMInstructions(norms if norms else None, goals if goals else None)
messages = [
{
"role": "developer",
"content": instructions.build_developer_instruction(),
},
*self.history,
]
message_id = str(uuid.uuid4()) # noqa
try:
full_message = ""
current_chunk = ""
async for token in self._stream_query_llm(messages):
full_message += token
current_chunk += token
self.logger.llm(
"Received token: %s",
full_message,
extra={"reference": message_id}, # Used in the UI to update old logs
)
# Stream the message in chunks separated by punctuation.
# We include the delimiter in the emitted chunk for natural flow.
pattern = re.compile(r".*?(?:,|;|:|—|\.{3}|…|\.|\?|!)\s*", re.DOTALL)
last_end = 0
for m in pattern.finditer(current_chunk):
chunk = m.group(0)
if chunk:
yield chunk
last_end = m.end()
current_chunk = current_chunk[last_end:]
# Yield any remaining tail
if current_chunk:
yield current_chunk
self.history.append(
{
"role": "assistant",
"content": full_message,
}
)
except httpx.HTTPError as err:
self.logger.error("HTTP error.", exc_info=err)
yield "LLM service unavailable."
except Exception as err:
self.logger.error("Unexpected error.", exc_info=err)
yield "Error processing the request."
async def _stream_query_llm(self, messages) -> AsyncGenerator[str]:
"""
Perform the raw HTTP streaming request to the LLM API.
:param messages: The list of message dictionaries (role/content).
:yield: Raw text tokens (deltas) from the SSE stream.
:raises httpx.HTTPError: If the API returns a non-200 status.
"""
async with httpx.AsyncClient() as client:
async with client.stream(
"POST",
settings.llm_settings.local_llm_url,
json={
"model": settings.llm_settings.local_llm_model,
"messages": messages,
"temperature": settings.llm_settings.chat_temperature,
"stream": True,
},
) as response:
response.raise_for_status()
async for line in response.aiter_lines():
if not line or not line.startswith("data: "):
continue
data = line[len("data: ") :]
if data.strip() == "[DONE]":
break
try:
event = json.loads(data)
delta = event.get("choices", [{}])[0].get("delta", {}).get("content")
if delta:
yield delta
except json.JSONDecodeError:
self.logger.error("Failed to parse LLM response: %s", data)

View File

@@ -1,45 +1,30 @@
class LLMInstructions:
"""
Helper class to construct the system instructions for the LLM.
It combines the base persona (Pepper robot) with dynamic norms and goals
provided by the BDI system.
If no norms/goals are given it assumes empty lists.
:ivar norms: A list of behavioral norms.
:ivar goals: A list of specific conversational goals.
Defines structured instructions that are sent along with each request
to the LLM to guide its behavior (norms, goals, etc.).
"""
@staticmethod
def default_norms() -> list[str]:
return [
"Be friendly and respectful.",
"Make the conversation feel natural and engaging.",
]
def default_norms() -> str:
return """
Be friendly and respectful.
Make the conversation feel natural and engaging.
""".strip()
@staticmethod
def default_goals() -> list[str]:
return [
"Try to learn the user's name during conversation.",
]
def default_goals() -> str:
return """
Try to learn the user's name during conversation.
""".strip()
def __init__(self, norms: list[str] | None = None, goals: list[str] | None = None):
self.norms = norms or self.default_norms()
self.goals = goals or self.default_goals()
def __init__(self, norms: str | None = None, goals: str | None = None):
self.norms = norms if norms is not None else self.default_norms()
self.goals = goals if goals is not None else self.default_goals()
def build_developer_instruction(self) -> str:
"""
Builds the final system prompt string.
The prompt includes:
1. Persona definition.
2. Constraint on response length.
3. Instructions on how to handle goals (reach them in order, but prioritize natural flow).
4. The specific list of norms.
5. The specific list of goals.
:return: The formatted system prompt string.
Builds a multi-line formatted instruction string for the LLM.
Includes only non-empty structured fields.
"""
sections = [
"You are a Pepper robot engaging in natural human conversation.",
@@ -50,14 +35,12 @@ class LLMInstructions:
if self.norms:
sections.append("Norms to follow:")
for norm in self.norms:
sections.append("- " + norm)
sections.append(self.norms)
sections.append("")
if self.goals:
sections.append("Goals to reach:")
for goal in self.goals:
sections.append("- " + goal)
sections.append(self.goals)
sections.append("")
return "\n".join(sections).strip()

View File

@@ -0,0 +1,44 @@
import json
from spade.agent import Agent
from spade.behaviour import OneShotBehaviour
from spade.message import Message
from control_backend.core.config import settings
class BeliefTextAgent(Agent):
class SendOnceBehaviourBlfText(OneShotBehaviour):
async def run(self):
to_jid = (
settings.agent_settings.belief_collector_agent_name
+ "@"
+ settings.agent_settings.host
)
# Send multiple beliefs in one JSON payload
payload = {
"type": "belief_extraction_text",
"beliefs": {
"user_said": [
"hello test",
"Can you help me?",
"stop talking to me",
"No",
"Pepper do a dance",
]
},
}
msg = Message(to=to_jid)
msg.body = json.dumps(payload)
await self.send(msg)
print(f"Beliefs sent to {to_jid}!")
self.exit_code = "Job Finished!"
await self.agent.stop()
async def setup(self):
print("BeliefTextAgent started")
self.b = self.SendOnceBehaviourBlfText()
self.add_behaviour(self.b)

View File

@@ -1,4 +0,0 @@
from .transcription_agent.transcription_agent import (
TranscriptionAgent as TranscriptionAgent,
)
from .vad_agent import VADAgent as VADAgent

View File

@@ -1,138 +0,0 @@
import asyncio
import numpy as np
import zmq
import zmq.asyncio as azmq
from control_backend.agents import BaseAgent
from control_backend.core.agent_system import InternalMessage
from control_backend.core.config import settings
from .speech_recognizer import SpeechRecognizer
class TranscriptionAgent(BaseAgent):
"""
Transcription Agent.
This agent listens to audio fragments (containing speech) on a ZMQ SUB socket,
transcribes them using the configured :class:`SpeechRecognizer`, and sends the
resulting text to other agents (e.g., the Text Belief Extractor).
It uses an internal semaphore to limit the number of concurrent transcription tasks.
:ivar audio_in_address: The ZMQ address to receive audio from (usually from VAD Agent).
:ivar audio_in_socket: The ZMQ SUB socket instance.
:ivar speech_recognizer: The speech recognition engine instance.
:ivar _concurrency: Semaphore to limit concurrent transcriptions.
"""
def __init__(self, audio_in_address: str):
"""
Initialize the Transcription Agent.
:param audio_in_address: The ZMQ address of the audio source (e.g., VAD output).
"""
super().__init__(settings.agent_settings.transcription_name)
self.audio_in_address = audio_in_address
self.audio_in_socket: azmq.Socket | None = None
self.speech_recognizer = None
self._concurrency = None
async def setup(self):
"""
Initialize the agent resources.
1. Connects to the audio input ZMQ socket.
2. Initializes the :class:`SpeechRecognizer` (choosing the best available backend).
3. Starts the background transcription loop.
"""
self.logger.info("Setting up %s", self.name)
self._connect_audio_in_socket()
# Initialize recognizer and semaphore
max_concurrent_tasks = settings.behaviour_settings.transcription_max_concurrent_tasks
self._concurrency = asyncio.Semaphore(max_concurrent_tasks)
self.speech_recognizer = SpeechRecognizer.best_type()
self.speech_recognizer.load_model() # Warmup
# Start background loop
self.add_behavior(self._transcribing_loop())
self.logger.info("Finished setting up %s", self.name)
async def stop(self):
"""
Stop the agent and close sockets.
"""
assert self.audio_in_socket is not None
self.audio_in_socket.close()
self.audio_in_socket = None
return await super().stop()
def _connect_audio_in_socket(self):
"""
Helper to connect the ZMQ SUB socket for audio input.
"""
self.audio_in_socket = azmq.Context.instance().socket(zmq.SUB)
self.audio_in_socket.setsockopt_string(zmq.SUBSCRIBE, "")
self.audio_in_socket.connect(self.audio_in_address)
async def _transcribe(self, audio: np.ndarray) -> str:
"""
Run the speech recognition on the audio data.
This runs in a separate thread (via `asyncio.to_thread`) to avoid blocking the event loop,
constrained by the concurrency semaphore.
:param audio: The audio data as a numpy array.
:return: The transcribed text string.
"""
assert self._concurrency is not None and self.speech_recognizer is not None
async with self._concurrency:
return await asyncio.to_thread(self.speech_recognizer.recognize_speech, audio)
async def _share_transcription(self, transcription: str):
"""
Share a transcription to the other agents that depend on it.
Currently sends to:
- :attr:`settings.agent_settings.text_belief_extractor_name`
:param transcription: The transcribed text.
"""
receiver_names = [
settings.agent_settings.text_belief_extractor_name,
]
for receiver_name in receiver_names:
message = InternalMessage(
to=receiver_name,
sender=self.name,
body=transcription,
)
await self.send(message)
async def _transcribing_loop(self) -> None:
"""
The main loop for receiving audio and triggering transcription.
Receives audio chunks from ZMQ, decodes them to float32, and calls :meth:`_transcribe`.
If speech is found, it calls :meth:`_share_transcription`.
"""
while self._running:
try:
assert self.audio_in_socket is not None
audio_data = await self.audio_in_socket.recv()
audio = np.frombuffer(audio_data, dtype=np.float32)
speech = await self._transcribe(audio)
if not speech:
self.logger.info("Nothing transcribed.")
continue
self.logger.info("Transcribed speech: %s", speech)
await self._share_transcription(speech)
except Exception as e:
self.logger.error(f"Error in transcription loop: {e}")

View File

@@ -1,298 +0,0 @@
import asyncio
import numpy as np
import torch
import zmq
import zmq.asyncio as azmq
from control_backend.agents import BaseAgent
from control_backend.core.config import settings
from control_backend.schemas.internal_message import InternalMessage
from ...schemas.program_status import PROGRAM_STATUS, ProgramStatus
from .transcription_agent.transcription_agent import TranscriptionAgent
class SocketPoller[T]:
"""
Convenience class for polling a socket for data with a timeout, persisting a zmq.Poller for
multiple usages.
:param T: The type of data returned by the socket.
"""
def __init__(
self,
socket: azmq.Socket,
timeout_ms: int = settings.behaviour_settings.socket_poller_timeout_ms,
):
"""
:param socket: The socket to poll and get data from.
:param timeout_ms: A timeout in milliseconds to wait for data.
"""
self.socket = socket
self.poller = azmq.Poller()
self.poller.register(self.socket, zmq.POLLIN)
self.timeout_ms = timeout_ms
async def poll(self, timeout_ms: int | None = None) -> T | None:
"""
Get data from the socket, or None if the timeout is reached.
:param timeout_ms: If given, the timeout. Otherwise, ``self.timeout_ms`` is used.
:return: Data from the socket or None.
"""
timeout_ms = timeout_ms or self.timeout_ms
socks = dict(await self.poller.poll(timeout_ms))
if socks.get(self.socket) == zmq.POLLIN:
return await self.socket.recv()
return None
class VADAgent(BaseAgent):
"""
Voice Activity Detection (VAD) Agent.
This agent:
1. Receives an audio stream (via ZMQ).
2. Processes the audio using the Silero VAD model to detect speech.
3. Buffers potential speech segments.
4. Publishes valid speech fragments (containing speech plus small buffer) to a ZMQ PUB socket.
5. Instantiates and starts agents (like :class:`TranscriptionAgent`) that use this output.
:ivar audio_in_address: Address of the input audio stream.
:ivar audio_in_bind: Whether to bind or connect to the input address.
:ivar audio_out_socket: ZMQ PUB socket for sending speech fragments.
:ivar program_sub_socket: ZMQ SUB socket for receiving program status updates.
"""
def __init__(self, audio_in_address: str, audio_in_bind: bool):
"""
Initialize the VAD Agent.
:param audio_in_address: ZMQ address for input audio.
:param audio_in_bind: True if this agent should bind to the input address, False to connect.
"""
super().__init__(settings.agent_settings.vad_name)
self.audio_in_address = audio_in_address
self.audio_in_bind = audio_in_bind
self.audio_in_socket: azmq.Socket | None = None
self.audio_out_socket: azmq.Socket | None = None
self.audio_in_poller: SocketPoller | None = None
self.program_sub_socket: azmq.Socket | None = None
self.audio_buffer = np.array([], dtype=np.float32)
self.i_since_speech = settings.behaviour_settings.vad_initial_since_speech
self._ready = asyncio.Event()
# Pause control
self._reset_needed = False
self._paused = asyncio.Event()
self._paused.set() # Not paused at start
self.model = None
async def setup(self):
"""
Initialize resources.
1. Connects audio input socket.
2. Binds audio output socket (random port).
3. Connects to program communication socket.
4. Loads VAD model from Torch Hub.
5. Starts the streaming loop.
6. Instantiates and starts the :class:`TranscriptionAgent` with the output address.
"""
self.logger.info("Setting up %s", self.name)
self._connect_audio_in_socket()
audio_out_address = self._connect_audio_out_socket()
if audio_out_address is None:
self.logger.error("Could not bind output socket, stopping.")
await self.stop()
return
# Connect to internal communication socket
self.program_sub_socket = azmq.Context.instance().socket(zmq.SUB)
self.program_sub_socket.connect(settings.zmq_settings.internal_sub_address)
self.program_sub_socket.subscribe(PROGRAM_STATUS)
# Initialize VAD model
try:
self.model, _ = torch.hub.load(
repo_or_dir=settings.vad_settings.repo_or_dir,
model=settings.vad_settings.model_name,
force_reload=False,
)
except Exception:
self.logger.exception("Failed to load VAD model.")
await self.stop()
return
self.add_behavior(self._streaming_loop())
self.add_behavior(self._status_loop())
# Start agents dependent on the output audio fragments here
transcriber = TranscriptionAgent(audio_out_address)
await transcriber.start()
self.logger.info("Finished setting up %s", self.name)
async def stop(self):
"""
Stop listening to audio, stop publishing audio, close sockets.
"""
if self.audio_in_socket is not None:
self.audio_in_socket.close()
self.audio_in_socket = None
if self.audio_out_socket is not None:
self.audio_out_socket.close()
self.audio_out_socket = None
await super().stop()
def _connect_audio_in_socket(self):
"""
Connects (or binds) the socket for listening to audio from RI.
:return:
"""
self.audio_in_socket = azmq.Context.instance().socket(zmq.SUB)
self.audio_in_socket.setsockopt_string(zmq.SUBSCRIBE, "")
if self.audio_in_bind:
self.audio_in_socket.bind(self.audio_in_address)
else:
self.audio_in_socket.connect(self.audio_in_address)
self.audio_in_poller = SocketPoller[bytes](self.audio_in_socket)
def _connect_audio_out_socket(self) -> str | None:
"""
Returns the address that was bound to, or None if binding failed.
"""
try:
self.audio_out_socket = azmq.Context.instance().socket(zmq.PUB)
self.audio_out_socket.bind(settings.zmq_settings.vad_pub_address)
return settings.zmq_settings.vad_pub_address
except zmq.ZMQBindError:
self.logger.error("Failed to bind an audio output socket after 100 tries.")
self.audio_out_socket = None
return None
async def _reset_stream(self):
"""
Clears the ZeroMQ queue and sets ready state.
"""
discarded = 0
assert self.audio_in_poller is not None
while await self.audio_in_poller.poll(1) is not None:
discarded += 1
self.logger.info(f"Discarded {discarded} audio packets before starting.")
self._ready.set()
async def _status_loop(self):
"""Loop for checking program status. Only start listening if program is RUNNING."""
while self._running:
topic, body = await self.program_sub_socket.recv_multipart()
if topic != PROGRAM_STATUS:
continue
if body != ProgramStatus.RUNNING.value:
continue
# Program is now running, we can start our stream
await self._reset_stream()
# We don't care about further status updates
self.program_sub_socket.close()
break
async def _streaming_loop(self):
"""
Main loop for processing audio stream.
1. Polls for new audio chunks.
2. Passes chunk to VAD model.
3. Manages `i_since_speech` counter to determine start/end of speech.
4. Buffers speech + context.
5. Sends complete speech segment to output socket when silence is detected.
"""
await self._ready.wait()
while self._running:
await self._paused.wait()
# After being unpaused, reset stream and buffers
if self._reset_needed:
self.logger.debug("Resuming: resetting stream and buffers.")
await self._reset_stream()
self.audio_buffer = np.array([], dtype=np.float32)
self.i_since_speech = settings.behaviour_settings.vad_initial_since_speech
self._reset_needed = False
assert self.audio_in_poller is not None
data = await self.audio_in_poller.poll()
if data is None:
if len(self.audio_buffer) > 0:
self.logger.debug(
"No audio data received. Discarding buffer until new data arrives."
)
self.audio_buffer = np.array([], dtype=np.float32)
self.i_since_speech = settings.behaviour_settings.vad_initial_since_speech
continue
# copy otherwise Torch will be sad that it's immutable
chunk = np.frombuffer(data, dtype=np.float32).copy()
assert self.model is not None
prob = self.model(torch.from_numpy(chunk), settings.vad_settings.sample_rate_hz).item()
non_speech_patience = settings.behaviour_settings.vad_non_speech_patience_chunks
begin_silence_length = settings.behaviour_settings.vad_begin_silence_chunks
prob_threshold = settings.behaviour_settings.vad_prob_threshold
if prob > prob_threshold:
if self.i_since_speech > non_speech_patience + begin_silence_length:
self.logger.debug("Speech started.")
self.audio_buffer = np.append(self.audio_buffer, chunk)
self.i_since_speech = 0
continue
self.i_since_speech += 1
# prob < threshold, so speech maybe ended. Wait a bit more before to be more certain
if self.i_since_speech <= non_speech_patience:
self.audio_buffer = np.append(self.audio_buffer, chunk)
continue
# Speech probably ended. Make sure we have a usable amount of data.
if len(self.audio_buffer) > begin_silence_length * len(chunk):
self.logger.debug("Speech ended.")
assert self.audio_out_socket is not None
await self.audio_out_socket.send(self.audio_buffer[: -2 * len(chunk)].tobytes())
# At this point, we know that the speech has ended.
# Prepend the last chunk that had no speech, for a more fluent boundary
self.audio_buffer = chunk
async def handle_message(self, msg: InternalMessage):
"""
Handle incoming messages.
Expects messages to pause or resume the VAD processing from User Interrupt Agent.
:param msg: The received internal message.
"""
sender = msg.sender
if sender == settings.agent_settings.user_interrupt_name:
if msg.body == "PAUSE":
self.logger.info("Pausing VAD processing.")
self._paused.clear()
# If the robot needs to pick up speaking where it left off, do not set _reset_needed
self._reset_needed = True
elif msg.body == "RESUME":
self.logger.info("Resuming VAD processing.")
self._paused.set()
else:
self.logger.warning(f"Unknown command from User Interrupt Agent: {msg.body}")
else:
self.logger.debug(f"Ignoring message from unknown sender: {sender}")

View File

@@ -0,0 +1,92 @@
import json
import spade.agent
import zmq
from spade.behaviour import CyclicBehaviour
from zmq.asyncio import Context
from control_backend.agents import BaseAgent
from control_backend.core.config import settings
from control_backend.schemas.ri_message import SpeechCommand
class RICommandAgent(BaseAgent):
subsocket: zmq.Socket
pubsocket: zmq.Socket
address = ""
bind = False
def __init__(
self,
jid: str,
password: str,
port: int = 5222,
verify_security: bool = False,
address="tcp://localhost:0000",
bind=False,
):
super().__init__(jid, password, port, verify_security)
self.address = address
self.bind = bind
class SendCommandsBehaviour(CyclicBehaviour):
"""Behaviour for sending commands received from the UI."""
async def run(self):
"""
Run the command publishing loop indefinitely.
"""
assert self.agent is not None
# Get a message internally (with topic command)
topic, body = await self.agent.subsocket.recv_multipart()
# Try to get body
try:
body = json.loads(body)
message = SpeechCommand.model_validate(body)
# Send to the robot.
await self.agent.pubsocket.send_json(message.model_dump())
except Exception as e:
self.agent.logger.error("Error processing message: %s", e)
class SendPythonCommandsBehaviour(CyclicBehaviour):
"""Behaviour for sending commands received from other Python agents."""
async def run(self):
message: spade.agent.Message = await self.receive(timeout=1)
if not message:
return
if message and message.to == self.agent.jid:
try:
speech_command = SpeechCommand.model_validate_json(message.body)
await self.agent.pubsocket.send_json(speech_command.model_dump())
except Exception as e:
self.agent.logger.error("Error processing message: %s", e)
async def setup(self):
"""
Set up the command agent.
"""
self.logger.info("Setting up %s", self.jid)
context = Context.instance()
# To the robot
self.pubsocket = context.socket(zmq.PUB)
if self.bind:
self.pubsocket.bind(self.address)
else:
self.pubsocket.connect(self.address)
# Receive internal topics regarding commands
self.subsocket = context.socket(zmq.SUB)
self.subsocket.connect(settings.zmq_settings.internal_sub_address)
self.subsocket.setsockopt(zmq.SUBSCRIBE, b"command")
# Add behaviour to our agent
commands_behaviour = self.SendCommandsBehaviour()
self.add_behaviour(commands_behaviour)
self.add_behaviour(self.SendPythonCommandsBehaviour())
self.logger.info("Finished setting up %s", self.jid)

View File

@@ -0,0 +1,162 @@
import asyncio
import zmq
from spade.behaviour import CyclicBehaviour
from zmq.asyncio import Context
from control_backend.agents import BaseAgent
from control_backend.core.config import settings
from .ri_command_agent import RICommandAgent
class RICommunicationAgent(BaseAgent):
req_socket: zmq.Socket
_address = ""
_bind = True
def __init__(
self,
jid: str,
password: str,
port: int = 5222,
verify_security: bool = False,
address="tcp://localhost:0000",
bind=False,
):
super().__init__(jid, password, port, verify_security)
self._address = address
self._bind = bind
class ListenBehaviour(CyclicBehaviour):
async def run(self):
"""
Run the listening (ping) loop indefinitely.
"""
assert self.agent is not None
# We need to listen and send pings.
message = {"endpoint": "ping", "data": {"id": "e.g. some reference id"}}
await self.agent.req_socket.send_json(message)
# Wait up to three seconds for a reply:)
try:
message = await asyncio.wait_for(self.agent.req_socket.recv_json(), timeout=3.0)
# We didn't get a reply :(
except TimeoutError:
self.agent.logger.info("No ping retrieved in 3 seconds, killing myself.")
self.kill()
self.agent.logger.debug('Received message "%s"', message)
if "endpoint" not in message:
self.agent.logger.error("No received endpoint in message, excepted ping endpoint.")
return
# See what endpoint we received
match message["endpoint"]:
case "ping":
await asyncio.sleep(1)
case _:
self.agent.logger.info(
"Received message with topic different than ping, while ping expected."
)
async def setup(self, max_retries: int = 5):
"""
Try to set up the communication agent, retrying up to ``max_retries`` times (default 5) if no response is received.
"""
self.logger.info("Setting up %s", self.jid)
retries = 0
# Try a limited number of times before giving up on the connection
while retries < max_retries:
# Bind request socket
self.req_socket = Context.instance().socket(zmq.REQ)
if self._bind:
self.req_socket.bind(self._address)
else:
self.req_socket.connect(self._address)
# Send our message and receive one back:)
message = {"endpoint": "negotiate/ports", "data": None}
await self.req_socket.send_json(message)
try:
received_message = await asyncio.wait_for(self.req_socket.recv_json(), timeout=20.0)
except TimeoutError:
self.logger.warning(
"No connection established in 20 seconds (attempt %d/%d)",
retries + 1,
max_retries,
)
retries += 1
continue
except Exception as e:
self.logger.error("Unexpected error during negotiation: %s", e)
retries += 1
continue
# Validate endpoint
endpoint = received_message.get("endpoint")
if endpoint != "negotiate/ports":
# TODO: Should this send a message back?
self.logger.error(
"Invalid endpoint '%s' received (attempt %d/%d)",
endpoint,
retries + 1,
max_retries,
)
retries += 1
continue
# At this point, we have a valid response
try:
for port_data in received_message["data"]:
id = port_data["id"]
port = port_data["port"]
bind = port_data["bind"]
if not bind:
addr = f"tcp://localhost:{port}"
else:
addr = f"tcp://*:{port}"
match id:
case "main":
if addr != self._address:
if not bind:
self.req_socket.connect(addr)
else:
self.req_socket.bind(addr)
case "actuation":
ri_commands_agent = RICommandAgent(
settings.agent_settings.ri_command_agent_name
+ "@"
+ settings.agent_settings.host,
settings.agent_settings.ri_command_agent_name,
address=addr,
bind=bind,
)
await ri_commands_agent.start()
case _:
self.logger.warning("Unhandled negotiation id: %s", id)
except Exception as e:
self.logger.error("Error unpacking negotiation data: %s", e)
retries += 1
continue
# setup succeeded
break
else:
self.logger.error("Failed to set up RICommunicationAgent after %d retries", max_retries)
return
# Set up ping behaviour
listen_behaviour = self.ListenBehaviour()
self.add_behaviour(listen_behaviour)
self.logger.info("Finished setting up %s", self.jid)

View File

@@ -10,32 +10,17 @@ import numpy as np
import torch
import whisper
from control_backend.core.config import settings
class SpeechRecognizer(abc.ABC):
"""
Abstract base class for speech recognition backends.
Provides a common interface for loading models and transcribing audio,
as well as heuristics for estimating token counts to optimize decoding.
:ivar limit_output_length: If True, limits the generated text length based on audio duration.
"""
def __init__(self, limit_output_length=True):
"""
:param limit_output_length: When ``True``, the length of the generated speech will be
limited by the length of the input audio and some heuristics.
:param limit_output_length: When `True`, the length of the generated speech will be limited
by the length of the input audio and some heuristics.
"""
self.limit_output_length = limit_output_length
@abc.abstractmethod
def load_model(self):
"""
Load the speech recognition model into memory.
"""
...
def load_model(self): ...
@abc.abstractmethod
def recognize_speech(self, audio: np.ndarray) -> str:
@@ -43,33 +28,29 @@ class SpeechRecognizer(abc.ABC):
Recognize speech from the given audio sample.
:param audio: A full utterance sample. Audio must be 16 kHz, mono, np.float32, values in the
range [-1.0, 1.0].
:return: The recognized speech text.
range [-1.0, 1.0].
:return: Recognized speech.
"""
@staticmethod
def _estimate_max_tokens(audio: np.ndarray) -> int:
"""
Estimate the maximum length of a given audio sample in tokens.
Assumes a maximum speaking rate of 450 words per minute (3x average), and assumes that
3 words is approx. 4 tokens.
Estimate the maximum length of a given audio sample in tokens. Assumes a maximum speaking
rate of 450 words per minute (3x average), and assumes that 3 words is 4 tokens.
:param audio: The audio sample (16 kHz) to use for length estimation.
:return: The estimated length of the transcribed audio in tokens.
"""
length_seconds = len(audio) / settings.vad_settings.sample_rate_hz
length_seconds = len(audio) / 16_000
length_minutes = length_seconds / 60
word_count = length_minutes * settings.behaviour_settings.transcription_words_per_minute
token_count = word_count / settings.behaviour_settings.transcription_words_per_token
return int(token_count) + settings.behaviour_settings.transcription_token_buffer
word_count = length_minutes * 450
token_count = word_count / 3 * 4
return int(token_count) + 10
def _get_decode_options(self, audio: np.ndarray) -> dict:
"""
Construct decoding options for the Whisper model.
:param audio: The audio sample (16 kHz) to use to determine options like max decode length.
:return: A dict that can be used to construct ``whisper.DecodingOptions`` (or equivalent).
:return: A dict that can be used to construct `whisper.DecodingOptions`.
"""
options = {}
if self.limit_output_length:
@@ -78,12 +59,7 @@ class SpeechRecognizer(abc.ABC):
@staticmethod
def best_type():
"""
Factory method to get the best available `SpeechRecognizer`.
:return: An instance of :class:`MLXWhisperSpeechRecognizer` if on macOS with Apple Silicon,
otherwise :class:`OpenAIWhisperSpeechRecognizer`.
"""
"""Get the best type of SpeechRecognizer based on system capabilities."""
if torch.mps.is_available():
print("Choosing MLX Whisper model.")
return MLXWhisperSpeechRecognizer()
@@ -93,20 +69,12 @@ class SpeechRecognizer(abc.ABC):
class MLXWhisperSpeechRecognizer(SpeechRecognizer):
"""
Speech recognizer using the MLX framework (optimized for Apple Silicon).
"""
def __init__(self, limit_output_length=True):
super().__init__(limit_output_length)
self.was_loaded = False
self.model_name = settings.speech_model_settings.mlx_model_name
self.model_name = "mlx-community/whisper-small.en-mlx"
def load_model(self):
"""
Ensures the model is downloaded and cached. MLX loads dynamically, so this
pre-fetches the model.
"""
if self.was_loaded:
return
# There appears to be no dedicated mechanism to preload a model, but this `get_model` does
@@ -124,24 +92,15 @@ class MLXWhisperSpeechRecognizer(SpeechRecognizer):
class OpenAIWhisperSpeechRecognizer(SpeechRecognizer):
"""
Speech recognizer using the standard OpenAI Whisper library (PyTorch).
"""
def __init__(self, limit_output_length=True):
super().__init__(limit_output_length)
self.model = None
def load_model(self):
"""
Loads the OpenAI Whisper model onto the available device (CUDA or CPU).
"""
if self.model is not None:
return
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
self.model = whisper.load_model(
settings.speech_model_settings.openai_model_name, device=device
)
self.model = whisper.load_model("small.en", device=device)
def recognize_speech(self, audio: np.ndarray) -> str:
self.load_model()
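
A worked example of the token-length heuristic in `_estimate_max_tokens`, using the literal numbers that appear in this hunk (450 words per minute, 3 words is roughly 4 tokens, plus a 10-token buffer):

```python
# A 6-second utterance at the assumed maximum speaking rate:
length_seconds = 6.0
length_minutes = length_seconds / 60   # 0.1 min
word_count = length_minutes * 450      # 45 words
token_count = word_count / 3 * 4       # 60 tokens
max_tokens = int(token_count) + 10     # 70 tokens passed to the decoder
```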

View File

@@ -0,0 +1,86 @@
import asyncio
import numpy as np
import zmq
import zmq.asyncio as azmq
from spade.behaviour import CyclicBehaviour
from spade.message import Message
from control_backend.agents import BaseAgent
from control_backend.core.config import settings
from .speech_recognizer import SpeechRecognizer
class TranscriptionAgent(BaseAgent):
"""
An agent which listens to audio fragments with voice, transcribes them, and sends the
transcription to other agents.
"""
def __init__(self, audio_in_address: str):
jid = settings.agent_settings.transcription_agent_name + "@" + settings.agent_settings.host
super().__init__(jid, settings.agent_settings.transcription_agent_name)
self.audio_in_address = audio_in_address
self.audio_in_socket: azmq.Socket | None = None
class Transcribing(CyclicBehaviour):
def __init__(self, audio_in_socket: azmq.Socket):
super().__init__()
self.audio_in_socket = audio_in_socket
self.speech_recognizer = SpeechRecognizer.best_type()
self._concurrency = asyncio.Semaphore(3)
def warmup(self):
"""Load the transcription model into memory to speed up the first transcription."""
self.speech_recognizer.load_model()
async def _transcribe(self, audio: np.ndarray) -> str:
async with self._concurrency:
return await asyncio.to_thread(self.speech_recognizer.recognize_speech, audio)
async def _share_transcription(self, transcription: str):
"""Share a transcription to the other agents that depend on it."""
receiver_jids = [
settings.agent_settings.text_belief_extractor_agent_name
+ "@"
+ settings.agent_settings.host,
] # Set message receivers here
for receiver_jid in receiver_jids:
message = Message(to=receiver_jid, body=transcription)
await self.send(message)
async def run(self) -> None:
audio = await self.audio_in_socket.recv()
audio = np.frombuffer(audio, dtype=np.float32)
speech = await self._transcribe(audio)
if not speech:
self.agent.logger.info("Nothing transcribed.")
return
self.agent.logger.info("Transcribed speech: %s", speech)
await self._share_transcription(speech)
async def stop(self):
self.audio_in_socket.close()
self.audio_in_socket = None
return await super().stop()
def _connect_audio_in_socket(self):
self.audio_in_socket = azmq.Context.instance().socket(zmq.SUB)
self.audio_in_socket.setsockopt_string(zmq.SUBSCRIBE, "")
self.audio_in_socket.connect(self.audio_in_address)
async def setup(self):
self.logger.info("Setting up %s", self.jid)
self._connect_audio_in_socket()
transcribing = self.Transcribing(self.audio_in_socket)
transcribing.warmup()
self.add_behaviour(transcribing)
self.logger.info("Finished setting up %s", self.jid)

View File

@@ -1,384 +0,0 @@
import json
import zmq
from zmq.asyncio import Context
from control_backend.agents import BaseAgent
from control_backend.agents.bdi.agentspeak_generator import AgentSpeakGenerator
from control_backend.core.agent_system import InternalMessage
from control_backend.core.config import settings
from control_backend.schemas.belief_message import Belief, BeliefMessage
from control_backend.schemas.program import ConditionalNorm, Program
from control_backend.schemas.ri_message import (
GestureCommand,
PauseCommand,
RIEndpoint,
SpeechCommand,
)
class UserInterruptAgent(BaseAgent):
"""
User Interrupt Agent.
This agent receives button_pressed events from the external HTTP API
(via ZMQ) and uses the associated context to trigger one of the following actions:
- Send a prioritized message to the `RobotSpeechAgent`
- Send a prioritized gesture to the `RobotGestureAgent`
- Send a belief override to the `BDIProgramManager` in order to activate a
trigger/conditional norm or complete a goal.
Prioritized actions clear the current RI queue before inserting the new item,
ensuring they are executed immediately after Pepper's current action has been fulfilled.
:ivar sub_socket: The ZMQ SUB socket used to receive user interrupts.
"""
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.sub_socket = None
self.pub_socket = None
self._trigger_map = {}
self._trigger_reverse_map = {}
self._goal_map = {} # id -> sluggified goal
self._goal_reverse_map = {} # sluggified goal -> id
self._cond_norm_map = {} # id -> sluggified cond norm
self._cond_norm_reverse_map = {} # sluggified cond norm -> id
async def setup(self):
"""
Initialize the agent.
Connects the internal ZMQ SUB socket and subscribes to the 'button_pressed' topic.
Starts the background behavior to receive the user interrupts.
"""
context = Context.instance()
self.sub_socket = context.socket(zmq.SUB)
self.sub_socket.connect(settings.zmq_settings.internal_sub_address)
self.sub_socket.subscribe("button_pressed")
self.pub_socket = context.socket(zmq.PUB)
self.pub_socket.connect(settings.zmq_settings.internal_pub_address)
self.add_behavior(self._receive_button_event())
async def _receive_button_event(self):
"""
The behaviour of the UserInterruptAgent.
Continuous loop that receives button_pressed events from the button_pressed HTTP endpoint.
These events contain a type and a context.
These are the different types and contexts:
- type: "speech", context: string that the robot has to say.
- type: "gesture", context: single gesture name that the robot has to perform.
- type: "override", context: belief_id that overrides the goal/trigger/conditional norm.
- type: "pause", context: boolean indicating whether to pause
- type: "reset_phase", context: None, indicates to the BDI Core to
- type: "reset_experiment", context: None, indicates to the BDI Core to
"""
while True:
topic, body = await self.sub_socket.recv_multipart()
try:
event_data = json.loads(body)
event_type = event_data.get("type") # e.g., "speech", "gesture"
event_context = event_data.get("context") # e.g., "Hello, I am Pepper!"
except json.JSONDecodeError:
self.logger.error("Received invalid JSON payload on topic %s", topic)
continue
self.logger.debug("Received event type %s", event_type)
if event_type == "speech":
await self._send_to_speech_agent(event_context)
self.logger.info(
"Forwarded button press (speech) with context '%s' to RobotSpeechAgent.",
event_context,
)
elif event_type == "gesture":
await self._send_to_gesture_agent(event_context)
self.logger.info(
"Forwarded button press (gesture) with context '%s' to RobotGestureAgent.",
event_context,
)
elif event_type == "override":
ui_id = str(event_context)
if asl_trigger := self._trigger_map.get(ui_id):
await self._send_to_bdi("force_trigger", asl_trigger)
self.logger.info(
"Forwarded button press (override) with context '%s' to BDI Core.",
event_context,
)
elif asl_cond_norm := self._cond_norm_map.get(ui_id):
await self._send_to_bdi("force_norm", asl_cond_norm)
self.logger.info(
"Forwarded button press (override) with context '%s' to BDIProgramManager.",
event_context,
)
elif asl_goal := self._goal_map.get(ui_id):
await self._send_to_bdi_belief(asl_goal)
self.logger.info(
"Forwarded button press (override) with context '%s' to BDI Core.",
event_context,
)
goal_achieve_msg = InternalMessage(
to=settings.agent_settings.bdi_program_manager_name,
thread="achieve_goal",
body=ui_id,
)
await self.send(goal_achieve_msg)
else:
self.logger.warning("Could not determine which element to override.")
elif event_type == "pause":
self.logger.debug(
"Received pause/resume button press with context '%s'.", event_context
)
await self._send_pause_command(event_context)
if event_context:
self.logger.info("Sent pause command.")
else:
self.logger.info("Sent resume command.")
elif event_type in ["next_phase", "reset_phase", "reset_experiment"]:
await self._send_experiment_control_to_bdi_core(event_type)
else:
self.logger.warning(
"Received button press with unknown type '%s' (context: '%s').",
event_type,
event_context,
)
async def handle_message(self, msg: InternalMessage):
"""
Handle commands received from other internal Python agents.
"""
match msg.thread:
case "new_program":
self._create_mapping(msg.body)
case "trigger_start":
# msg.body is the sluggified trigger
asl_slug = msg.body
ui_id = self._trigger_reverse_map.get(asl_slug)
if ui_id:
payload = {"type": "trigger_update", "id": ui_id, "achieved": True}
await self._send_experiment_update(payload)
self.logger.info(f"UI Update: Trigger {asl_slug} started (ID: {ui_id})")
case "trigger_end":
asl_slug = msg.body
ui_id = self._trigger_reverse_map.get(asl_slug)
if ui_id:
payload = {"type": "trigger_update", "id": ui_id, "achieved": False}
await self._send_experiment_update(payload)
self.logger.info(f"UI Update: Trigger {asl_slug} ended (ID: {ui_id})")
case "transition_phase":
new_phase_id = msg.body
self.logger.info(f"Phase transition detected: {new_phase_id}")
payload = {"type": "phase_update", "id": new_phase_id}
await self._send_experiment_update(payload)
case "goal_start":
goal_name = msg.body
ui_id = self._goal_reverse_map.get(goal_name)
if ui_id:
payload = {"type": "goal_update", "id": ui_id, "active": True}
await self._send_experiment_update(payload)
self.logger.info(f"UI Update: Goal {goal_name} started (ID: {ui_id})")
case "active_norms_update":
norm_list = [s.strip("() '\",") for s in msg.body.split(",") if s.strip("() '\",")]
await self._broadcast_cond_norms(norm_list)
case _:
self.logger.debug(f"Received internal message on unhandled thread: {msg.thread}")
async def _broadcast_cond_norms(self, active_slugs: list[str]):
"""
Sends the current state of all conditional norms to the UI.
:param active_slugs: A list of slugs (strings) currently active in the BDI core.
"""
updates = []
for asl_slug, ui_id in self._cond_norm_reverse_map.items():
is_active = asl_slug in active_slugs
updates.append({"id": ui_id, "name": asl_slug, "active": is_active})
payload = {"type": "cond_norms_state_update", "norms": updates}
await self._send_experiment_update(payload, should_log=False)
# self.logger.debug(f"Broadcasted state for {len(updates)} conditional norms.")
def _create_mapping(self, program_json: str):
"""
Create mappings between UI IDs and ASL slugs for triggers, goals, and conditional norms
"""
try:
program = Program.model_validate_json(program_json)
self._trigger_map = {}
self._trigger_reverse_map = {}
self._goal_map = {}
self._cond_norm_map = {}
self._cond_norm_reverse_map = {}
for phase in program.phases:
for trigger in phase.triggers:
slug = AgentSpeakGenerator.slugify(trigger)
self._trigger_map[str(trigger.id)] = slug
self._trigger_reverse_map[slug] = str(trigger.id)
for goal in phase.goals:
self._goal_map[str(goal.id)] = AgentSpeakGenerator.slugify(goal)
self._goal_reverse_map[AgentSpeakGenerator.slugify(goal)] = str(goal.id)
for goal, id in self._goal_reverse_map.items():
self.logger.debug(f"Goal mapping: UI ID {goal} -> {id}")
for norm in phase.norms:
if isinstance(norm, ConditionalNorm):
asl_slug = AgentSpeakGenerator.slugify(norm)
norm_id = str(norm.id)
self._cond_norm_map[norm_id] = asl_slug
self._cond_norm_reverse_map[norm.norm] = norm_id
self.logger.debug("Added conditional norm %s", asl_slug)
self.logger.info(
f"Mapped {len(self._trigger_map)} triggers and {len(self._goal_map)} goals "
f"and {len(self._cond_norm_map)} conditional norms for UserInterruptAgent."
)
except Exception as e:
self.logger.error(f"Mapping failed: {e}")
async def _send_experiment_update(self, data, should_log: bool = True):
"""
Sends an update to the 'experiment' topic.
The SSE endpoint will pick this up and push it to the UI.
"""
if self.pub_socket:
topic = b"experiment"
body = json.dumps(data).encode("utf-8")
await self.pub_socket.send_multipart([topic, body])
if should_log:
self.logger.debug(f"Sent experiment update: {data}")
async def _send_to_speech_agent(self, text_to_say: str):
"""
Method to send a prioritized speech command to the RobotSpeechAgent.
:param text_to_say: The string that the robot has to say.
"""
cmd = SpeechCommand(data=text_to_say, is_priority=True)
out_msg = InternalMessage(
to=settings.agent_settings.robot_speech_name,
sender=self.name,
body=cmd.model_dump_json(),
)
await self.send(out_msg)
async def _send_to_gesture_agent(self, single_gesture_name: str):
"""
Method to send a prioritized gesture command to the RobotGestureAgent.
:param single_gesture_name: The gesture tag that the robot has to perform.
"""
# The endpoint is always set to GESTURE_SINGLE for user interrupts
cmd = GestureCommand(
endpoint=RIEndpoint.GESTURE_SINGLE, data=single_gesture_name, is_priority=True
)
out_msg = InternalMessage(
to=settings.agent_settings.robot_gesture_name,
sender=self.name,
body=cmd.model_dump_json(),
)
await self.send(out_msg)
async def _send_to_bdi(self, thread: str, body: str):
"""Send slug of trigger to BDI"""
msg = InternalMessage(to=settings.agent_settings.bdi_core_name, thread=thread, body=body)
await self.send(msg)
self.logger.info(f"Directly forced {thread} in BDI: {body}")
async def _send_to_bdi_belief(self, asl_goal: str):
"""Send belief to BDI Core"""
belief_name = f"achieved_{asl_goal}"
belief = Belief(name=belief_name, arguments=None)
self.logger.debug(f"Sending belief to BDI Core: {belief_name}")
belief_message = BeliefMessage(create=[belief])
msg = InternalMessage(
to=settings.agent_settings.bdi_core_name,
thread="beliefs",
body=belief_message.model_dump_json(),
)
await self.send(msg)
async def _send_experiment_control_to_bdi_core(self, type):
"""
Method to send experiment control button presses to the BDI Core.
:param type: The type of control button to forward to the BDI Core.
"""
# Switch which thread we should send to bdi core
thread = ""
match type:
case "next_phase":
thread = "force_next_phase"
case "reset_phase":
thread = "reset_current_phase"
case "reset_experiment":
thread = "reset_experiment"
case _:
self.logger.warning(
"Received unknown experiment control type '%s' to send to BDI Core.",
type,
)
out_msg = InternalMessage(
to=settings.agent_settings.bdi_core_name,
sender=self.name,
thread=thread,
body="",
)
self.logger.debug("Sending experiment control '%s' to BDI Core.", thread)
await self.send(out_msg)
async def _send_pause_command(self, pause):
"""
Send a pause command to the Robot Interface via the RI Communication Agent.
Also send a pause command to the other internal agents; for now just the VAD agent.
"""
cmd = PauseCommand(data=pause)
message = InternalMessage(
to=settings.agent_settings.ri_communication_name,
sender=self.name,
body=cmd.model_dump_json(),
)
await self.send(message)
if pause == "true":
# Send pause to VAD agent
vad_message = InternalMessage(
to=settings.agent_settings.vad_name,
sender=self.name,
body="PAUSE",
)
await self.send(vad_message)
self.logger.info("Sent pause command to VAD Agent and RI Communication Agent.")
else:
# Send resume to VAD agent
vad_message = InternalMessage(
to=settings.agent_settings.vad_name,
sender=self.name,
body="RESUME",
)
await self.send(vad_message)
self.logger.info("Sent resume command to VAD Agent and RI Communication Agent.")

View File

@@ -0,0 +1,172 @@
import numpy as np
import torch
import zmq
import zmq.asyncio as azmq
from spade.behaviour import CyclicBehaviour
from control_backend.agents import BaseAgent
from control_backend.core.config import settings
from .transcription.transcription_agent import TranscriptionAgent
class SocketPoller[T]:
"""
Convenience class for polling a socket for data with a timeout, persisting a zmq.Poller for
multiple usages.
"""
def __init__(self, socket: azmq.Socket, timeout_ms: int = 100):
"""
:param socket: The socket to poll and get data from.
:param timeout_ms: A timeout in milliseconds to wait for data.
"""
self.socket = socket
self.poller = zmq.Poller()
self.poller.register(self.socket, zmq.POLLIN)
self.timeout_ms = timeout_ms
async def poll(self, timeout_ms: int | None = None) -> T | None:
"""
Get data from the socket, or None if the timeout is reached.
:param timeout_ms: If given, the timeout. Otherwise, `self.timeout_ms` is used.
:return: Data from the socket or None.
"""
timeout_ms = timeout_ms or self.timeout_ms
socks = dict(self.poller.poll(timeout_ms))
if socks.get(self.socket) == zmq.POLLIN:
return await self.socket.recv()
return None
class Streaming(CyclicBehaviour):
def __init__(self, audio_in_socket: azmq.Socket, audio_out_socket: azmq.Socket):
super().__init__()
self.audio_in_poller = SocketPoller[bytes](audio_in_socket)
self.model, _ = torch.hub.load(
repo_or_dir="snakers4/silero-vad", model="silero_vad", force_reload=False
)
self.audio_out_socket = audio_out_socket
self.audio_buffer = np.array([], dtype=np.float32)
self.i_since_speech = 100 # Used to allow small pauses in speech
self._ready = False
async def reset(self):
"""Clears the ZeroMQ queue and tells this behavior to start."""
discarded = 0
while await self.audio_in_poller.poll(1) is not None:
discarded += 1
self.agent.logger.info(f"Discarded {discarded} audio packets before starting.")
self._ready = True
async def run(self) -> None:
if not self._ready:
return
data = await self.audio_in_poller.poll()
if data is None:
if len(self.audio_buffer) > 0:
self.agent.logger.debug(
"No audio data received. Discarding buffer until new data arrives."
)
self.audio_buffer = np.array([], dtype=np.float32)
self.i_since_speech = 100
return
# copy otherwise Torch will be sad that it's immutable
chunk = np.frombuffer(data, dtype=np.float32).copy()
prob = self.model(torch.from_numpy(chunk), 16000).item()
if prob > 0.5:
if self.i_since_speech > 3:
self.agent.logger.debug("Speech started.")
self.audio_buffer = np.append(self.audio_buffer, chunk)
self.i_since_speech = 0
return
self.i_since_speech += 1
# prob < 0.5, so speech maybe ended. Wait a bit more before to be more certain
if self.i_since_speech <= 3:
self.audio_buffer = np.append(self.audio_buffer, chunk)
return
# Speech probably ended. Make sure we have a usable amount of data.
if len(self.audio_buffer) >= 3 * len(chunk):
self.agent.logger.debug("Speech ended.")
await self.audio_out_socket.send(self.audio_buffer[: -2 * len(chunk)].tobytes())
# At this point, we know that the speech has ended.
# Prepend the last chunk that had no speech, for a more fluent boundary
self.audio_buffer = chunk
class VADAgent(BaseAgent):
"""
An agent which listens to an audio stream, does Voice Activity Detection (VAD), and sends
fragments with detected speech to other agents over ZeroMQ.
"""
def __init__(self, audio_in_address: str, audio_in_bind: bool):
jid = settings.agent_settings.vad_agent_name + "@" + settings.agent_settings.host
super().__init__(jid, settings.agent_settings.vad_agent_name)
self.audio_in_address = audio_in_address
self.audio_in_bind = audio_in_bind
self.audio_in_socket: azmq.Socket | None = None
self.audio_out_socket: azmq.Socket | None = None
self.streaming_behaviour: Streaming | None = None
async def stop(self):
"""
Stop listening to audio, stop publishing audio, close sockets.
"""
if self.audio_in_socket is not None:
self.audio_in_socket.close()
self.audio_in_socket = None
if self.audio_out_socket is not None:
self.audio_out_socket.close()
self.audio_out_socket = None
return await super().stop()
def _connect_audio_in_socket(self):
self.audio_in_socket = azmq.Context.instance().socket(zmq.SUB)
self.audio_in_socket.setsockopt_string(zmq.SUBSCRIBE, "")
if self.audio_in_bind:
self.audio_in_socket.bind(self.audio_in_address)
else:
self.audio_in_socket.connect(self.audio_in_address)
self.audio_in_poller = SocketPoller[bytes](self.audio_in_socket)
def _connect_audio_out_socket(self) -> int | None:
"""Returns the port bound, or None if binding failed."""
try:
self.audio_out_socket = azmq.Context.instance().socket(zmq.PUB)
return self.audio_out_socket.bind_to_random_port("tcp://*", max_tries=100)
except zmq.ZMQBindError:
self.logger.error("Failed to bind an audio output socket after 100 tries.")
self.audio_out_socket = None
return None
async def setup(self):
self.logger.info("Setting up %s", self.jid)
self._connect_audio_in_socket()
audio_out_port = self._connect_audio_out_socket()
if audio_out_port is None:
await self.stop()
return
audio_out_address = f"tcp://localhost:{audio_out_port}"
self.streaming_behaviour = Streaming(self.audio_in_socket, self.audio_out_socket)
self.add_behaviour(self.streaming_behaviour)
# Start agents dependent on the output audio fragments here
transcriber = TranscriptionAgent(audio_out_address)
await transcriber.start()
self.logger.info("Finished setting up %s", self.jid)

View File

@@ -0,0 +1,20 @@
import logging
from fastapi import APIRouter, Request
from control_backend.schemas.ri_message import SpeechCommand
logger = logging.getLogger(__name__)
router = APIRouter()
@router.post("/command", status_code=202)
async def receive_command(command: SpeechCommand, request: Request):
# Validate and retrieve data.
SpeechCommand.model_validate(command)
topic = b"command"
pub_socket = request.app.state.endpoints_pub_socket
await pub_socket.send_multipart([topic, command.model_dump_json().encode()])
return {"status": "Command received"}

View File

@@ -15,14 +15,6 @@ router = APIRouter()
# DO NOT LOG INSIDE THIS FUNCTION
@router.get("/logs/stream")
async def log_stream():
"""
Server-Sent Events (SSE) endpoint for real-time log streaming.
Subscribes to the internal ZMQ logging topic and forwards log records to the client.
Allows the frontend to display live logs from the backend.
:return: A StreamingResponse yielding SSE data.
"""
context = Context.instance()
socket = context.socket(zmq.SUB)

View File

@@ -11,14 +11,6 @@ router = APIRouter()
@router.post("/message", status_code=202)
async def receive_message(message: Message, request: Request):
"""
Generic endpoint to receive text messages.
Publishes the message to the internal 'message' topic via ZMQ.
:param message: The message payload.
:param request: The FastAPI request object (used to access app state).
"""
logger.info("Received message: %s", message.message)
topic = b"message"

View File

@@ -1,31 +0,0 @@
import logging
from fastapi import APIRouter, Request
from control_backend.schemas.program import Program
logger = logging.getLogger(__name__)
router = APIRouter()
@router.post("/program", status_code=202)
async def receive_message(program: Program, request: Request):
"""
Endpoint to upload a new Behavior Program.
Validates the program structure (phases, norms, goals) and publishes it to the internal
'program' topic. The :class:`~control_backend.agents.bdi.bdi_program_manager.BDIProgramManager`
will pick this up and update the BDI agent.
:param program: The parsed Program object.
:param request: The FastAPI request object.
"""
logger.debug("Received raw program: %s", program)
# send away
topic = b"program"
body = program.model_dump_json().encode()
pub_socket = request.app.state.endpoints_pub_socket
await pub_socket.send_multipart([topic, body])
return {"status": "Program parsed"}

View File

@@ -1,143 +0,0 @@
import asyncio
import json
import logging
import zmq.asyncio
from fastapi import APIRouter, Request
from fastapi.responses import StreamingResponse
from zmq.asyncio import Context, Socket
from control_backend.core.config import settings
from control_backend.schemas.ri_message import GestureCommand, SpeechCommand
logger = logging.getLogger(__name__)
router = APIRouter()
@router.post("/command/speech", status_code=202)
async def receive_command_speech(command: SpeechCommand, request: Request):
"""
Send a direct speech command to the robot.
Publishes the command to the internal 'command' topic. The
:class:`~control_backend.agents.actuation.robot_speech_agent.RobotSpeechAgent`
will forward this to the robot.
:param command: The speech command payload.
:param request: The FastAPI request object.
"""
topic = b"command"
pub_socket: Socket = request.app.state.endpoints_pub_socket
await pub_socket.send_multipart([topic, command.model_dump_json().encode()])
return {"status": "Speech command received"}
@router.post("/command/gesture", status_code=202)
async def receive_command_gesture(command: GestureCommand, request: Request):
"""
Send a direct gesture command to the robot.
Publishes the command to the internal 'command' topic. The
:class:`~control_backend.agents.actuation.robot_speech_agent.RobotGestureAgent`
will forward this to the robot.
:param command: The gesture command payload.
:param request: The FastAPI request object.
"""
topic = b"command"
pub_socket: Socket = request.app.state.endpoints_pub_socket
await pub_socket.send_multipart([topic, command.model_dump_json().encode()])
return {"status": "Gesture command received"}
@router.get("/ping_check")
async def ping(request: Request):
"""
Simple HTTP ping endpoint to check if the backend is reachable.
"""
pass
@router.get("/commands/gesture/tags")
async def get_available_gesture_tags(request: Request, count=0):
"""
Endpoint to retrieve the available gesture tags for the robot.
:param request: The FastAPI request object.
:return: A list of available gesture tags.
"""
req_socket = Context.instance().socket(zmq.REQ)
req_socket.connect(settings.zmq_settings.internal_gesture_rep_adress)
# Check to see if we've got any count given in the query parameter
amount = count or None
timeout = 5 # seconds
await req_socket.send(f"{amount}".encode() if amount else b"None")
try:
body = await asyncio.wait_for(req_socket.recv(), timeout=timeout)
except TimeoutError:
body = '{"tags": []}'
logger.debug("Got timeout error fetching gestures.")
# Handle empty response and JSON decode errors
available_tags = []
if body:
try:
available_tags = json.loads(body).get("tags", [])
except json.JSONDecodeError as e:
logger.error(f"Failed to parse gesture tags JSON: {e}, body: {body}")
# Return empty list on JSON error
available_tags = []
return {"available_gesture_tags": available_tags}
@router.get("/ping_stream")
async def ping_stream(request: Request):
"""
SSE endpoint for monitoring the Robot Interface connection status.
Subscribes to the internal 'ping' topic (published by the RI Communication Agent)
and yields status updates to the client.
:return: A StreamingResponse of connection status events.
"""
async def event_stream():
# Set up internal socket to receive ping updates
sub_socket = Context.instance().socket(zmq.SUB)
sub_socket.connect(settings.zmq_settings.internal_sub_address)
sub_socket.setsockopt(zmq.SUBSCRIBE, b"ping")
connected = False
ping_frequency = 2
# Even though it's most likely the updates should alternate
# (So, True - False - True - False for connectivity),
# let's still check.
while True:
try:
topic, body = await asyncio.wait_for(
sub_socket.recv_multipart(), timeout=ping_frequency
)
connected = json.loads(body)
except TimeoutError:
logger.debug("got timeout error in ping loop in ping router")
connected = False
# Stop if client disconnected
if await request.is_disconnected():
logger.info("Client disconnected from SSE")
break
connectedJson = json.dumps(connected)
yield (f"data: {connectedJson}\n\n")
return StreamingResponse(event_stream(), media_type="text/event-stream")
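
The ping_stream endpoint emits standard SSE `data:` lines. A hedged consumer sketch follows; the base URL and API prefix are assumptions, while the `/robot` prefix comes from the router registration later in this diff.

```python
# Sketch: follow the RI connectivity stream from a client.
import httpx

with httpx.stream(
    "GET", "http://localhost:8000/api/v1/robot/ping_stream", timeout=None
) as response:
    for line in response.iter_lines():
        if line.startswith("data: "):
            print("RI connected:", line.removeprefix("data: "))
```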

View File

@@ -6,7 +6,4 @@ router = APIRouter()
# TODO: implement
@router.get("/sse")
async def sse(request: Request):
"""
Placeholder for future Server-Sent Events endpoint.
"""
pass

View File

@@ -1,67 +0,0 @@
import asyncio
import logging
import zmq
import zmq.asyncio
from fastapi import APIRouter, Request
from fastapi.responses import StreamingResponse
from zmq.asyncio import Context
from control_backend.core.config import settings
from control_backend.schemas.events import ButtonPressedEvent
logger = logging.getLogger(__name__)
router = APIRouter()
@router.post("/button_pressed", status_code=202)
async def receive_button_event(event: ButtonPressedEvent, request: Request):
"""
Endpoint to handle external button press events.
Validates the event payload and publishes it to the internal 'button_pressed' topic.
Subscribers (in this case user_interrupt_agent) will pick this up to trigger
specific behaviors or state changes.
:param event: The parsed ButtonPressedEvent object.
:param request: The FastAPI request object.
"""
logger.debug("Received button event: %s | %s", event.type, event.context)
topic = b"button_pressed"
body = event.model_dump_json().encode()
pub_socket = request.app.state.endpoints_pub_socket
await pub_socket.send_multipart([topic, body])
return {"status": "Event received"}
@router.get("/experiment_stream")
async def experiment_stream(request: Request):
# Use the asyncio-compatible context
context = Context.instance()
socket = context.socket(zmq.SUB)
# Connect and subscribe
socket.connect(settings.zmq_settings.internal_sub_address)
socket.subscribe(b"experiment")
async def gen():
try:
while True:
# Check if client closed the tab
if await request.is_disconnected():
logger.info("Client disconnected from experiment stream.")
break
try:
parts = await asyncio.wait_for(socket.recv_multipart(), timeout=1.0)
_, message = parts
yield f"data: {message.decode().strip()}\n\n"
except TimeoutError:
continue
finally:
socket.close()
return StreamingResponse(gen(), media_type="text/event-stream")

View File

@@ -1,6 +1,6 @@
from fastapi.routing import APIRouter
from control_backend.api.v1.endpoints import logs, message, program, robot, sse, user_interact
from control_backend.api.v1.endpoints import command, logs, message, sse
api_router = APIRouter()
@@ -8,10 +8,6 @@ api_router.include_router(message.router, tags=["Messages"])
api_router.include_router(sse.router, tags=["SSE"])
api_router.include_router(robot.router, prefix="/robot", tags=["Pings", "Commands"])
api_router.include_router(command.router, tags=["Commands"])
api_router.include_router(logs.router, tags=["Logs"])
api_router.include_router(program.router, tags=["Program"])
api_router.include_router(user_interact.router, tags=["Button Pressed Events"])

View File

@@ -1,219 +0,0 @@
import asyncio
import logging
from abc import ABC, abstractmethod
from asyncio import Task
from collections.abc import Coroutine
import zmq
import zmq.asyncio as azmq
from control_backend.core.config import settings
from control_backend.schemas.internal_message import InternalMessage
# Central directory to resolve agent names to instances
_agent_directory: dict[str, "BaseAgent"] = {}
class AgentDirectory:
"""
Helper class to keep track of which agents are registered.
Used for handling message routing.
"""
@staticmethod
def register(name: str, agent: "BaseAgent"):
_agent_directory[name] = agent
@staticmethod
def get(name: str) -> "BaseAgent | None":
return _agent_directory.get(name)
class BaseAgent(ABC):
"""
Abstract base class for all agents in the system.
This class provides the foundational infrastructure for agent lifecycle management, messaging
(both intra-process and inter-process via ZMQ), and asynchronous behavior execution.
.. warning::
Do not inherit from this class directly for creating new agents. Instead, inherit from
:class:`control_backend.agents.base.BaseAgent`, which ensures proper logger configuration.
:ivar name: The unique name of the agent.
:ivar inbox: The queue for receiving internal messages.
:ivar _tasks: A set of currently running asynchronous tasks/behaviors.
:ivar _running: A boolean flag indicating if the agent is currently running.
:ivar logger: The logger instance for the agent.
"""
logger: logging.Logger
def __init__(self, name: str):
"""
Initialize the BaseAgent.
:param name: The unique identifier for this agent.
"""
self.name = name
self.inbox: asyncio.Queue[InternalMessage] = asyncio.Queue()
self._tasks: set[asyncio.Task] = set()
self._running = False
self._internal_pub_socket: None | azmq.Socket = None
self._internal_sub_socket: None | azmq.Socket = None
# Register immediately
AgentDirectory.register(name, self)
@abstractmethod
async def setup(self):
"""
Initialize agent-specific resources.
This method must be overridden by subclasses. It is called after the agent has started
and the ZMQ sockets have been initialized. Use this method to:
* Initialize connections (databases, APIs, etc.)
* Add initial behaviors using :meth:`add_behavior`
"""
pass
async def start(self):
"""
Start the agent and its internal loops.
This method:
1. Sets the running state to True.
2. Initializes ZeroMQ PUB/SUB sockets for inter-process communication.
3. Calls the user-defined :meth:`setup` method.
4. Starts the inbox processing loop and the ZMQ receiver loop.
"""
self.logger.info(f"Starting agent {self.name}")
self._running = True
context = azmq.Context.instance()
# Setup the internal publishing socket
self._internal_pub_socket = context.socket(zmq.PUB)
self._internal_pub_socket.connect(settings.zmq_settings.internal_pub_address)
# Setup the internal receiving socket
self._internal_sub_socket = context.socket(zmq.SUB)
self._internal_sub_socket.connect(settings.zmq_settings.internal_sub_address)
self._internal_sub_socket.subscribe(f"internal/{self.name}")
await self.setup()
# Start processing inbox and ZMQ messages
self.add_behavior(self._process_inbox())
self.add_behavior(self._receive_internal_zmq_loop())
async def stop(self):
"""
Stop the agent.
Sets the running state to False and cancels all running background tasks.
"""
self._running = False
for task in self._tasks:
task.cancel()
self.logger.info(f"Agent {self.name} stopped")
async def send(self, message: InternalMessage, should_log: bool = True):
"""
Send a message to another agent.
This method intelligently routes the message:
* If the target agent is in the same process (found in :class:`AgentDirectory`),
the message is put directly into its inbox.
* If the target agent is not found locally, the message is serialized and sent
via ZeroMQ to the internal publication address.
:param message: The message to send.
"""
message.sender = self.name
to = message.to
receivers = [to] if isinstance(to, str) else to
for receiver in receivers:
target = AgentDirectory.get(receiver)
if target:
await target.inbox.put(message)
if should_log:
self.logger.debug(
f"Sent message {message.body} to {message.to} via regular inbox."
)
else:
# Apparently the target agent is in a different process, send via ZMQ
topic = f"internal/{receiver}".encode()
body = message.model_dump_json().encode()
await self._internal_pub_socket.send_multipart([topic, body])
if should_log:
self.logger.debug(f"Sent message {message.body} to {message.to} via ZMQ.")
async def _process_inbox(self):
"""
Internal loop that processes messages from the inbox.
Reads messages from ``self.inbox`` and passes them to :meth:`handle_message`.
"""
while self._running:
msg = await self.inbox.get()
await self.handle_message(msg)
async def _receive_internal_zmq_loop(self):
"""
Internal loop that listens for ZMQ messages.
Subscribes to ``internal/<agent_name>`` topics. When a message is received,
it is deserialized into an :class:`InternalMessage` and put into the local inbox.
This bridges the gap between inter-process ZMQ communication and the intra-process inbox.
"""
while self._running:
try:
_, body = await self._internal_sub_socket.recv_multipart()
msg = InternalMessage.model_validate_json(body)
await self.inbox.put(msg)
except asyncio.CancelledError:
break
except Exception:
self.logger.exception("Could not process ZMQ message.")
async def handle_message(self, msg: InternalMessage):
"""
Handle an incoming message.
This method must be overridden by subclasses to define how the agent reacts to messages.
:param msg: The received message.
:raises NotImplementedError: If not overridden by the subclass.
"""
raise NotImplementedError
def add_behavior(self, coro: Coroutine) -> Task:
"""
Add a background behavior (task) to the agent.
This is the preferred way to run continuous loops or long-running tasks within an agent.
The task is tracked and will be automatically cancelled when :meth:`stop` is called.
:param coro: The coroutine to execute as a task.
:return: The created task.
"""
async def try_coro(coro_: Coroutine):
try:
await coro_
except asyncio.CancelledError:
self.logger.debug("A behavior was canceled successfully: %s", coro_)
except Exception:
self.logger.warning("An exception occurred in a behavior.", exc_info=True)
task = asyncio.create_task(try_coro(coro))
self._tasks.add(task)
task.add_done_callback(self._tasks.discard)
return task
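# Illustrative sketch (not part of the module): a minimal subclass wiring `setup`,
# `handle_message`, `add_behavior` and `send` together. The class and peer names
# ("EchoAgent", "logger_agent") are hypothetical, and the base class is assumed to be
# importable from this module as `Agent`.
class EchoAgent(Agent):
    async def setup(self):
        # Register a periodic heartbeat as a tracked background behavior.
        self.add_behavior(self._heartbeat())

    async def handle_message(self, msg: InternalMessage):
        # Echo every message back to its sender, preserving the thread if any.
        if msg.sender:
            await self.send(InternalMessage(to=msg.sender, body=msg.body, thread=msg.thread))

    async def _heartbeat(self):
        while self._running:
            await self.send(InternalMessage(to="logger_agent", body="alive"), should_log=False)
            await asyncio.sleep(5)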

View File

@@ -1,192 +1,43 @@
"""
An exhaustive overview of configurable options. All of these can be set using environment variables
by nesting with double underscores (__). Start from the ``Settings`` class.
For example, ``settings.ri_host`` becomes ``RI_HOST``, and
``settings.zmq_settings.ri_communication_address`` becomes
``ZMQ_SETTINGS__RI_COMMUNICATION_ADDRESS``.
"""
from pydantic import BaseModel
from pydantic_settings import BaseSettings, SettingsConfigDict
class ZMQSettings(BaseModel):
"""
Configuration for ZeroMQ (ZMQ) addresses used for inter-process communication.
:ivar internal_pub_address: Address for the internal PUB socket.
:ivar internal_sub_address: Address for the internal SUB socket.
:ivar ri_communication_address: Address for the endpoint that the Robot Interface connects to.
:ivar internal_gesture_rep_adress: Address for the internal gesture REP socket.
:ivar vad_pub_address: Address that the VAD agent binds to and publishes audio segments to.
"""
# ATTENTION: When adding/removing settings, make sure to update the .env.example file
internal_pub_address: str = "tcp://localhost:5560"
internal_sub_address: str = "tcp://localhost:5561"
ri_communication_address: str = "tcp://*:5555"
internal_gesture_rep_adress: str = "tcp://localhost:7788"
vad_pub_address: str = "inproc://vad_stream"
class AgentSettings(BaseModel):
"""
Names of the various agents in the system. These names are used for routing messages.
host: str = "localhost"
bdi_core_agent_name: str = "bdi_core"
belief_collector_agent_name: str = "belief_collector"
text_belief_extractor_agent_name: str = "text_belief_extractor"
vad_agent_name: str = "vad_agent"
llm_agent_name: str = "llm_agent"
test_agent_name: str = "test_agent"
transcription_agent_name: str = "transcription_agent"
:ivar bdi_core_name: Name of the BDI Core Agent.
:ivar bdi_program_manager_name: Name of the BDI Program Manager Agent.
:ivar text_belief_extractor_name: Name of the Text Belief Extractor Agent.
:ivar vad_name: Name of the Voice Activity Detection (VAD) Agent.
:ivar llm_name: Name of the Large Language Model (LLM) Agent.
:ivar test_name: Name of the Test Agent.
:ivar transcription_name: Name of the Transcription Agent.
:ivar ri_communication_name: Name of the RI Communication Agent.
:ivar robot_speech_name: Name of the Robot Speech Agent.
:ivar robot_gesture_name: Name of the Robot Gesture Agent.
:ivar user_interrupt_name: Name of the User Interrupt Agent.
"""
# ATTENTION: When adding/removing settings, make sure to update the .env.example file
# agent names
bdi_core_name: str = "bdi_core_agent"
bdi_program_manager_name: str = "bdi_program_manager_agent"
text_belief_extractor_name: str = "text_belief_extractor_agent"
vad_name: str = "vad_agent"
llm_name: str = "llm_agent"
test_name: str = "test_agent"
transcription_name: str = "transcription_agent"
ri_communication_name: str = "ri_communication_agent"
robot_speech_name: str = "robot_speech_agent"
robot_gesture_name: str = "robot_gesture_agent"
user_interrupt_name: str = "user_interrupt_agent"
class BehaviourSettings(BaseModel):
"""
Configuration for agent behaviors and parameters.
:ivar sleep_s: Default sleep time in seconds for loops.
:ivar comm_setup_max_retries: Maximum number of retries for setting up communication.
:ivar socket_poller_timeout_ms: Timeout in milliseconds for socket polling.
:ivar vad_prob_threshold: Probability threshold for Voice Activity Detection.
:ivar vad_initial_since_speech: Initial value for 'since speech' counter in VAD.
:ivar vad_non_speech_patience_chunks: Number of non-speech chunks to wait before speech is considered ended.
:ivar vad_begin_silence_chunks: The number of chunks of silence to prepend to speech chunks.
:ivar transcription_max_concurrent_tasks: Maximum number of concurrent transcription tasks.
:ivar transcription_words_per_minute: Estimated words per minute for transcription timing.
:ivar transcription_words_per_token: Estimated words per token for transcription timing.
:ivar transcription_token_buffer: Buffer for transcription tokens.
:ivar conversation_history_length_limit: The maximum number of messages to extract beliefs from.
"""
# ATTENTION: When adding/removing settings, make sure to update the .env.example file
sleep_s: float = 1.0
comm_setup_max_retries: int = 5
socket_poller_timeout_ms: int = 100
# VAD settings
vad_prob_threshold: float = 0.5
vad_initial_since_speech: int = 100
vad_non_speech_patience_chunks: int = 15
vad_begin_silence_chunks: int = 6
# transcription behaviour
transcription_max_concurrent_tasks: int = 3
transcription_words_per_minute: int = 300
transcription_words_per_token: float = 0.75 # (3 words = 4 tokens)
transcription_token_buffer: int = 10
# Text belief extractor settings
conversation_history_length_limit: int = 10
ri_communication_agent_name: str = "ri_communication_agent"
ri_command_agent_name: str = "ri_command_agent"
class LLMSettings(BaseModel):
"""
Configuration for the Large Language Model (LLM).
:ivar local_llm_url: URL for the local LLM API.
:ivar local_llm_model: Name of the local LLM model to use.
:ivar chat_temperature: The temperature to use while generating chat responses.
:ivar code_temperature: The temperature to use while generating code-like responses, such as during
belief inference.
:ivar n_parallel: The maximum number of parallel calls that may be made to the LLM.
"""
# ATTENTION: When adding/removing settings, make sure to update the .env.example file
local_llm_url: str = "http://localhost:1234/v1/chat/completions"
local_llm_model: str = "gpt-oss"
chat_temperature: float = 1.0
code_temperature: float = 0.3
n_parallel: int = 4
class VADSettings(BaseModel):
"""
Configuration for Voice Activity Detection (VAD) model.
:ivar repo_or_dir: Repository or directory for the VAD model.
:ivar model_name: Name of the VAD model.
:ivar sample_rate_hz: Sample rate in Hz for the VAD model.
"""
# ATTENTION: When adding/removing settings, make sure to update the .env.example file
repo_or_dir: str = "snakers4/silero-vad"
model_name: str = "silero_vad"
sample_rate_hz: int = 16000
class SpeechModelSettings(BaseModel):
"""
Configuration for speech recognition models.
:ivar mlx_model_name: Model name for MLX-based speech recognition.
:ivar openai_model_name: Model name for OpenAI-based speech recognition.
"""
# ATTENTION: When adding/removing settings, make sure to update the .env.example file
# model identifiers for speech recognition
mlx_model_name: str = "mlx-community/whisper-small.en-mlx"
openai_model_name: str = "small.en"
local_llm_model: str = "openai/gpt-oss-20b"
class Settings(BaseSettings):
"""
Global application settings.
:ivar app_title: Title of the application.
:ivar ui_url: URL of the frontend UI.
:ivar ri_host: The hostname of the Robot Interface.
:ivar zmq_settings: ZMQ configuration.
:ivar agent_settings: Agent name configuration.
:ivar behaviour_settings: Behavior configuration.
:ivar vad_settings: VAD model configuration.
:ivar speech_model_settings: Speech model configuration.
:ivar llm_settings: LLM configuration.
"""
app_title: str = "PepperPlus"
ui_url: str = "http://localhost:5173"
ri_host: str = "localhost"
zmq_settings: ZMQSettings = ZMQSettings()
agent_settings: AgentSettings = AgentSettings()
behaviour_settings: BehaviourSettings = BehaviourSettings()
vad_settings: VADSettings = VADSettings()
speech_model_settings: SpeechModelSettings = SpeechModelSettings()
llm_settings: LLMSettings = LLMSettings()
model_config = SettingsConfigDict(env_file=".env", env_nested_delimiter="__")
model_config = SettingsConfigDict(env_file=".env")
settings = Settings()
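# Illustrative sketch: overriding a nested setting through an environment variable, as described
# in the module docstring. The double underscore maps to attribute nesting, so
# ``llm_settings.chat_temperature`` is controlled by ``LLM_SETTINGS__CHAT_TEMPERATURE``. Setting
# the variable in-process before constructing ``Settings`` is purely for demonstration; normally
# the value would come from the shell or a .env file.
import os

os.environ["LLM_SETTINGS__CHAT_TEMPERATURE"] = "0.7"
example_settings = Settings()
assert example_settings.llm_settings.chat_temperature == 0.7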

View File

@@ -4,7 +4,6 @@ import os
import yaml
import zmq
from zmq.log.handlers import PUBHandler
from control_backend.core.config import settings
@@ -38,12 +37,6 @@ def add_logging_level(level_name: str, level_num: int, method_name: str | None =
def setup_logging(path: str = ".logging_config.yaml") -> None:
"""
Set up the logging configuration of the Control Backend. Tries to load the logging configuration
from a file, in which custom loggers, formatters, handlers, etc. are specified.
:param path: Path to the YAML logging configuration file.
:return: None
"""
if os.path.exists(path):
with open(path) as f:
try:
@@ -52,27 +45,15 @@ def setup_logging(path: str = ".logging_config.yaml") -> None:
logging.warning(f"Could not load logging configuration: {e}")
config = {}
custom_levels = config.get("custom_levels", {}) or {}
for level_name, level_num in custom_levels.items():
add_logging_level(level_name, level_num)
if "custom_levels" in config:
for level_name, level_num in config["custom_levels"].items():
add_logging_level(level_name, level_num)
if config.get("handlers") is not None and config.get("handlers").get("ui"):
pub_socket = zmq.Context.instance().socket(zmq.PUB)
pub_socket.connect(settings.zmq_settings.internal_pub_address)
config["handlers"]["ui"]["interface_or_socket"] = pub_socket
logging.config.dictConfig(config)
# Patch ZMQ PUBHandler to know about custom levels
if custom_levels:
for logger_name in ("control_backend",):
logger = logging.getLogger(logger_name)
for handler in logger.handlers:
if isinstance(handler, PUBHandler):
# Use the INFO formatter as the default template
default_fmt = handler.formatters[logging.INFO]
for level_num in custom_levels.values():
handler.setFormatter(default_fmt, level=level_num)
else:
logging.warning("Logging config file not found. Using default logging configuration.")

View File

@@ -1,20 +1,3 @@
"""
Control Backend Main Application.
This module defines the FastAPI application that serves as the entry point for the
Control Backend. It manages the lifecycle of the entire system, including:
1. **Socket Initialization**: Setting up the internal ZeroMQ PUB/SUB proxy for agent communication.
2. **Agent Management**: Instantiating and starting all agents.
3. **API Routing**: Exposing REST endpoints for external interaction.
Lifecycle Manager
-----------------
The :func:`lifespan` context manager handles the startup and shutdown sequences:
- **Startup**: Configures logging, starts the ZMQ proxy, connects sockets, and launches agents.
- **Shutdown**: Handles graceful cleanup (though currently minimal).
"""
import contextlib
import logging
import threading
@@ -24,39 +7,21 @@ from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
from zmq.asyncio import Context
# BDI agents
from control_backend.agents.bdi import (
BDICoreAgent,
TextBeliefExtractorAgent,
from control_backend.agents import (
BeliefCollectorAgent,
LLMAgent,
RICommunicationAgent,
VADAgent,
)
from control_backend.agents.bdi.bdi_program_manager import BDIProgramManager
# Communication agents
from control_backend.agents.communication import RICommunicationAgent
# Emotional Agents
# LLM Agents
from control_backend.agents.llm import LLMAgent
# User Interrupt Agent
from control_backend.agents.user_interrupt.user_interrupt_agent import UserInterruptAgent
# Other backend imports
from control_backend.agents.bdi import BDICoreAgent, TBeliefExtractorAgent
from control_backend.api.v1.router import api_router
from control_backend.core.config import settings
from control_backend.logging import setup_logging
from control_backend.schemas.program_status import PROGRAM_STATUS, ProgramStatus
logger = logging.getLogger(__name__)
def setup_sockets():
"""
Initialize and run the internal ZeroMQ Proxy (XPUB/XSUB).
This proxy acts as the central message bus, forwarding messages published on the
internal PUB address to all subscribers on the internal SUB address.
"""
context = Context.instance()
internal_pub_socket = context.socket(zmq.XPUB)
@@ -83,6 +48,7 @@ async def lifespan(app: FastAPI):
# --- APPLICATION STARTUP ---
setup_logging()
logger.info("%s is starting up.", app.title)
logger.warning("testing extra", extra={"extra1": "one", "extra2": "two"})
# Initiate sockets
proxy_thread = threading.Thread(target=setup_sockets)
@@ -95,78 +61,81 @@ async def lifespan(app: FastAPI):
endpoints_pub_socket.connect(settings.zmq_settings.internal_pub_address)
app.state.endpoints_pub_socket = endpoints_pub_socket
await endpoints_pub_socket.send_multipart([PROGRAM_STATUS, ProgramStatus.STARTING.value])
# --- Initialize Agents ---
logger.info("Initializing and starting agents.")
agents_to_start = {
"RICommunicationAgent": (
RICommunicationAgent,
{
"name": settings.agent_settings.ri_communication_name,
"address": settings.zmq_settings.ri_communication_address,
"name": settings.agent_settings.ri_communication_agent_name,
"jid": f"{settings.agent_settings.ri_communication_agent_name}"
f"@{settings.agent_settings.host}",
"password": settings.agent_settings.ri_communication_agent_name,
"address": "tcp://*:5555",
"bind": True,
},
),
"LLMAgent": (
LLMAgent,
{
"name": settings.agent_settings.llm_name,
"name": settings.agent_settings.llm_agent_name,
"jid": f"{settings.agent_settings.llm_agent_name}@{settings.agent_settings.host}",
"password": settings.agent_settings.llm_agent_name,
},
),
"BDICoreAgent": (
BDICoreAgent,
{
"name": settings.agent_settings.bdi_core_name,
"name": settings.agent_settings.bdi_core_agent_name,
"jid": f"{settings.agent_settings.bdi_core_agent_name}@"
f"{settings.agent_settings.host}",
"password": settings.agent_settings.bdi_core_agent_name,
"asl": "src/control_backend/agents/bdi/rules.asl",
},
),
"TextBeliefExtractorAgent": (
TextBeliefExtractorAgent,
"BeliefCollectorAgent": (
BeliefCollectorAgent,
{
"name": settings.agent_settings.text_belief_extractor_name,
"name": settings.agent_settings.belief_collector_agent_name,
"jid": f"{settings.agent_settings.belief_collector_agent_name}@"
f"{settings.agent_settings.host}",
"password": settings.agent_settings.belief_collector_agent_name,
},
),
"ProgramManagerAgent": (
BDIProgramManager,
"TBeliefExtractor": (
TBeliefExtractorAgent,
{
"name": settings.agent_settings.bdi_program_manager_name,
"name": settings.agent_settings.text_belief_extractor_agent_name,
"jid": f"{settings.agent_settings.text_belief_extractor_agent_name}@"
f"{settings.agent_settings.host}",
"password": settings.agent_settings.text_belief_extractor_agent_name,
},
),
"UserInterruptAgent": (
UserInterruptAgent,
{
"name": settings.agent_settings.user_interrupt_name,
},
"VADAgent": (
VADAgent,
{"audio_in_address": "tcp://localhost:5558", "audio_in_bind": False},
),
}
agents = []
for name, (agent_class, kwargs) in agents_to_start.items():
try:
logger.debug("Starting agent: %s", name)
agent_instance = agent_class(**kwargs)
agent_instance = agent_class(**{k: v for k, v in kwargs.items() if k != "name"})
await agent_instance.start()
agents.append(agent_instance)
logger.info("Agent '%s' started successfully.", name)
except Exception as e:
logger.error("Failed to start agent '%s': %s", name, e, exc_info=True)
# Consider if the application should continue if an agent fails to start.
raise
logger.info("Application startup complete.")
await endpoints_pub_socket.send_multipart([PROGRAM_STATUS, ProgramStatus.RUNNING.value])
yield
# --- APPLICATION SHUTDOWN ---
logger.info("%s is shutting down.", app.title)
await endpoints_pub_socket.send_multipart([PROGRAM_STATUS, ProgramStatus.STOPPING.value])
# Additional shutdown logic goes here
for agent in agents:
await agent.stop()
# Potential shutdown logic goes here
logger.info("Application shutdown complete.")

View File

@@ -1,19 +0,0 @@
from pydantic import BaseModel
from control_backend.schemas.program import BaseGoal
from control_backend.schemas.program import Belief as ProgramBelief
class BeliefList(BaseModel):
"""
Represents a list of beliefs, separated from a program. Useful in agents which need to
communicate beliefs.
:ivar beliefs: The list of beliefs.
"""
beliefs: list[ProgramBelief]
class GoalList(BaseModel):
goals: list[BaseGoal]

View File

@@ -1,35 +0,0 @@
from pydantic import BaseModel
class Belief(BaseModel):
"""
Represents a single belief in the BDI system.
:ivar name: The functor or name of the belief (e.g., 'user_said').
:ivar arguments: A list of string arguments for the belief, or None if the belief has no
arguments.
"""
name: str
arguments: list[str] | None = None
# To make it hashable
model_config = {"frozen": True}
class BeliefMessage(BaseModel):
"""
A container for communicating beliefs between agents.
:ivar create: Beliefs to create.
:ivar delete: Beliefs to delete.
:ivar replace: Beliefs to replace. Deletes all beliefs with the same name, replacing them with
one new belief.
"""
create: list[Belief] = []
delete: list[Belief] = []
replace: list[Belief] = []
def has_values(self) -> bool:
return len(self.create) > 0 or len(self.delete) > 0 or len(self.replace) > 0
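# Illustrative sketch: building a message that creates one belief and replaces another.
# The belief names and arguments are invented examples.
update = BeliefMessage(
    create=[Belief(name="user_greeted")],
    replace=[Belief(name="user_mood", arguments=["happy"])],
)
assert update.has_values()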

View File

@@ -1,10 +0,0 @@
from pydantic import BaseModel
class ChatMessage(BaseModel):
role: str
content: str
class ChatHistory(BaseModel):
messages: list[ChatMessage]

View File

@@ -1,6 +0,0 @@
from pydantic import BaseModel
class ButtonPressedEvent(BaseModel):
type: str
context: str

View File

@@ -1,19 +0,0 @@
from collections.abc import Iterable
from pydantic import BaseModel
class InternalMessage(BaseModel):
"""
Standard message envelope for communication between agents within the Control Backend.
:ivar to: The name of the destination agent.
:ivar sender: The name of the sending agent.
:ivar body: The string payload (often a JSON-serialized model).
:ivar thread: An optional thread identifier/topic to categorize the message (e.g., 'beliefs').
"""
to: str | Iterable[str]
sender: str | None = None
body: str
thread: str | None = None
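# Illustrative sketch: an envelope carrying a JSON payload to two agents on the 'beliefs'
# thread mentioned above. The agent names and payload are invented examples.
envelope = InternalMessage(
    to=["bdi_core_agent", "llm_agent"],
    body='{"create": [{"name": "user_greeted"}]}',
    thread="beliefs",
)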

View File

@@ -1,18 +0,0 @@
from pydantic import BaseModel
class LLMPromptMessage(BaseModel):
"""
Payload sent from the BDI agent to the LLM agent.
Contains the user's text input along with the dynamic context (norms and goals)
that the LLM should use to generate a response.
:ivar text: The user's input text.
:ivar norms: A list of active behavioral norms.
:ivar goals: A list of active goals to pursue.
"""
text: str
norms: list[str]
goals: list[str]
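# Illustrative sketch: the payload the BDI side might send for one turn. The concrete norm and
# goal texts are invented for demonstration.
prompt = LLMPromptMessage(
    text="I would like to practice ordering a coffee.",
    norms=["Keep answers short and friendly."],
    goals=["Help the user complete a simple ordering dialogue."],
)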

View File

@@ -2,8 +2,4 @@ from pydantic import BaseModel
class Message(BaseModel):
"""
A simple generic message wrapper, typically used for simple API responses.
"""
message: str

View File

@@ -1,222 +0,0 @@
from enum import Enum
from typing import Literal
from pydantic import UUID4, BaseModel
class ProgramElement(BaseModel):
"""
Represents a basic element of our behavior program.
:ivar name: The researcher-assigned name of the element.
:ivar id: Unique identifier.
"""
name: str
id: UUID4
# To make program elements hashable
model_config = {"frozen": True}
class LogicalOperator(Enum):
AND = "AND"
OR = "OR"
type Belief = KeywordBelief | SemanticBelief | InferredBelief
type BasicBelief = KeywordBelief | SemanticBelief
class KeywordBelief(ProgramElement):
"""
Represents a belief that is set when the user spoken text contains a certain keyword.
:ivar keyword: The keyword on which this belief gets set.
"""
name: str = ""
keyword: str
class SemanticBelief(ProgramElement):
"""
Represents a belief that is set by semantic LLM validation.
:ivar description: Description of how to form the belief, used by the LLM.
"""
description: str
class InferredBelief(ProgramElement):
"""
Represents a belief that gets formed by combining two beliefs with a logical AND or OR.
These beliefs can also be :class:`InferredBelief`, leading to arbitrarily deep nesting.
:ivar operator: The logical operator to apply.
:ivar left: The left part of the logical expression.
:ivar right: The right part of the logical expression.
"""
name: str = ""
operator: LogicalOperator
left: Belief
right: Belief
class Norm(ProgramElement):
name: str = ""
norm: str
critical: bool = False
class BasicNorm(Norm):
"""
Represents a behavioral norm.
:ivar norm: The actual norm text describing the behavior.
:ivar critical: When true, this norm should absolutely not be violated (checked separately).
"""
pass
class ConditionalNorm(Norm):
"""
Represents a norm that is only active when a condition is met (i.e., a certain belief holds).
:ivar condition: When to activate this norm.
"""
condition: Belief
type PlanElement = Goal | Action
class Plan(ProgramElement):
"""
Represents a list of steps to execute. Each of these steps can be a goal (with its own plan)
or a simple action.
:ivar steps: The actions or subgoals to execute, in order.
"""
name: str = ""
steps: list[PlanElement]
class BaseGoal(ProgramElement):
"""
Represents an objective to be achieved. This base version does not include a plan to achieve
this goal, and is used in semantic belief extraction.
:ivar description: A description of the goal, used to determine if it has been achieved.
:ivar can_fail: Whether we can fail to achieve the goal after executing the plan.
"""
description: str = ""
can_fail: bool = True
class Goal(BaseGoal):
"""
Represents an objective to be achieved. To reach the goal, we should execute the corresponding
plan. It inherits the `can_fail` field from :class:`BaseGoal`; if true, goal completion is
determined based on the conversation.
Instances of this goal are not hashable because a plan is not hashable.
:ivar plan: The plan to execute.
"""
plan: Plan
type Action = SpeechAction | GestureAction | LLMAction
class SpeechAction(ProgramElement):
"""
Represents the action of the robot speaking a literal text.
:ivar text: The text to speak.
"""
name: str = ""
text: str
class Gesture(BaseModel):
"""
Represents a gesture to be performed. Can be either a single gesture,
or a random gesture from a category (tag).
:ivar type: The type of the gesture, "tag" or "single".
:ivar name: The name of the single gesture or tag.
"""
type: Literal["tag", "single"]
name: str
class GestureAction(ProgramElement):
"""
Represents the action of the robot performing a gesture.
:ivar gesture: The gesture to perform.
"""
name: str = ""
gesture: Gesture
class LLMAction(ProgramElement):
"""
Represents the action of letting an LLM generate a reply based on its chat history
and an additional goal added in the prompt.
:ivar goal: The extra (temporary) goal to add to the LLM.
"""
name: str = ""
goal: str
class Trigger(ProgramElement):
"""
Represents a belief-based trigger. When a belief is set, the corresponding plan is executed.
:ivar condition: When to activate the trigger.
:ivar plan: The plan to execute.
"""
condition: Belief
plan: Plan
class Phase(ProgramElement):
"""
A distinct phase within a program, containing norms, goals, and triggers.
:ivar norms: List of norms active in this phase.
:ivar goals: List of goals to pursue in this phase.
:ivar triggers: List of triggers that define transitions out of this phase.
"""
name: str = ""
norms: list[BasicNorm | ConditionalNorm]
goals: list[Goal]
triggers: list[Trigger]
class Program(BaseModel):
"""
Represents a complete interaction program, consisting of a sequence or set of phases.
:ivar phases: The list of phases that make up the program.
"""
phases: list[Phase]
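# Illustrative sketch: a one-phase program in which hearing the keyword "hello" triggers a short
# spoken reply. All names and texts are invented, and ``uuid4()`` is used only to satisfy the
# required ``id`` fields.
from uuid import uuid4

greeting_belief = KeywordBelief(id=uuid4(), keyword="hello")
greet_plan = Plan(id=uuid4(), steps=[SpeechAction(id=uuid4(), text="Hi there, nice to meet you!")])
greet_trigger = Trigger(id=uuid4(), name="greet_on_hello", condition=greeting_belief, plan=greet_plan)
program = Program(
    phases=[Phase(id=uuid4(), name="introduction", norms=[], goals=[], triggers=[greet_trigger])]
)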

View File

@@ -1,16 +0,0 @@
from enum import Enum
PROGRAM_STATUS = b"internal/program_status"
"""A topic key for the program status."""
class ProgramStatus(Enum):
"""
Used in internal communication to tell agents what the status of the program is.
For example, the VAD agent only starts listening when the program is RUNNING.
"""
STARTING = b"starting"
RUNNING = b"running"
STOPPING = b"stopping"
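# Illustrative sketch: how a subscriber might gate its work on the program status published over
# the internal bus. The socket is assumed to be subscribed to PROGRAM_STATUS and to receive the
# same multipart [topic, value] frames used elsewhere in the backend.
async def wait_until_running(sub_socket) -> None:
    while True:
        topic, value = await sub_socket.recv_multipart()
        if topic == PROGRAM_STATUS and value == ProgramStatus.RUNNING.value:
            return  # Safe to start listening/processing.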

View File

@@ -1,79 +1,20 @@
from enum import Enum
from typing import Any, Literal
from typing import Any
from pydantic import BaseModel, model_validator
from pydantic import BaseModel
class RIEndpoint(str, Enum):
"""
Enumeration of valid endpoints for the Robot Interface (RI).
"""
SPEECH = "actuate/speech"
GESTURE_SINGLE = "actuate/gesture/single"
GESTURE_TAG = "actuate/gesture/tag"
PING = "ping"
NEGOTIATE_PORTS = "negotiate/ports"
PAUSE = ""
class RIMessage(BaseModel):
"""
Base schema for messages sent to the Robot Interface.
:ivar endpoint: The target endpoint/action on the RI.
:ivar data: The payload associated with the action.
"""
endpoint: RIEndpoint
data: Any
class SpeechCommand(RIMessage):
"""
A specific command to make the robot speak.
:ivar endpoint: Fixed to ``RIEndpoint.SPEECH``.
:ivar data: The text string to be spoken.
"""
endpoint: RIEndpoint = RIEndpoint(RIEndpoint.SPEECH)
data: str
is_priority: bool = False
class GestureCommand(RIMessage):
"""
A specific command to make the robot do a gesture.
:ivar endpoint: Should be ``RIEndpoint.GESTURE_SINGLE`` or ``RIEndpoint.GESTURE_TAG``.
:ivar data: The id of the gesture to be executed.
"""
endpoint: Literal[ # pyright: ignore[reportIncompatibleVariableOverride] - We validate this stricter rule ourselves
RIEndpoint.GESTURE_SINGLE, RIEndpoint.GESTURE_TAG
]
data: str
is_priority: bool = False
@model_validator(mode="after")
def check_endpoint(self):
allowed = {
RIEndpoint.GESTURE_SINGLE,
RIEndpoint.GESTURE_TAG,
}
if self.endpoint not in allowed:
raise ValueError("endpoint must be GESTURE_SINGLE or GESTURE_TAG")
return self
class PauseCommand(RIMessage):
"""
A specific command to pause or unpause the robot's actions.
:ivar endpoint: Fixed to ``RIEndpoint.PAUSE``.
:ivar data: A boolean indicating whether to pause (True) or unpause (False).
"""
endpoint: RIEndpoint = RIEndpoint(RIEndpoint.PAUSE)
data: bool
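# Illustrative sketch: building and serializing two commands for the Robot Interface. The spoken
# text and gesture identifier are invented examples.
speak = SpeechCommand(data="Hello, I am Pepper.")
wave = GestureCommand(endpoint=RIEndpoint.GESTURE_SINGLE, data="animations/Stand/Gestures/Hey_1")
payload = speak.model_dump_json()  # e.g. to send across a ZMQ or HTTP boundary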

View File

@@ -1,206 +0,0 @@
import random
from unittest.mock import AsyncMock, MagicMock
import pytest
import zmq
from control_backend.agents.perception.vad_agent import VADAgent
from control_backend.schemas.program_status import PROGRAM_STATUS, ProgramStatus
@pytest.fixture
def zmq_context(mocker):
mock_context = mocker.patch("control_backend.agents.perception.vad_agent.azmq.Context.instance")
mock_context.return_value = MagicMock()
return mock_context
@pytest.fixture
def per_transcription_agent(mocker):
return mocker.patch(
"control_backend.agents.perception.vad_agent.TranscriptionAgent", autospec=True
)
@pytest.fixture(autouse=True)
def torch_load(mocker):
mock_torch = mocker.patch("control_backend.agents.perception.vad_agent.torch")
model = MagicMock()
mock_torch.hub.load.return_value = (model, None)
mock_torch.from_numpy.side_effect = lambda arr: arr
return mock_torch
@pytest.mark.asyncio
async def test_normal_setup(per_transcription_agent):
"""
Test that during normal setup, the VAD agent creates a Streaming behavior and creates audio
sockets, and starts the TranscriptionAgent without loading real models.
"""
per_vad_agent = VADAgent("tcp://localhost:12345", False)
per_vad_agent._streaming_loop = AsyncMock()
async def swallow_background_task(coro):
coro.close()
per_vad_agent.add_behavior = swallow_background_task
await per_vad_agent.setup()
per_transcription_agent.assert_called_once()
per_transcription_agent.return_value.start.assert_called_once()
per_vad_agent._streaming_loop.assert_called_once()
assert per_vad_agent.audio_in_socket is not None
assert per_vad_agent.audio_out_socket is not None
@pytest.mark.parametrize("do_bind", [True, False])
def test_in_socket_creation(zmq_context, do_bind: bool):
"""
Test that the VAD agent creates an audio input socket, differentiating between binding and
connecting.
"""
per_vad_agent = VADAgent(f"tcp://{'*' if do_bind else 'localhost'}:12345", do_bind)
per_vad_agent._connect_audio_in_socket()
assert per_vad_agent.audio_in_socket is not None
zmq_context.return_value.socket.assert_called_once_with(zmq.SUB)
zmq_context.return_value.socket.return_value.setsockopt_string.assert_called_once_with(
zmq.SUBSCRIBE,
"",
)
if do_bind:
zmq_context.return_value.socket.return_value.bind.assert_called_once_with("tcp://*:12345")
else:
zmq_context.return_value.socket.return_value.connect.assert_called_once_with(
"tcp://localhost:12345"
)
def test_out_socket_creation(zmq_context):
"""
Test that the VAD agent creates an audio output socket correctly.
"""
per_vad_agent = VADAgent("tcp://localhost:12345", False)
per_vad_agent._connect_audio_out_socket()
assert per_vad_agent.audio_out_socket is not None
zmq_context.return_value.socket.assert_called_once_with(zmq.PUB)
zmq_context.return_value.socket.return_value.bind.assert_called_once_with("inproc://vad_stream")
@pytest.mark.asyncio
async def test_out_socket_creation_failure(zmq_context):
"""
Test setup failure when the audio output socket cannot be created.
"""
zmq_context.return_value.socket.return_value.bind_to_random_port.side_effect = zmq.ZMQBindError
per_vad_agent = VADAgent("tcp://localhost:12345", False)
per_vad_agent.stop = AsyncMock()
per_vad_agent._reset_stream = AsyncMock()
per_vad_agent._streaming_loop = AsyncMock()
per_vad_agent._connect_audio_out_socket = MagicMock(return_value=None)
async def swallow_background_task(coro):
coro.close()
per_vad_agent.add_behavior = swallow_background_task
await per_vad_agent.setup()
assert per_vad_agent.audio_out_socket is None
per_vad_agent.stop.assert_called_once()
@pytest.mark.asyncio
async def test_stop(zmq_context, per_transcription_agent):
"""
Test that when the VAD agent is stopped, the sockets are closed correctly.
"""
per_vad_agent = VADAgent("tcp://localhost:12345", False)
per_vad_agent._reset_stream = AsyncMock()
per_vad_agent._streaming_loop = AsyncMock()
async def swallow_background_task(coro):
coro.close()
per_vad_agent.add_behavior = swallow_background_task
zmq_context.return_value.socket.return_value.bind_to_random_port.return_value = random.randint(
1000,
10000,
)
await per_vad_agent.setup()
await per_vad_agent.stop()
assert zmq_context.return_value.socket.return_value.close.call_count == 2
assert per_vad_agent.audio_in_socket is None
assert per_vad_agent.audio_out_socket is None
@pytest.mark.asyncio
async def test_application_startup_complete(zmq_context):
"""Check that it resets the stream when the program finishes startup."""
vad_agent = VADAgent("tcp://localhost:12345", False)
vad_agent._running = True
vad_agent._reset_stream = AsyncMock()
vad_agent.program_sub_socket = AsyncMock()
vad_agent.program_sub_socket.recv_multipart.side_effect = [
(PROGRAM_STATUS, ProgramStatus.RUNNING.value),
]
await vad_agent._status_loop()
vad_agent._reset_stream.assert_called_once()
vad_agent.program_sub_socket.close.assert_called_once()
@pytest.mark.asyncio
async def test_application_other_status(zmq_context):
"""
Check that it does nothing when the internal communication message is a status update other
than RUNNING.
"""
vad_agent = VADAgent("tcp://localhost:12345", False)
vad_agent._running = True
vad_agent._reset_stream = AsyncMock()
vad_agent.program_sub_socket = AsyncMock()
vad_agent.program_sub_socket.recv_multipart.side_effect = [
(PROGRAM_STATUS, ProgramStatus.STARTING.value),
(PROGRAM_STATUS, ProgramStatus.STOPPING.value),
]
try:
# Raises StopAsyncIteration the third time it calls `program_sub_socket.recv_multipart`
await vad_agent._status_loop()
except StopAsyncIteration:
pass
vad_agent._reset_stream.assert_not_called()
@pytest.mark.asyncio
async def test_application_message_other(zmq_context):
"""
Check that it does nothing when there's an internal communication message that is not a status
update.
"""
vad_agent = VADAgent("tcp://localhost:12345", False)
vad_agent._running = True
vad_agent._reset_stream = AsyncMock()
vad_agent.program_sub_socket = AsyncMock()
vad_agent.program_sub_socket.recv_multipart.side_effect = [(b"internal/other", b"Whatever")]
try:
# Raises StopAsyncIteration the second time it calls `program_sub_socket.recv_multipart`
await vad_agent._status_loop()
except StopAsyncIteration:
pass
vad_agent._reset_stream.assert_not_called()

View File

@@ -1,99 +0,0 @@
import os
from unittest.mock import AsyncMock, MagicMock
import pytest
import soundfile as sf
import zmq
from control_backend.agents.perception.vad_agent import VADAgent
@pytest.fixture(autouse=True)
def patch_settings():
from control_backend.agents.perception import vad_agent
vad_agent.settings.behaviour_settings.vad_prob_threshold = 0.5
vad_agent.settings.behaviour_settings.vad_non_speech_patience_chunks = 3
vad_agent.settings.behaviour_settings.vad_initial_since_speech = 0
vad_agent.settings.vad_settings.sample_rate_hz = 16_000
@pytest.fixture(autouse=True)
def mock_torch(mocker):
mock_torch = mocker.patch("control_backend.agents.perception.vad_agent.torch")
mock_torch.from_numpy.side_effect = lambda arr: arr
return mock_torch
def get_audio_chunks() -> list[bytes]:
curr_file = os.path.realpath(__file__)
curr_dir = os.path.dirname(curr_file)
file = f"{curr_dir}/speech_with_pauses_16k_1c_float32.wav"
chunk_size = 512
chunks = []
with sf.SoundFile(file, "r") as f:
assert f.samplerate == 16000
assert f.channels == 1
assert f.subtype == "FLOAT"
while True:
data = f.read(chunk_size, dtype="float32")
if len(data) != chunk_size:
break
chunks.append(data.tobytes())
return chunks
@pytest.mark.asyncio
async def test_real_audio(mocker):
"""
Test the VAD agent with input, output, and the model mocked, using real audio as input.
Ensure that it outputs some fragments with audio.
"""
audio_chunks = get_audio_chunks()
audio_in_socket = AsyncMock()
audio_in_socket.recv.side_effect = audio_chunks
mock_poller: MagicMock = mocker.patch("control_backend.agents.perception.vad_agent.azmq.Poller")
mock_poller.return_value.poll = AsyncMock(return_value=[(audio_in_socket, zmq.POLLIN)])
audio_out_socket = AsyncMock()
vad_agent = VADAgent("tcp://localhost:12345", False)
vad_agent.audio_out_socket = audio_out_socket
# Use a fake model that marks most chunks as speech and ends with a few silences
silence_padding = 5
probabilities = [1.0] * len(audio_chunks) + [0.0] * silence_padding
chunk_bytes = audio_chunks + [b"\x00" * len(audio_chunks[0])] * silence_padding
model_item = MagicMock()
model_item.item.side_effect = probabilities
vad_agent.model = MagicMock(return_value=model_item)
class DummyPoller:
def __init__(self, data, agent):
self.data = data
self.agent = agent
async def poll(self, timeout_ms=None):
if self.data:
return self.data.pop(0)
self.agent._running = False
return None
vad_agent.audio_in_poller = DummyPoller(chunk_bytes, vad_agent)
vad_agent._ready = AsyncMock()
vad_agent._running = True
vad_agent.i_since_speech = 0
await vad_agent._streaming_loop()
audio_out_socket.send.assert_called()
for args in audio_out_socket.send.call_args_list:
assert isinstance(args[0][0], bytes)
assert len(args[0][0]) >= 512 * 4 * 3 # Should be at least 3 chunks of audio

View File

@@ -0,0 +1,99 @@
import json
from unittest.mock import AsyncMock, MagicMock, patch
import pytest
import zmq
from control_backend.agents.ri_command_agent import RICommandAgent
@pytest.fixture
def zmq_context(mocker):
mock_context = mocker.patch("control_backend.agents.vad_agent.azmq.Context.instance")
mock_context.return_value = MagicMock()
return mock_context
@pytest.mark.asyncio
async def test_setup_bind(zmq_context, mocker):
"""Test setup with bind=True"""
fake_socket = zmq_context.return_value.socket.return_value
agent = RICommandAgent("test@server", "password", address="tcp://localhost:5555", bind=True)
settings = mocker.patch("control_backend.agents.ri_command_agent.settings")
settings.zmq_settings.internal_sub_address = "tcp://internal:1234"
await agent.setup()
# Ensure PUB socket bound
fake_socket.bind.assert_any_call("tcp://localhost:5555")
# Ensure SUB socket connected to internal address and subscribed
fake_socket.connect.assert_any_call("tcp://internal:1234")
fake_socket.setsockopt.assert_any_call(zmq.SUBSCRIBE, b"command")
# Ensure behaviour attached
assert any(isinstance(b, agent.SendCommandsBehaviour) for b in agent.behaviours)
@pytest.mark.asyncio
async def test_setup_connect(zmq_context, mocker):
"""Test setup with bind=False"""
fake_socket = zmq_context.return_value.socket.return_value
agent = RICommandAgent("test@server", "password", address="tcp://localhost:5555", bind=False)
settings = mocker.patch("control_backend.agents.ri_command_agent.settings")
settings.zmq_settings.internal_sub_address = "tcp://internal:1234"
await agent.setup()
# Ensure PUB socket connected
fake_socket.connect.assert_any_call("tcp://localhost:5555")
@pytest.mark.asyncio
async def test_send_commands_behaviour_valid_message():
"""Test behaviour with valid JSON message"""
fake_socket = AsyncMock()
message_dict = {"message": "hello"}
fake_socket.recv_multipart = AsyncMock(
return_value=(b"command", json.dumps(message_dict).encode("utf-8"))
)
fake_socket.send_json = AsyncMock()
agent = RICommandAgent("test@server", "password")
agent.subsocket = fake_socket
agent.pubsocket = fake_socket
behaviour = agent.SendCommandsBehaviour()
behaviour.agent = agent
with patch("control_backend.agents.ri_command_agent.SpeechCommand") as MockSpeechCommand:
mock_message = MagicMock()
MockSpeechCommand.model_validate.return_value = mock_message
await behaviour.run()
fake_socket.recv_multipart.assert_awaited()
fake_socket.send_json.assert_awaited_with(mock_message.model_dump())
@pytest.mark.asyncio
async def test_send_commands_behaviour_invalid_message(caplog):
"""Test behaviour with invalid JSON message triggers error logging"""
fake_socket = AsyncMock()
fake_socket.recv_multipart = AsyncMock(return_value=(b"command", b"{invalid_json}"))
fake_socket.send_json = AsyncMock()
agent = RICommandAgent("test@server", "password")
agent.subsocket = fake_socket
agent.pubsocket = fake_socket
behaviour = agent.SendCommandsBehaviour()
behaviour.agent = agent
with caplog.at_level("ERROR"):
await behaviour.run()
fake_socket.recv_multipart.assert_awaited()
fake_socket.send_json.assert_not_awaited()
assert "Error processing message" in caplog.text

View File

@@ -0,0 +1,551 @@
import asyncio
from unittest.mock import ANY, AsyncMock, MagicMock, patch
import pytest
from control_backend.agents.ri_communication_agent import RICommunicationAgent
def fake_json_correct_negototiate_1():
return AsyncMock(
return_value={
"endpoint": "negotiate/ports",
"data": [
{"id": "main", "port": 5555, "bind": False},
{"id": "actuation", "port": 5556, "bind": True},
],
}
)
def fake_json_correct_negototiate_2():
return AsyncMock(
return_value={
"endpoint": "negotiate/ports",
"data": [
{"id": "main", "port": 5555, "bind": False},
{"id": "actuation", "port": 5557, "bind": True},
],
}
)
def fake_json_correct_negototiate_3():
return AsyncMock(
return_value={
"endpoint": "negotiate/ports",
"data": [
{"id": "main", "port": 5555, "bind": True},
{"id": "actuation", "port": 5557, "bind": True},
],
}
)
def fake_json_correct_negototiate_4():
# Different port, do bind
return AsyncMock(
return_value={
"endpoint": "negotiate/ports",
"data": [
{"id": "main", "port": 4555, "bind": True},
{"id": "actuation", "port": 5557, "bind": True},
],
}
)
def fake_json_correct_negototiate_5():
# Different port, don't bind.
return AsyncMock(
return_value={
"endpoint": "negotiate/ports",
"data": [
{"id": "main", "port": 4555, "bind": False},
{"id": "actuation", "port": 5557, "bind": True},
],
}
)
def fake_json_wrong_negototiate_1():
return AsyncMock(return_value={"endpoint": "ping", "data": ""})
def fake_json_invalid_id_negototiate():
return AsyncMock(
return_value={
"endpoint": "negotiate/ports",
"data": [
{"id": "banana", "port": 4555, "bind": False},
{"id": "tomato", "port": 5557, "bind": True},
],
}
)
@pytest.fixture
def zmq_context(mocker):
mock_context = mocker.patch("control_backend.agents.vad_agent.azmq.Context.instance")
mock_context.return_value = MagicMock()
return mock_context
@pytest.mark.asyncio
async def test_setup_creates_socket_and_negotiate_1(zmq_context):
"""
Test the setup of the communication agent
"""
# --- Arrange ---
fake_socket = zmq_context.return_value.socket.return_value
fake_socket.send_json = AsyncMock()
fake_socket.recv_json = fake_json_correct_negototiate_1()
# Mock RICommandAgent agent startup
with patch(
"control_backend.agents.ri_communication_agent.RICommandAgent", autospec=True
) as MockCommandAgent:
fake_agent_instance = MockCommandAgent.return_value
fake_agent_instance.start = AsyncMock()
# --- Act ---
agent = RICommunicationAgent(
"test@server", "password", address="tcp://localhost:5555", bind=False
)
await agent.setup()
# --- Assert ---
fake_socket.connect.assert_any_call("tcp://localhost:5555")
fake_socket.send_json.assert_any_call({"endpoint": "negotiate/ports", "data": None})
fake_socket.recv_json.assert_awaited()
fake_agent_instance.start.assert_awaited()
MockCommandAgent.assert_called_once_with(
ANY, # Server Name
ANY, # Server Password
address="tcp://*:5556", # derived from the 'port' value in negotiation
bind=True,
)
# Ensure the agent attached a ListenBehaviour
assert any(isinstance(b, agent.ListenBehaviour) for b in agent.behaviours)
@pytest.mark.asyncio
async def test_setup_creates_socket_and_negotiate_2(zmq_context):
"""
Test the setup of the communication agent
"""
# --- Arrange ---
fake_socket = zmq_context.return_value.socket.return_value
fake_socket.send_json = AsyncMock()
fake_socket.recv_json = fake_json_correct_negototiate_2()
# Mock RICommandAgent agent startup
with patch(
"control_backend.agents.ri_communication_agent.RICommandAgent", autospec=True
) as MockCommandAgent:
fake_agent_instance = MockCommandAgent.return_value
fake_agent_instance.start = AsyncMock()
# --- Act ---
agent = RICommunicationAgent(
"test@server", "password", address="tcp://localhost:5555", bind=False
)
await agent.setup()
# --- Assert ---
fake_socket.connect.assert_any_call("tcp://localhost:5555")
fake_socket.send_json.assert_any_call({"endpoint": "negotiate/ports", "data": None})
fake_socket.recv_json.assert_awaited()
fake_agent_instance.start.assert_awaited()
MockCommandAgent.assert_called_once_with(
ANY, # Server Name
ANY, # Server Password
address="tcp://*:5557", # derived from the 'port' value in negotiation
bind=True,
)
# Ensure the agent attached a ListenBehaviour
assert any(isinstance(b, agent.ListenBehaviour) for b in agent.behaviours)
@pytest.mark.asyncio
async def test_setup_creates_socket_and_negotiate_3(zmq_context, caplog):
"""
Test the functionality of setup with incorrect negotiation message
"""
# --- Arrange ---
fake_socket = zmq_context.return_value.socket.return_value
fake_socket.send_json = AsyncMock()
fake_socket.recv_json = fake_json_wrong_negototiate_1()
# Mock RICommandAgent agent startup
# We are sending wrong negotiation info to the communication agent,
# so we should retry and expect a better response, within a limited time.
with patch(
"control_backend.agents.ri_communication_agent.RICommandAgent", autospec=True
) as MockCommandAgent:
fake_agent_instance = MockCommandAgent.return_value
fake_agent_instance.start = AsyncMock()
# --- Act ---
with caplog.at_level("ERROR"):
agent = RICommunicationAgent(
"test@server", "password", address="tcp://localhost:5555", bind=False
)
await agent.setup(max_retries=1)
# --- Assert ---
fake_socket.connect.assert_any_call("tcp://localhost:5555")
fake_socket.recv_json.assert_awaited()
# Since it failed, there should not be any command agent.
fake_agent_instance.start.assert_not_awaited()
assert "Failed to set up RICommunicationAgent" in caplog.text
# Ensure the agent did not attach a ListenBehaviour
assert not any(isinstance(b, agent.ListenBehaviour) for b in agent.behaviours)
@pytest.mark.asyncio
async def test_setup_creates_socket_and_negotiate_4(zmq_context):
"""
Test the setup of the communication agent with different bind value
"""
# --- Arrange ---
fake_socket = zmq_context.return_value.socket.return_value
fake_socket.send_json = AsyncMock()
fake_socket.recv_json = fake_json_correct_negototiate_3()
# Mock RICommandAgent agent startup
with patch(
"control_backend.agents.ri_communication_agent.RICommandAgent", autospec=True
) as MockCommandAgent:
fake_agent_instance = MockCommandAgent.return_value
fake_agent_instance.start = AsyncMock()
# --- Act ---
agent = RICommunicationAgent(
"test@server", "password", address="tcp://localhost:5555", bind=True
)
await agent.setup()
# --- Assert ---
fake_socket.bind.assert_any_call("tcp://localhost:5555")
fake_socket.send_json.assert_any_call({"endpoint": "negotiate/ports", "data": None})
fake_socket.recv_json.assert_awaited()
fake_agent_instance.start.assert_awaited()
MockCommandAgent.assert_called_once_with(
ANY, # Server Name
ANY, # Server Password
address="tcp://*:5557", # derived from the 'port' value in negotiation
bind=True,
)
# Ensure the agent attached a ListenBehaviour
assert any(isinstance(b, agent.ListenBehaviour) for b in agent.behaviours)
@pytest.mark.asyncio
async def test_setup_creates_socket_and_negotiate_5(zmq_context):
"""
Test the setup of the communication agent
"""
# --- Arrange ---
fake_socket = zmq_context.return_value.socket.return_value
fake_socket.send_json = AsyncMock()
fake_socket.recv_json = fake_json_correct_negototiate_4()
# Mock RICommandAgent agent startup
with patch(
"control_backend.agents.ri_communication_agent.RICommandAgent", autospec=True
) as MockCommandAgent:
fake_agent_instance = MockCommandAgent.return_value
fake_agent_instance.start = AsyncMock()
# --- Act ---
agent = RICommunicationAgent(
"test@server", "password", address="tcp://localhost:5555", bind=False
)
await agent.setup()
# --- Assert ---
fake_socket.connect.assert_any_call("tcp://localhost:5555")
fake_socket.send_json.assert_any_call({"endpoint": "negotiate/ports", "data": None})
fake_socket.recv_json.assert_awaited()
fake_agent_instance.start.assert_awaited()
MockCommandAgent.assert_called_once_with(
ANY, # Server Name
ANY, # Server Password
address="tcp://*:5557", # derived from the 'port' value in negotiation
bind=True,
)
# Ensure the agent attached a ListenBehaviour
assert any(isinstance(b, agent.ListenBehaviour) for b in agent.behaviours)
@pytest.mark.asyncio
async def test_setup_creates_socket_and_negotiate_6(zmq_context):
"""
Test the setup of the communication agent
"""
# --- Arrange ---
fake_socket = zmq_context.return_value.socket.return_value
fake_socket.send_json = AsyncMock()
fake_socket.recv_json = fake_json_correct_negototiate_5()
# Mock RICommandAgent agent startup
with patch(
"control_backend.agents.ri_communication_agent.RICommandAgent", autospec=True
) as MockCommandAgent:
fake_agent_instance = MockCommandAgent.return_value
fake_agent_instance.start = AsyncMock()
# --- Act ---
agent = RICommunicationAgent(
"test@server", "password", address="tcp://localhost:5555", bind=False
)
await agent.setup()
# --- Assert ---
fake_socket.connect.assert_any_call("tcp://localhost:5555")
fake_socket.send_json.assert_any_call({"endpoint": "negotiate/ports", "data": None})
fake_socket.recv_json.assert_awaited()
fake_agent_instance.start.assert_awaited()
MockCommandAgent.assert_called_once_with(
ANY, # Server Name
ANY, # Server Password
address="tcp://*:5557", # derived from the 'port' value in negotiation
bind=True,
)
# Ensure the agent attached a ListenBehaviour
assert any(isinstance(b, agent.ListenBehaviour) for b in agent.behaviours)
@pytest.mark.asyncio
async def test_setup_creates_socket_and_negotiate_7(zmq_context, caplog):
"""
Test the functionality of setup with incorrect id
"""
# --- Arrange ---
fake_socket = zmq_context.return_value.socket.return_value
fake_socket.send_json = AsyncMock()
fake_socket.recv_json = fake_json_invalid_id_negototiate()
# Mock RICommandAgent agent startup
# We are sending wrong negotiation info to the communication agent,
# so we should retry and expect a better response, within a limited time.
with patch(
"control_backend.agents.ri_communication_agent.RICommandAgent", autospec=True
) as MockCommandAgent:
fake_agent_instance = MockCommandAgent.return_value
fake_agent_instance.start = AsyncMock()
# --- Act ---
with caplog.at_level("WARNING"):
agent = RICommunicationAgent(
"test@server", "password", address="tcp://localhost:5555", bind=False
)
await agent.setup(max_retries=1)
# --- Assert ---
fake_socket.connect.assert_any_call("tcp://localhost:5555")
fake_socket.recv_json.assert_awaited()
# Since it failed, there should not be any command agent.
fake_agent_instance.start.assert_not_awaited()
assert "Unhandled negotiation id:" in caplog.text
@pytest.mark.asyncio
async def test_setup_creates_socket_and_negotiate_timeout(zmq_context, caplog):
"""
Test the functionality of setup with incorrect negotiation message
"""
# --- Arrange ---
fake_socket = zmq_context.return_value.socket.return_value
fake_socket.send_json = AsyncMock()
fake_socket.recv_json = AsyncMock(side_effect=asyncio.TimeoutError)
with patch(
"control_backend.agents.ri_communication_agent.RICommandAgent", autospec=True
) as MockCommandAgent:
fake_agent_instance = MockCommandAgent.return_value
fake_agent_instance.start = AsyncMock()
# --- Act ---
with caplog.at_level("WARNING"):
agent = RICommunicationAgent(
"test@server", "password", address="tcp://localhost:5555", bind=False
)
await agent.setup(max_retries=1)
# --- Assert ---
fake_socket.connect.assert_any_call("tcp://localhost:5555")
# Since it failed, there should not be any command agent.
fake_agent_instance.start.assert_not_awaited()
assert "No connection established in 20 seconds" in caplog.text
# Ensure the agent did not attach a ListenBehaviour
assert not any(isinstance(b, agent.ListenBehaviour) for b in agent.behaviours)
@pytest.mark.asyncio
async def test_listen_behaviour_ping_correct(caplog):
fake_socket = AsyncMock()
fake_socket.send_json = AsyncMock()
fake_socket.recv_json = AsyncMock(return_value={"endpoint": "ping", "data": {}})
# TODO: Integration test between actual server and password needed for spade agents
agent = RICommunicationAgent("test@server", "password")
agent.req_socket = fake_socket
behaviour = agent.ListenBehaviour()
agent.add_behaviour(behaviour)
# Run once (CyclicBehaviour normally loops)
with caplog.at_level("DEBUG"):
await behaviour.run()
fake_socket.send_json.assert_awaited()
fake_socket.recv_json.assert_awaited()
assert "Received message" in caplog.text
@pytest.mark.asyncio
async def test_listen_behaviour_ping_wrong_endpoint(caplog):
"""
Test if our listen behaviour can work with wrong messages (wrong endpoint)
"""
fake_socket = AsyncMock()
fake_socket.send_json = AsyncMock()
# This is a message for the wrong endpoint >:(
fake_socket.recv_json = AsyncMock(
return_value={
"endpoint": "negotiate/ports",
"data": [
{"id": "main", "port": 5555, "bind": False},
{"id": "actuation", "port": 5556, "bind": True},
],
}
)
agent = RICommunicationAgent("test@server", "password")
agent.req_socket = fake_socket
behaviour = agent.ListenBehaviour()
agent.add_behaviour(behaviour)
# Run once (CyclicBehaviour normally loops)
with caplog.at_level("INFO"):
await behaviour.run()
assert "Received message with topic different than ping, while ping expected." in caplog.text
fake_socket.send_json.assert_awaited()
fake_socket.recv_json.assert_awaited()
@pytest.mark.asyncio
async def test_listen_behaviour_timeout(zmq_context, caplog):
fake_socket = zmq_context.return_value.socket.return_value
fake_socket.send_json = AsyncMock()
# recv_json will never resolve, simulate timeout
fake_socket.recv_json = AsyncMock(side_effect=asyncio.TimeoutError)
agent = RICommunicationAgent("test@server", "password")
agent.req_socket = fake_socket
behaviour = agent.ListenBehaviour()
agent.add_behaviour(behaviour)
with caplog.at_level("INFO"):
await behaviour.run()
assert "No ping retrieved in 3 seconds" in caplog.text
@pytest.mark.asyncio
async def test_listen_behaviour_ping_no_endpoint(caplog):
"""
Test if our listen behaviour can work with malformed messages (missing endpoint)
"""
fake_socket = AsyncMock()
fake_socket.send_json = AsyncMock()
# This is a message without endpoint >:(
fake_socket.recv_json = AsyncMock(
return_value={
"data": "I dont have an endpoint >:)",
}
)
agent = RICommunicationAgent("test@server", "password")
agent.req_socket = fake_socket
behaviour = agent.ListenBehaviour()
agent.add_behaviour(behaviour)
# Run once (CyclicBehaviour normally loops)
with caplog.at_level("ERROR"):
await behaviour.run()
assert "No received endpoint in message, excepted ping endpoint." in caplog.text
fake_socket.send_json.assert_awaited()
fake_socket.recv_json.assert_awaited()
@pytest.mark.asyncio
async def test_setup_unexpected_exception(zmq_context, caplog):
fake_socket = zmq_context.return_value.socket.return_value
fake_socket.send_json = AsyncMock()
# Simulate unexpected exception during recv_json()
fake_socket.recv_json = AsyncMock(side_effect=Exception("boom!"))
agent = RICommunicationAgent(
"test@server", "password", address="tcp://localhost:5555", bind=False
)
with caplog.at_level("ERROR"):
await agent.setup(max_retries=1)
# Ensure that the error was logged
assert "Unexpected error during negotiation: boom!" in caplog.text
@pytest.mark.asyncio
async def test_setup_unpacking_exception(zmq_context, caplog):
# --- Arrange ---
fake_socket = zmq_context.return_value.socket.return_value
fake_socket.send_json = AsyncMock()
# Make recv_json return malformed negotiation data to trigger unpacking exception
malformed_data = {
"endpoint": "negotiate/ports",
"data": [{"id": "main"}],
} # missing 'port' and 'bind'
fake_socket.recv_json = AsyncMock(return_value=malformed_data)
# Patch RICommandAgent so it won't actually start
with patch(
"control_backend.agents.ri_communication_agent.RICommandAgent", autospec=True
) as MockCommandAgent:
fake_agent_instance = MockCommandAgent.return_value
fake_agent_instance.start = AsyncMock()
agent = RICommunicationAgent(
"test@server", "password", address="tcp://localhost:5555", bind=False
)
# --- Act & Assert ---
with caplog.at_level("ERROR"):
await agent.setup(max_retries=1)
# Ensure the unpacking exception was logged
assert "Error unpacking negotiation data" in caplog.text
# Ensure no command agent was started
fake_agent_instance.start.assert_not_awaited()
# Ensure no behaviour was attached
assert not any(isinstance(b, agent.ListenBehaviour) for b in agent.behaviours)

View File

@@ -0,0 +1,120 @@
import random
from unittest.mock import AsyncMock, MagicMock, patch
import pytest
import zmq
from spade.agent import Agent
from control_backend.agents.vad_agent import VADAgent
@pytest.fixture
def zmq_context(mocker):
mock_context = mocker.patch("control_backend.agents.vad_agent.azmq.Context.instance")
mock_context.return_value = MagicMock()
return mock_context
@pytest.fixture
def streaming(mocker):
return mocker.patch("control_backend.agents.vad_agent.Streaming")
@pytest.fixture
def transcription_agent(mocker):
return mocker.patch("control_backend.agents.vad_agent.TranscriptionAgent", autospec=True)
@pytest.mark.asyncio
async def test_normal_setup(streaming, transcription_agent):
"""
Test that during normal setup, the VAD agent creates a Streaming behavior and creates audio
sockets, and starts the TranscriptionAgent without loading real models.
"""
vad_agent = VADAgent("tcp://localhost:12345", False)
vad_agent.add_behaviour = MagicMock()
await vad_agent.setup()
streaming.assert_called_once()
vad_agent.add_behaviour.assert_called_once_with(streaming.return_value)
transcription_agent.assert_called_once()
transcription_agent.return_value.start.assert_called_once()
assert vad_agent.audio_in_socket is not None
assert vad_agent.audio_out_socket is not None
@pytest.mark.parametrize("do_bind", [True, False])
def test_in_socket_creation(zmq_context, do_bind: bool):
"""
Test that the VAD agent creates an audio input socket, differentiating between binding and
connecting.
"""
vad_agent = VADAgent(f"tcp://{'*' if do_bind else 'localhost'}:12345", do_bind)
vad_agent._connect_audio_in_socket()
assert vad_agent.audio_in_socket is not None
zmq_context.return_value.socket.assert_called_once_with(zmq.SUB)
zmq_context.return_value.socket.return_value.setsockopt_string.assert_called_once_with(
zmq.SUBSCRIBE,
"",
)
if do_bind:
zmq_context.return_value.socket.return_value.bind.assert_called_once_with("tcp://*:12345")
else:
zmq_context.return_value.socket.return_value.connect.assert_called_once_with(
"tcp://localhost:12345"
)
def test_out_socket_creation(zmq_context):
"""
Test that the VAD agent creates an audio output socket correctly.
"""
vad_agent = VADAgent("tcp://localhost:12345", False)
vad_agent._connect_audio_out_socket()
assert vad_agent.audio_out_socket is not None
zmq_context.return_value.socket.assert_called_once_with(zmq.PUB)
zmq_context.return_value.socket.return_value.bind_to_random_port.assert_called_once()
@pytest.mark.asyncio
async def test_out_socket_creation_failure(zmq_context):
"""
Test setup failure when the audio output socket cannot be created.
"""
with patch.object(Agent, "stop", new_callable=AsyncMock) as mock_super_stop:
zmq_context.return_value.socket.return_value.bind_to_random_port.side_effect = (
zmq.ZMQBindError
)
vad_agent = VADAgent("tcp://localhost:12345", False)
await vad_agent.setup()
assert vad_agent.audio_out_socket is None
mock_super_stop.assert_called_once()
@pytest.mark.asyncio
async def test_stop(zmq_context, transcription_agent):
"""
Test that when the VAD agent is stopped, the sockets are closed correctly.
"""
vad_agent = VADAgent("tcp://localhost:12345", False)
zmq_context.return_value.socket.return_value.bind_to_random_port.return_value = random.randint(
1000,
10000,
)
await vad_agent.setup()
await vad_agent.stop()
assert zmq_context.return_value.socket.return_value.close.call_count == 2
assert vad_agent.audio_in_socket is None
assert vad_agent.audio_out_socket is None

View File

@@ -0,0 +1,59 @@
import os
from unittest.mock import AsyncMock, MagicMock
import pytest
import soundfile as sf
import zmq
from control_backend.agents.vad_agent import Streaming
def get_audio_chunks() -> list[bytes]:
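    """Read the bundled test WAV file (16 kHz, mono, float32) and split it into raw 512-sample chunks."""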
curr_file = os.path.realpath(__file__)
curr_dir = os.path.dirname(curr_file)
file = f"{curr_dir}/speech_with_pauses_16k_1c_float32.wav"
chunk_size = 512
chunks = []
with sf.SoundFile(file, "r") as f:
assert f.samplerate == 16000
assert f.channels == 1
assert f.subtype == "FLOAT"
while True:
data = f.read(chunk_size, dtype="float32")
if len(data) != chunk_size:
break
chunks.append(data.tobytes())
return chunks
@pytest.mark.asyncio
async def test_real_audio(mocker):
"""
    Test the VAD agent with only the input and output sockets mocked, using the real model and
    real audio as input. Ensure that it outputs some fragments containing audio.
"""
audio_chunks = get_audio_chunks()
audio_in_socket = AsyncMock()
audio_in_socket.recv.side_effect = audio_chunks
mock_poller: MagicMock = mocker.patch("control_backend.agents.vad_agent.zmq.Poller")
mock_poller.return_value.poll.return_value = [(audio_in_socket, zmq.POLLIN)]
audio_out_socket = AsyncMock()
vad_streamer = Streaming(audio_in_socket, audio_out_socket)
vad_streamer._ready = True
vad_streamer.agent = MagicMock()
for _ in audio_chunks:
await vad_streamer.run()
audio_out_socket.send.assert_called()
for args in audio_out_socket.send.call_args_list:
assert isinstance(args[0][0], bytes)
assert len(args[0][0]) >= 512 * 4 * 3 # Should be at least 3 chunks of audio

View File

@@ -0,0 +1,61 @@
from unittest.mock import AsyncMock
import pytest
from fastapi import FastAPI
from fastapi.testclient import TestClient
from control_backend.api.v1.endpoints import command
from control_backend.schemas.ri_message import SpeechCommand
@pytest.fixture
def app():
"""
Creates a FastAPI test app and attaches the router under test.
    The mock endpoints_pub_socket is attached to app.state by the individual tests.
"""
app = FastAPI()
app.include_router(command.router)
return app
@pytest.fixture
def client(app):
"""Create a test client for the app."""
return TestClient(app)
def test_receive_command_success(client):
"""
Test for successful reception of a command. Ensures the status code is 202 and the response body
is correct. It also verifies that the ZeroMQ socket's send_multipart method is called with the
expected data.
"""
# Arrange
mock_pub_socket = AsyncMock()
client.app.state.endpoints_pub_socket = mock_pub_socket
command_data = {"endpoint": "actuate/speech", "data": "This is a test"}
speech_command = SpeechCommand(**command_data)
# Act
response = client.post("/command", json=command_data)
# Assert
assert response.status_code == 202
assert response.json() == {"status": "Command received"}
# Verify that the ZMQ socket was used correctly
mock_pub_socket.send_multipart.assert_awaited_once_with(
[b"command", speech_command.model_dump_json().encode()]
)
def test_receive_command_invalid_payload(client):
"""
Test invalid data handling (schema validation).
"""
# Missing required field(s)
bad_payload = {"invalid": "data"}
response = client.post("/command", json=bad_payload)
assert response.status_code == 422 # validation error

View File

@@ -0,0 +1,26 @@
import pytest
from pydantic import ValidationError
from control_backend.schemas.ri_message import RIEndpoint, RIMessage, SpeechCommand
def valid_command_1():
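    """A SpeechCommand that should validate as both an RIMessage and a SpeechCommand."""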
return SpeechCommand(data="Hallo?")
def invalid_command_1():
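    """A plain RIMessage (PING endpoint) that validates as an RIMessage but not as a SpeechCommand."""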
return RIMessage(endpoint=RIEndpoint.PING, data="Hello again.")
def test_valid_speech_command_1():
command = valid_command_1()
RIMessage.model_validate(command)
SpeechCommand.model_validate(command)
def test_invalid_speech_command_1():
command = invalid_command_1()
RIMessage.model_validate(command)
with pytest.raises(ValidationError):
SpeechCommand.model_validate(command)

View File

@@ -1,514 +0,0 @@
import json
from unittest.mock import AsyncMock, MagicMock
import pytest
import zmq
from control_backend.agents.actuation.robot_gesture_agent import RobotGestureAgent
from control_backend.core.agent_system import InternalMessage
from control_backend.schemas.ri_message import RIEndpoint
@pytest.fixture
def zmq_context(mocker):
"""Mock the ZMQ context."""
mock_context = mocker.patch(
"control_backend.agents.actuation.robot_gesture_agent.azmq.Context.instance"
)
mock_context.return_value = MagicMock()
return mock_context
@pytest.mark.asyncio
async def test_setup_bind(zmq_context, mocker):
"""Setup binds and subscribes to internal commands."""
fake_socket = zmq_context.return_value.socket.return_value
agent = RobotGestureAgent("robot_gesture", address="tcp://localhost:5556", bind=True)
settings = mocker.patch("control_backend.agents.actuation.robot_gesture_agent.settings")
settings.zmq_settings.internal_sub_address = "tcp://internal:1234"
def close_coro(coro):
coro.close()
return MagicMock()
agent.add_behavior = MagicMock(side_effect=close_coro)
await agent.setup()
# Check PUB socket binding
fake_socket.bind.assert_any_call("tcp://localhost:5556")
# Check REP socket binding
fake_socket.bind.assert_called()
# Check SUB socket connection and subscriptions
fake_socket.connect.assert_any_call("tcp://internal:1234")
fake_socket.setsockopt.assert_any_call(zmq.SUBSCRIBE, b"command")
fake_socket.setsockopt.assert_any_call(zmq.SUBSCRIBE, b"send_gestures")
# Check behavior was added (twice: once for command loop, once for fetch gestures loop)
assert agent.add_behavior.call_count == 2
@pytest.mark.asyncio
async def test_setup_connect(zmq_context, mocker):
"""Setup connects when bind=False."""
fake_socket = zmq_context.return_value.socket.return_value
agent = RobotGestureAgent("robot_gesture", address="tcp://localhost:5556", bind=False)
settings = mocker.patch("control_backend.agents.actuation.robot_gesture_agent.settings")
settings.zmq_settings.internal_sub_address = "tcp://internal:1234"
def close_coro(coro):
coro.close()
return MagicMock()
agent.add_behavior = MagicMock(side_effect=close_coro)
await agent.setup()
# Check PUB socket connection (not binding)
fake_socket.connect.assert_any_call("tcp://localhost:5556")
fake_socket.connect.assert_any_call("tcp://internal:1234")
# Check REP socket binding (always binds)
fake_socket.bind.assert_called()
# Check behavior was added (twice)
assert agent.add_behavior.call_count == 2
@pytest.mark.asyncio
async def test_handle_message_sends_valid_gesture_command():
"""Internal message with valid gesture tag is forwarded to robot pub socket."""
pubsocket = AsyncMock()
agent = RobotGestureAgent("robot_gesture", gesture_data=["hello", "yes", "no"], address="")
agent.pubsocket = pubsocket
payload = {
"endpoint": RIEndpoint.GESTURE_TAG,
"data": "hello", # "hello" is in gesture_data
}
msg = InternalMessage(to="robot", sender="tester", body=json.dumps(payload))
await agent.handle_message(msg)
pubsocket.send_json.assert_awaited_once()
@pytest.mark.asyncio
async def test_handle_message_sends_non_gesture_command():
"""Internal message with non-gesture endpoint is not forwarded by this agent."""
pubsocket = AsyncMock()
agent = RobotGestureAgent("robot_gesture", gesture_data=["hello", "yes", "no"], address="")
agent.pubsocket = pubsocket
payload = {"endpoint": "some_other_endpoint", "data": "invalid_tag_not_in_list"}
msg = InternalMessage(to="robot", sender="tester", body=json.dumps(payload))
await agent.handle_message(msg)
# Non-gesture endpoints should not be forwarded by this agent
pubsocket.send_json.assert_not_awaited()
@pytest.mark.asyncio
async def test_handle_message_rejects_invalid_gesture_tag():
"""Internal message with invalid gesture tag is not forwarded."""
pubsocket = AsyncMock()
agent = RobotGestureAgent("robot_gesture", gesture_data=["hello", "yes", "no"], address="")
agent.pubsocket = pubsocket
# Use a tag that's not in gesture_data
payload = {"endpoint": RIEndpoint.GESTURE_TAG, "data": "invalid_tag_not_in_list"}
msg = InternalMessage(to="robot", sender="tester", body=json.dumps(payload))
await agent.handle_message(msg)
pubsocket.send_json.assert_not_awaited()
@pytest.mark.asyncio
async def test_handle_message_sends_valid_single_gesture_command():
"""Internal message with valid single gesture is forwarded."""
pubsocket = AsyncMock()
agent = RobotGestureAgent("robot_gesture", single_gesture_data=["wave", "point"], address="")
agent.pubsocket = pubsocket
payload = {
"endpoint": RIEndpoint.GESTURE_SINGLE,
"data": "wave",
}
msg = InternalMessage(to="robot", sender="tester", body=json.dumps(payload))
await agent.handle_message(msg)
pubsocket.send_json.assert_awaited_once()
@pytest.mark.asyncio
async def test_handle_message_rejects_invalid_single_gesture():
"""Internal message with invalid single gesture is not forwarded."""
pubsocket = AsyncMock()
agent = RobotGestureAgent("robot_gesture", single_gesture_data=["wave", "point"], address="")
agent.pubsocket = pubsocket
payload = {
"endpoint": RIEndpoint.GESTURE_SINGLE,
"data": "dance",
}
msg = InternalMessage(to="robot", sender="tester", body=json.dumps(payload))
await agent.handle_message(msg)
pubsocket.send_json.assert_not_awaited()
@pytest.mark.asyncio
async def test_zmq_command_loop_valid_single_gesture_payload():
"""UI command with valid single gesture is read from SUB and published."""
command = {"endpoint": RIEndpoint.GESTURE_SINGLE, "data": "wave"}
fake_socket = AsyncMock()
async def recv_once():
agent._running = False
return b"command", json.dumps(command).encode("utf-8")
fake_socket.recv_multipart = recv_once
fake_socket.send_json = AsyncMock()
agent = RobotGestureAgent("robot_gesture", single_gesture_data=["wave", "point"], address="")
agent.subsocket = fake_socket
agent.pubsocket = fake_socket
agent._running = True
await agent._zmq_command_loop()
fake_socket.send_json.assert_awaited_once()
@pytest.mark.asyncio
async def test_handle_message_invalid_payload():
"""Invalid payload is caught and does not send."""
pubsocket = AsyncMock()
agent = RobotGestureAgent("robot_gesture", gesture_data=["hello", "yes", "no"], address="")
agent.pubsocket = pubsocket
msg = InternalMessage(to="robot", sender="tester", body=json.dumps({"bad": "data"}))
await agent.handle_message(msg)
pubsocket.send_json.assert_not_awaited()
@pytest.mark.asyncio
async def test_zmq_command_loop_valid_gesture_payload():
"""UI command with valid gesture tag is read from SUB and published."""
command = {"endpoint": RIEndpoint.GESTURE_TAG, "data": "hello"}
fake_socket = AsyncMock()
async def recv_once():
# stop after first iteration
agent._running = False
return b"command", json.dumps(command).encode("utf-8")
fake_socket.recv_multipart = recv_once
fake_socket.send_json = AsyncMock()
agent = RobotGestureAgent("robot_gesture", gesture_data=["hello", "yes", "no"], address="")
agent.subsocket = fake_socket
agent.pubsocket = fake_socket
agent._running = True
await agent._zmq_command_loop()
fake_socket.send_json.assert_awaited_once()
@pytest.mark.asyncio
async def test_zmq_command_loop_valid_non_gesture_payload():
"""UI command with non-gesture endpoint is not forwarded by this agent."""
command = {"endpoint": "some_other_endpoint", "data": "anything"}
fake_socket = AsyncMock()
async def recv_once():
agent._running = False
return b"command", json.dumps(command).encode("utf-8")
fake_socket.recv_multipart = recv_once
fake_socket.send_json = AsyncMock()
agent = RobotGestureAgent("robot_gesture", gesture_data=["hello", "yes", "no"], address="")
agent.subsocket = fake_socket
agent.pubsocket = fake_socket
agent._running = True
await agent._zmq_command_loop()
fake_socket.send_json.assert_not_awaited()
@pytest.mark.asyncio
async def test_zmq_command_loop_invalid_gesture_tag():
"""UI command with invalid gesture tag is not forwarded."""
command = {"endpoint": RIEndpoint.GESTURE_TAG, "data": "invalid_tag_not_in_list"}
fake_socket = AsyncMock()
async def recv_once():
agent._running = False
return b"command", json.dumps(command).encode("utf-8")
fake_socket.recv_multipart = recv_once
fake_socket.send_json = AsyncMock()
agent = RobotGestureAgent("robot_gesture", gesture_data=["hello", "yes", "no"], address="")
agent.subsocket = fake_socket
agent.pubsocket = fake_socket
agent._running = True
await agent._zmq_command_loop()
fake_socket.send_json.assert_not_awaited()
@pytest.mark.asyncio
async def test_zmq_command_loop_invalid_json():
"""Invalid JSON is ignored without sending."""
fake_socket = AsyncMock()
async def recv_once():
agent._running = False
return b"command", b"{not_json}"
fake_socket.recv_multipart = recv_once
fake_socket.send_json = AsyncMock()
agent = RobotGestureAgent("robot_gesture", gesture_data=["hello", "yes", "no"], address="")
agent.subsocket = fake_socket
agent.pubsocket = fake_socket
agent._running = True
await agent._zmq_command_loop()
fake_socket.send_json.assert_not_awaited()
@pytest.mark.asyncio
async def test_zmq_command_loop_ignores_send_gestures_topic():
"""send_gestures topic is ignored in command loop."""
fake_socket = AsyncMock()
async def recv_once():
agent._running = False
return b"send_gestures", b"{}"
fake_socket.recv_multipart = recv_once
fake_socket.send_json = AsyncMock()
agent = RobotGestureAgent("robot_gesture", gesture_data=["hello", "yes", "no"], address="")
agent.subsocket = fake_socket
agent.pubsocket = fake_socket
agent._running = True
await agent._zmq_command_loop()
fake_socket.send_json.assert_not_awaited()
@pytest.mark.asyncio
async def test_fetch_gestures_loop_without_amount():
"""Fetch gestures request without amount returns all tags."""
fake_repsocket = AsyncMock()
async def recv_once():
agent._running = False
return b"{}" # Empty JSON request
fake_repsocket.recv = recv_once
fake_repsocket.send = AsyncMock()
agent = RobotGestureAgent(
"robot_gesture", gesture_data=["hello", "yes", "no", "wave", "point"], address=""
)
agent.repsocket = fake_repsocket
agent._running = True
await agent._fetch_gestures_loop()
fake_repsocket.send.assert_awaited_once()
# Check the response contains all tags
args, kwargs = fake_repsocket.send.call_args
response = json.loads(args[0])
assert "tags" in response
assert response["tags"] == ["hello", "yes", "no", "wave", "point"]
@pytest.mark.asyncio
async def test_fetch_gestures_loop_with_amount():
"""Fetch gestures request with amount returns limited tags."""
fake_repsocket = AsyncMock()
amount = 3
async def recv_once():
agent._running = False
return json.dumps(amount).encode()
fake_repsocket.recv = recv_once
fake_repsocket.send = AsyncMock()
agent = RobotGestureAgent(
"robot_gesture", gesture_data=["hello", "yes", "no", "wave", "point"], address=""
)
agent.repsocket = fake_repsocket
agent._running = True
await agent._fetch_gestures_loop()
fake_repsocket.send.assert_awaited_once()
args, kwargs = fake_repsocket.send.call_args
response = json.loads(args[0])
assert "tags" in response
assert len(response["tags"]) == amount
assert response["tags"] == ["hello", "yes", "no"]
@pytest.mark.asyncio
async def test_fetch_gestures_loop_with_integer_request():
"""Fetch gestures request with integer amount."""
fake_repsocket = AsyncMock()
amount = 2
async def recv_once():
agent._running = False
return json.dumps(amount).encode()
fake_repsocket.recv = recv_once
fake_repsocket.send = AsyncMock()
agent = RobotGestureAgent("robot_gesture", gesture_data=["hello", "yes", "no"], address="")
agent.repsocket = fake_repsocket
agent._running = True
await agent._fetch_gestures_loop()
fake_repsocket.send.assert_awaited_once()
args, kwargs = fake_repsocket.send.call_args
response = json.loads(args[0])
assert response["tags"] == ["hello", "yes"]
@pytest.mark.asyncio
async def test_fetch_gestures_loop_with_invalid_json():
"""Invalid JSON request returns all tags."""
fake_repsocket = AsyncMock()
async def recv_once():
agent._running = False
return b"not_json"
fake_repsocket.recv = recv_once
fake_repsocket.send = AsyncMock()
agent = RobotGestureAgent("robot_gesture", gesture_data=["hello", "yes", "no"], address="")
agent.repsocket = fake_repsocket
agent._running = True
await agent._fetch_gestures_loop()
fake_repsocket.send.assert_awaited_once()
args, kwargs = fake_repsocket.send.call_args
response = json.loads(args[0])
assert response["tags"] == ["hello", "yes", "no"]
@pytest.mark.asyncio
async def test_fetch_gestures_loop_with_non_integer_json():
"""Non-integer JSON request returns all tags."""
fake_repsocket = AsyncMock()
async def recv_once():
agent._running = False
return json.dumps({"not": "an_integer"}).encode()
fake_repsocket.recv = recv_once
fake_repsocket.send = AsyncMock()
agent = RobotGestureAgent("robot_gesture", gesture_data=["hello", "yes", "no"], address="")
agent.repsocket = fake_repsocket
agent._running = True
await agent._fetch_gestures_loop()
fake_repsocket.send.assert_awaited_once()
args, kwargs = fake_repsocket.send.call_args
response = json.loads(args[0])
assert response["tags"] == ["hello", "yes", "no"]
def test_gesture_data_attribute():
"""Test that gesture_data returns the expected list."""
gesture_data = ["hello", "yes", "no", "wave"]
agent = RobotGestureAgent("robot_gesture", gesture_data=gesture_data, address="")
assert agent.gesture_data == gesture_data
assert isinstance(agent.gesture_data, list)
assert len(agent.gesture_data) == 4
assert "hello" in agent.gesture_data
assert "yes" in agent.gesture_data
assert "no" in agent.gesture_data
assert "invalid_tag_not_in_list" not in agent.gesture_data
@pytest.mark.asyncio
async def test_stop_closes_sockets():
"""Stop method closes all sockets."""
pubsocket = MagicMock()
subsocket = MagicMock()
repsocket = MagicMock()
agent = RobotGestureAgent("robot_gesture", address="")
agent.pubsocket = pubsocket
agent.subsocket = subsocket
agent.repsocket = repsocket
await agent.stop()
pubsocket.close.assert_called_once()
subsocket.close.assert_called_once()
repsocket.close.assert_called_once()
@pytest.mark.asyncio
async def test_initialization_with_custom_gesture_data():
"""Agent can be initialized with custom gesture data."""
custom_gestures = ["custom1", "custom2", "custom3"]
agent = RobotGestureAgent("robot_gesture", gesture_data=custom_gestures, address="")
assert agent.gesture_data == custom_gestures
@pytest.mark.asyncio
async def test_fetch_gestures_loop_handles_exception():
"""Exception in fetch gestures loop is caught and logged."""
fake_repsocket = AsyncMock()
async def recv_once():
agent._running = False
raise Exception("Test exception")
fake_repsocket.recv = recv_once
fake_repsocket.send = AsyncMock()
agent = RobotGestureAgent("robot_gesture", gesture_data=["hello", "yes", "no"], address="")
agent.repsocket = fake_repsocket
agent.logger = MagicMock()
agent._running = True
# Should not raise exception
await agent._fetch_gestures_loop()
# Exception should be logged
agent.logger.exception.assert_called_once()

View File

@@ -1,152 +0,0 @@
import json
from unittest.mock import AsyncMock, MagicMock
import pytest
import zmq
from control_backend.agents.actuation.robot_speech_agent import RobotSpeechAgent
from control_backend.core.agent_system import InternalMessage
def mock_speech_agent():
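    """Create a RobotSpeechAgent pointed at a local test address without binding."""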
agent = RobotSpeechAgent("robot_speech", address="tcp://localhost:5555", bind=False)
return agent
@pytest.fixture
def zmq_context(mocker):
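    """Mock the ZMQ context used by the RobotSpeechAgent."""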
mock_context = mocker.patch(
"control_backend.agents.actuation.robot_speech_agent.azmq.Context.instance"
)
mock_context.return_value = MagicMock()
return mock_context
@pytest.mark.asyncio
async def test_setup_bind(zmq_context, mocker):
"""Setup binds and subscribes to internal commands."""
fake_socket = zmq_context.return_value.socket.return_value
agent = RobotSpeechAgent("robot_speech", address="tcp://localhost:5555", bind=True)
settings = mocker.patch("control_backend.agents.actuation.robot_speech_agent.settings")
settings.zmq_settings.internal_sub_address = "tcp://internal:1234"
def close_coro(coro):
coro.close()
return MagicMock()
agent.add_behavior = MagicMock(side_effect=close_coro)
await agent.setup()
fake_socket.bind.assert_any_call("tcp://localhost:5555")
fake_socket.connect.assert_any_call("tcp://internal:1234")
fake_socket.setsockopt.assert_any_call(zmq.SUBSCRIBE, b"command")
agent.add_behavior.assert_called_once()
@pytest.mark.asyncio
async def test_setup_connect(zmq_context, mocker):
"""Setup connects when bind=False."""
fake_socket = zmq_context.return_value.socket.return_value
agent = RobotSpeechAgent("robot_speech", address="tcp://localhost:5555", bind=False)
settings = mocker.patch("control_backend.agents.actuation.robot_speech_agent.settings")
settings.zmq_settings.internal_sub_address = "tcp://internal:1234"
def close_coro(coro):
coro.close()
return MagicMock()
agent.add_behavior = MagicMock(side_effect=close_coro)
await agent.setup()
fake_socket.connect.assert_any_call("tcp://localhost:5555")
fake_socket.connect.assert_any_call("tcp://internal:1234")
agent.add_behavior.assert_called_once()
@pytest.mark.asyncio
async def test_handle_message_sends_command():
"""Internal message is forwarded to robot pub socket as JSON."""
pubsocket = AsyncMock()
agent = mock_speech_agent()
agent.pubsocket = pubsocket
payload = {"endpoint": "actuate/speech", "data": "hello", "is_priority": False}
msg = InternalMessage(to="robot", sender="tester", body=json.dumps(payload))
await agent.handle_message(msg)
pubsocket.send_json.assert_awaited_once_with(payload)
@pytest.mark.asyncio
async def test_zmq_command_loop_valid_payload(zmq_context):
"""UI command is read from SUB and published."""
command = {"endpoint": "actuate/speech", "data": "hello", "is_priority": False}
fake_socket = AsyncMock()
async def recv_once():
# stop after first iteration
agent._running = False
return (b"command", json.dumps(command).encode("utf-8"))
fake_socket.recv_multipart = recv_once
fake_socket.send_json = AsyncMock()
agent = mock_speech_agent()
agent.subsocket = fake_socket
agent.pubsocket = fake_socket
agent._running = True
await agent._zmq_command_loop()
fake_socket.send_json.assert_awaited_once_with(command)
@pytest.mark.asyncio
async def test_zmq_command_loop_invalid_json():
"""Invalid JSON is ignored without sending."""
fake_socket = AsyncMock()
async def recv_once():
agent._running = False
return (b"command", b"{not_json}")
fake_socket.recv_multipart = recv_once
fake_socket.send_json = AsyncMock()
agent = mock_speech_agent()
agent.subsocket = fake_socket
agent.pubsocket = fake_socket
agent._running = True
await agent._zmq_command_loop()
fake_socket.send_json.assert_not_awaited()
@pytest.mark.asyncio
async def test_handle_message_invalid_payload():
"""Invalid payload is caught and does not send."""
pubsocket = AsyncMock()
agent = mock_speech_agent()
agent.pubsocket = pubsocket
msg = InternalMessage(to="robot", sender="tester", body=json.dumps({"bad": "data"}))
await agent.handle_message(msg)
pubsocket.send_json.assert_not_awaited()
@pytest.mark.asyncio
async def test_stop_closes_sockets():
pubsocket = MagicMock()
subsocket = MagicMock()
agent = mock_speech_agent()
agent.pubsocket = pubsocket
agent.subsocket = subsocket
await agent.stop()
pubsocket.close.assert_called_once()
subsocket.close.assert_called_once()

View File

@@ -0,0 +1,209 @@
import json
import logging
from unittest.mock import AsyncMock, MagicMock, call
import pytest
from control_backend.agents.bdi.behaviours.belief_setter import BeliefSetterBehaviour
# Define a constant for the collector agent name to use in tests
COLLECTOR_AGENT_NAME = "belief_collector"
COLLECTOR_AGENT_JID = f"{COLLECTOR_AGENT_NAME}@test"
@pytest.fixture
def mock_agent(mocker):
"""Fixture to create a mock BDIAgent."""
agent = MagicMock()
agent.bdi = MagicMock()
agent.jid = "bdi_agent@test"
return agent
@pytest.fixture
def belief_setter(mock_agent, mocker):
"""Fixture to create an instance of BeliefSetterBehaviour with a mocked agent."""
# Patch the settings to use a predictable agent name
mocker.patch(
"control_backend.agents.bdi.behaviours.belief_setter.settings.agent_settings.belief_collector_agent_name",
COLLECTOR_AGENT_NAME,
)
setter = BeliefSetterBehaviour()
setter.agent = mock_agent
# Mock the receive method, we will control its return value in each test
setter.receive = AsyncMock()
return setter
def create_mock_message(sender_node: str, body: str, thread: str) -> MagicMock:
"""Helper function to create a configured mock message."""
msg = MagicMock()
msg.sender.node = sender_node # MagicMock automatically creates nested mocks
msg.body = body
msg.thread = thread
return msg
@pytest.mark.asyncio
async def test_run_message_received(belief_setter, mocker):
"""
Test that when a message is received, _process_message is called.
"""
# Arrange
msg = MagicMock()
belief_setter.receive.return_value = msg
mocker.patch.object(belief_setter, "_process_message")
# Act
await belief_setter.run()
# Assert
belief_setter._process_message.assert_called_once_with(msg)
def test_process_message_from_belief_collector(belief_setter, mocker):
"""
Test processing a message from the correct belief collector agent.
"""
# Arrange
msg = create_mock_message(sender_node=COLLECTOR_AGENT_NAME, body="", thread="")
mock_process_belief = mocker.patch.object(belief_setter, "_process_belief_message")
# Act
belief_setter._process_message(msg)
# Assert
mock_process_belief.assert_called_once_with(msg)
def test_process_message_from_other_agent(belief_setter, mocker):
"""
Test that messages from other agents are ignored.
"""
# Arrange
msg = create_mock_message(sender_node="other_agent", body="", thread="")
mock_process_belief = mocker.patch.object(belief_setter, "_process_belief_message")
# Act
belief_setter._process_message(msg)
# Assert
mock_process_belief.assert_not_called()
def test_process_belief_message_valid_json(belief_setter, mocker):
"""
Test processing a valid belief message with correct thread and JSON body.
"""
# Arrange
beliefs_payload = {"is_hot": ["kitchen"], "is_clean": ["kitchen", "bathroom"]}
msg = create_mock_message(
sender_node=COLLECTOR_AGENT_JID, body=json.dumps(beliefs_payload), thread="beliefs"
)
mock_set_beliefs = mocker.patch.object(belief_setter, "_set_beliefs")
# Act
belief_setter._process_belief_message(msg)
# Assert
mock_set_beliefs.assert_called_once_with(beliefs_payload)
def test_process_belief_message_invalid_json(belief_setter, mocker, caplog):
"""
Test that a message with invalid JSON is handled gracefully and an error is logged.
"""
# Arrange
msg = create_mock_message(
sender_node=COLLECTOR_AGENT_JID, body="this is not a json string", thread="beliefs"
)
mock_set_beliefs = mocker.patch.object(belief_setter, "_set_beliefs")
# Act
belief_setter._process_belief_message(msg)
# Assert
mock_set_beliefs.assert_not_called()
def test_process_belief_message_wrong_thread(belief_setter, mocker):
"""
Test that a message with an incorrect thread is ignored.
"""
# Arrange
msg = create_mock_message(
sender_node=COLLECTOR_AGENT_JID, body='{"some": "data"}', thread="not_beliefs"
)
mock_set_beliefs = mocker.patch.object(belief_setter, "_set_beliefs")
# Act
belief_setter._process_belief_message(msg)
# Assert
mock_set_beliefs.assert_not_called()
def test_process_belief_message_empty_body(belief_setter, mocker):
"""
Test that a message with an empty body is ignored.
"""
# Arrange
msg = create_mock_message(sender_node=COLLECTOR_AGENT_JID, body="", thread="beliefs")
mock_set_beliefs = mocker.patch.object(belief_setter, "_set_beliefs")
# Act
belief_setter._process_belief_message(msg)
# Assert
mock_set_beliefs.assert_not_called()
def test_set_beliefs_success(belief_setter, mock_agent, caplog):
"""
Test that beliefs are correctly set on the agent's BDI.
"""
# Arrange
beliefs_to_set = {
"is_hot": ["kitchen"],
"door_opened": ["front_door", "back_door"],
}
# Act
with caplog.at_level(logging.INFO):
belief_setter._set_beliefs(beliefs_to_set)
# Assert
expected_calls = [
call("is_hot", "kitchen"),
call("door_opened", "front_door", "back_door"),
]
mock_agent.bdi.set_belief.assert_has_calls(expected_calls, any_order=True)
assert mock_agent.bdi.set_belief.call_count == 2
# def test_responded_unset(belief_setter, mock_agent):
# # Arrange
# new_beliefs = {"user_said": ["message"]}
#
# # Act
# belief_setter._set_beliefs(new_beliefs)
#
# # Assert
# mock_agent.bdi.set_belief.assert_has_calls([call("user_said", "message")])
# mock_agent.bdi.remove_belief.assert_has_calls([call("responded")])
# def test_set_beliefs_bdi_not_initialized(belief_setter, mock_agent, caplog):
# """
# Test that a warning is logged if the agent's BDI is not initialized.
# """
# # Arrange
# mock_agent.bdi = None # Simulate BDI not being ready
# beliefs_to_set = {"is_hot": ["kitchen"]}
#
# # Act
# with caplog.at_level(logging.WARNING):
# belief_setter._set_beliefs(beliefs_to_set)
#
# # Assert
# assert "Cannot set beliefs, since agent's BDI is not yet initialized." in caplog.text

View File

@@ -1,186 +0,0 @@
import pytest
from control_backend.agents.bdi.agentspeak_ast import (
AstAtom,
AstBinaryOp,
AstLiteral,
AstLogicalExpression,
AstNumber,
AstPlan,
AstProgram,
AstRule,
AstStatement,
AstString,
AstVar,
BinaryOperatorType,
StatementType,
TriggerType,
_coalesce_expr,
)
def test_ast_atom():
atom = AstAtom("test")
assert str(atom) == "test"
assert atom._to_agentspeak() == "test"
def test_ast_var():
var = AstVar("Variable")
assert str(var) == "Variable"
assert var._to_agentspeak() == "Variable"
def test_ast_number():
num = AstNumber(42)
assert str(num) == "42"
num_float = AstNumber(3.14)
assert str(num_float) == "3.14"
def test_ast_string():
s = AstString("hello")
assert str(s) == '"hello"'
def test_ast_literal():
lit = AstLiteral("functor", [AstAtom("atom"), AstNumber(1)])
assert str(lit) == "functor(atom, 1)"
lit_empty = AstLiteral("functor")
assert str(lit_empty) == "functor"
def test_ast_binary_op():
left = AstNumber(1)
right = AstNumber(2)
op = AstBinaryOp(left, BinaryOperatorType.GREATER_THAN, right)
assert str(op) == "1 > 2"
# Test logical wrapper
assert isinstance(op.left, AstLogicalExpression)
assert isinstance(op.right, AstLogicalExpression)
def test_ast_binary_op_parens():
# 1 > 2
inner = AstBinaryOp(AstNumber(1), BinaryOperatorType.GREATER_THAN, AstNumber(2))
# (1 > 2) & 3
outer = AstBinaryOp(inner, BinaryOperatorType.AND, AstNumber(3))
assert str(outer) == "(1 > 2) & 3"
# 3 & (1 > 2)
outer_right = AstBinaryOp(AstNumber(3), BinaryOperatorType.AND, inner)
assert str(outer_right) == "3 & (1 > 2)"
def test_ast_binary_op_parens_negated():
inner = AstLogicalExpression(AstAtom("foo"), negated=True)
outer = AstBinaryOp(inner, BinaryOperatorType.AND, AstAtom("bar"))
# The current implementation checks `if self.left.negated: l_str = f"({l_str})"`
# str(inner) is "not foo"
# so we expect "(not foo) & bar"
assert str(outer) == "(not foo) & bar"
outer_right = AstBinaryOp(AstAtom("bar"), BinaryOperatorType.AND, inner)
assert str(outer_right) == "bar & (not foo)"
def test_ast_logical_expression_negation():
expr = AstLogicalExpression(AstAtom("true"), negated=True)
assert str(expr) == "not true"
expr_neg_neg = ~expr
assert str(expr_neg_neg) == "true"
assert not expr_neg_neg.negated
# Invert a non-logical expression (wraps it)
term = AstAtom("true")
inverted = ~term
assert isinstance(inverted, AstLogicalExpression)
assert inverted.negated
assert str(inverted) == "not true"
def test_ast_logical_expression_no_negation():
# _as_logical on already logical expression
expr = AstLogicalExpression(AstAtom("x"))
# Doing binary op will call _as_logical
op = AstBinaryOp(expr, BinaryOperatorType.AND, AstAtom("y"))
assert isinstance(op.left, AstLogicalExpression)
assert op.left is expr # Should reuse instance
def test_ast_operators():
t1 = AstAtom("a")
t2 = AstAtom("b")
assert str(t1 & t2) == "a & b"
assert str(t1 | t2) == "a | b"
assert str(t1 >= t2) == "a >= b"
assert str(t1 > t2) == "a > b"
assert str(t1 <= t2) == "a <= b"
assert str(t1 < t2) == "a < b"
assert str(t1 == t2) == "a == b"
assert str(t1 != t2) == r"a \== b"
def test_coalesce_expr():
t = AstAtom("a")
assert str(t & "b") == 'a & "b"'
assert str(t & 1) == "a & 1"
assert str(t & 1.5) == "a & 1.5"
with pytest.raises(TypeError):
_coalesce_expr(None)
def test_ast_statement():
stmt = AstStatement(StatementType.DO_ACTION, AstLiteral("action"))
assert str(stmt) == ".action"
def test_ast_rule():
# Rule with condition
rule = AstRule(AstLiteral("head"), AstLiteral("body"))
assert str(rule) == "head :- body."
# Rule without condition
rule_simple = AstRule(AstLiteral("fact"))
assert str(rule_simple) == "fact."
def test_ast_plan():
plan = AstPlan(
TriggerType.ADDED_GOAL,
AstLiteral("goal"),
[AstLiteral("context")],
[AstStatement(StatementType.DO_ACTION, AstLiteral("action"))],
)
output = str(plan)
# verify parts exist
assert "+!goal" in output
assert ": context" in output
assert "<- .action." in output
def test_ast_plan_no_context():
plan = AstPlan(
TriggerType.ADDED_GOAL,
AstLiteral("goal"),
[],
[AstStatement(StatementType.DO_ACTION, AstLiteral("action"))],
)
output = str(plan)
assert "+!goal" in output
assert ": " not in output
assert "<- .action." in output
def test_ast_program():
prog = AstProgram(
rules=[AstRule(AstLiteral("fact"))],
plans=[AstPlan(TriggerType.ADDED_BELIEF, AstLiteral("b"), [], [])],
)
output = str(prog)
assert "fact." in output
assert "+b" in output

View File

@@ -1,187 +0,0 @@
import uuid
import pytest
from control_backend.agents.bdi.agentspeak_ast import AstProgram
from control_backend.agents.bdi.agentspeak_generator import AgentSpeakGenerator
from control_backend.schemas.program import (
BasicNorm,
ConditionalNorm,
Gesture,
GestureAction,
Goal,
InferredBelief,
KeywordBelief,
LLMAction,
LogicalOperator,
Phase,
Plan,
Program,
SemanticBelief,
SpeechAction,
Trigger,
)
@pytest.fixture
def generator():
return AgentSpeakGenerator()
def test_generate_empty_program(generator):
prog = Program(phases=[])
code = generator.generate(prog)
assert 'phase("end").' in code
assert "!notify_cycle" in code
def test_generate_basic_norm(generator):
norm = BasicNorm(id=uuid.uuid4(), name="n1", norm="be nice")
phase = Phase(id=uuid.uuid4(), norms=[norm], goals=[], triggers=[])
prog = Program(phases=[phase])
code = generator.generate(prog)
assert f'norm("be nice") :- phase("{phase.id}").' in code
def test_generate_critical_norm(generator):
norm = BasicNorm(id=uuid.uuid4(), name="n1", norm="safety", critical=True)
phase = Phase(id=uuid.uuid4(), norms=[norm], goals=[], triggers=[])
prog = Program(phases=[phase])
code = generator.generate(prog)
assert f'critical_norm("safety") :- phase("{phase.id}").' in code
def test_generate_conditional_norm(generator):
cond = KeywordBelief(id=uuid.uuid4(), name="k1", keyword="please")
norm = ConditionalNorm(id=uuid.uuid4(), name="n1", norm="help", condition=cond)
phase = Phase(id=uuid.uuid4(), norms=[norm], goals=[], triggers=[])
prog = Program(phases=[phase])
code = generator.generate(prog)
assert 'norm("help")' in code
assert 'keyword_said("please")' in code
assert f"force_norm_{generator._slugify_str(norm.norm)}" in code
def test_generate_goal_and_plan(generator):
action = SpeechAction(id=uuid.uuid4(), name="s1", text="hello")
plan = Plan(id=uuid.uuid4(), name="p1", steps=[action])
# IMPORTANT: can_fail must be False for +achieved_ belief to be added
goal = Goal(id=uuid.uuid4(), name="g1", description="desc", plan=plan, can_fail=False)
phase = Phase(id=uuid.uuid4(), norms=[], goals=[goal], triggers=[])
prog = Program(phases=[phase])
code = generator.generate(prog)
# Check trigger for goal
goal_slug = generator._slugify_str(goal.name)
assert f"+!{goal_slug}" in code
assert f'phase("{phase.id}")' in code
assert '!say("hello")' in code
# Check success belief addition
assert f"+achieved_{goal_slug}" in code
def test_generate_subgoal(generator):
subplan = Plan(id=uuid.uuid4(), name="p2", steps=[])
subgoal = Goal(id=uuid.uuid4(), name="sub1", description="sub", plan=subplan)
plan = Plan(id=uuid.uuid4(), name="p1", steps=[subgoal])
goal = Goal(id=uuid.uuid4(), name="g1", description="main", plan=plan)
phase = Phase(id=uuid.uuid4(), norms=[], goals=[goal], triggers=[])
prog = Program(phases=[phase])
code = generator.generate(prog)
subgoal_slug = generator._slugify_str(subgoal.name)
# Main goal calls subgoal
assert f"!{subgoal_slug}" in code
# Subgoal plan exists
assert f"+!{subgoal_slug}" in code
def test_generate_trigger(generator):
cond = SemanticBelief(id=uuid.uuid4(), name="s1", description="desc")
plan = Plan(id=uuid.uuid4(), name="p1", steps=[])
trigger = Trigger(id=uuid.uuid4(), name="t1", condition=cond, plan=plan)
phase = Phase(id=uuid.uuid4(), norms=[], goals=[], triggers=[trigger])
prog = Program(phases=[phase])
code = generator.generate(prog)
# Trigger logic is added to check_triggers
assert f"{generator.slugify(cond)}" in code
assert f'notify_trigger_start("{generator.slugify(trigger)}")' in code
assert f'notify_trigger_end("{generator.slugify(trigger)}")' in code
def test_phase_transition(generator):
phase1 = Phase(id=uuid.uuid4(), name="p1", norms=[], goals=[], triggers=[])
phase2 = Phase(id=uuid.uuid4(), name="p2", norms=[], goals=[], triggers=[])
prog = Program(phases=[phase1, phase2])
code = generator.generate(prog)
assert "transition_phase" in code
assert f'phase("{phase1.id}")' in code
assert f'phase("{phase2.id}")' in code
assert "force_transition_phase" in code
def test_astify_gesture(generator):
gesture = Gesture(type="single", name="wave")
action = GestureAction(id=uuid.uuid4(), name="g1", gesture=gesture)
ast = generator._astify(action)
assert str(ast) == 'gesture("single", "wave")'
def test_astify_llm_action(generator):
action = LLMAction(id=uuid.uuid4(), name="l1", goal="be funny")
ast = generator._astify(action)
assert str(ast) == 'reply_with_goal("be funny")'
def test_astify_inferred_belief_and(generator):
left = KeywordBelief(id=uuid.uuid4(), name="k1", keyword="a")
right = KeywordBelief(id=uuid.uuid4(), name="k2", keyword="b")
inf = InferredBelief(
id=uuid.uuid4(), name="i1", operator=LogicalOperator.AND, left=left, right=right
)
ast = generator._astify(inf)
assert 'keyword_said("a") & keyword_said("b")' == str(ast)
def test_astify_inferred_belief_or(generator):
left = KeywordBelief(id=uuid.uuid4(), name="k1", keyword="a")
right = KeywordBelief(id=uuid.uuid4(), name="k2", keyword="b")
inf = InferredBelief(
id=uuid.uuid4(), name="i1", operator=LogicalOperator.OR, left=left, right=right
)
ast = generator._astify(inf)
assert 'keyword_said("a") | keyword_said("b")' == str(ast)
def test_astify_semantic_belief(generator):
sb = SemanticBelief(id=uuid.uuid4(), name="s1", description="desc")
ast = generator._astify(sb)
assert str(ast) == f"semantic_{generator._slugify_str(sb.name)}"
def test_slugify_not_implemented(generator):
with pytest.raises(NotImplementedError):
generator.slugify("not a program element")
def test_astify_not_implemented(generator):
with pytest.raises(NotImplementedError):
generator._astify("not a program element")
def test_process_phase_transition_from_none(generator):
# Initialize AstProgram manually as we are bypassing generate()
generator._asp = AstProgram()
    # Should return safely without doing anything
generator._add_phase_transition(None, None)
assert len(generator._asp.plans) == 0

View File

@@ -1,526 +0,0 @@
import asyncio
import json
import time
from unittest.mock import AsyncMock, MagicMock, mock_open, patch
import agentspeak
import pytest
from control_backend.agents.bdi.bdi_core_agent import BDICoreAgent
from control_backend.core.agent_system import InternalMessage
from control_backend.core.config import settings
from control_backend.schemas.belief_message import Belief, BeliefMessage
@pytest.fixture
def mock_agentspeak_env():
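    """Patch agentspeak.runtime.Environment so setup does not build a real agent."""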
with patch("agentspeak.runtime.Environment") as mock_env:
yield mock_env
@pytest.fixture
def agent():
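    """A BDICoreAgent with send and bdi_agent mocked out."""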
agent = BDICoreAgent("bdi_agent")
agent.send = AsyncMock()
agent.bdi_agent = MagicMock()
return agent
@pytest.mark.asyncio
async def test_setup_loads_asl(mock_agentspeak_env, agent):
# Mock file opening
with patch("builtins.open", mock_open(read_data="+initial_goal.")):
await agent.setup()
# Check if environment tried to build agent
mock_agentspeak_env.return_value.build_agent.assert_called()
@pytest.mark.asyncio
async def test_setup_no_asl(mock_agentspeak_env, agent):
with patch("builtins.open", side_effect=FileNotFoundError):
await agent.setup()
mock_agentspeak_env.return_value.build_agent.assert_not_called()
@pytest.mark.asyncio
async def test_handle_belief_message(agent, mock_settings):
"""Test that incoming beliefs are added to the BDI agent"""
beliefs = [Belief(name="user_said", arguments=["Hello"])]
msg = InternalMessage(
to="bdi_agent",
sender=mock_settings.agent_settings.text_belief_extractor_name,
body=BeliefMessage(create=beliefs).model_dump_json(),
thread="beliefs",
)
await agent.handle_message(msg)
# Check for the specific call we expect among all calls
# bdi_agent.call is called multiple times (for transition_phase, check_triggers)
# We want to confirm the belief addition call exists
found_call = False
for call in agent.bdi_agent.call.call_args_list:
args = call.args
if (
args[0] == agentspeak.Trigger.addition
and args[1] == agentspeak.GoalType.belief
and args[2].functor == "user_said"
and args[2].args[0].functor == "Hello"
):
found_call = True
break
assert found_call, "Expected belief addition call not found in bdi_agent.call history"
@pytest.mark.asyncio
async def test_handle_delete_belief_message(agent, mock_settings):
"""Test that incoming beliefs to be deleted are removed from the BDI agent"""
beliefs = [Belief(name="user_said", arguments=["Hello"])]
msg = InternalMessage(
to="bdi_agent",
sender=mock_settings.agent_settings.text_belief_extractor_name,
body=BeliefMessage(delete=beliefs).model_dump_json(),
thread="beliefs",
)
await agent.handle_message(msg)
found_call = False
for call in agent.bdi_agent.call.call_args_list:
args = call.args
if (
args[0] == agentspeak.Trigger.removal
and args[1] == agentspeak.GoalType.belief
and args[2].functor == "user_said"
and args[2].args[0].functor == "Hello"
):
found_call = True
break
assert found_call
@pytest.mark.asyncio
async def test_incorrect_belief_message(agent, mock_settings):
"""Test that incorrect message format triggers an exception."""
msg = InternalMessage(
to="bdi_agent",
sender=mock_settings.agent_settings.text_belief_extractor_name,
body=json.dumps({"bad_format": "bad_format"}),
thread="beliefs",
)
await agent.handle_message(msg)
agent.bdi_agent.call.assert_not_called() # did not set belief
@pytest.mark.asyncio
async def test_handle_llm_response(agent):
"""Test that LLM responses are forwarded to the Robot Speech Agent"""
msg = InternalMessage(
to="bdi_agent", sender=settings.agent_settings.llm_name, body="This is the LLM reply"
)
await agent.handle_message(msg)
# Verify forward
assert agent.send.called
sent_msg = agent.send.call_args[0][0]
assert sent_msg.to == settings.agent_settings.robot_speech_name
assert "This is the LLM reply" in sent_msg.body
@pytest.mark.asyncio
async def test_custom_actions(agent):
agent._send_to_llm = MagicMock(side_effect=agent.send) # Mock specific method
# Initialize actions manually since we didn't call setup with real file
agent._add_custom_actions()
# Find the action
action_fn = None
for (functor, _), fn in agent.actions.actions.items():
if functor == ".reply":
action_fn = fn
break
assert action_fn is not None
# Invoke action
mock_term = MagicMock()
mock_term.args = ["Hello", "Norm"]
mock_intention = MagicMock()
# Run generator
gen = action_fn(agent, mock_term, mock_intention)
next(gen) # Execute
agent._send_to_llm.assert_called_with("Hello", "Norm", "")
def test_add_belief_sets_event(agent):
"""Test that a belief triggers wake event and call()"""
agent._wake_bdi_loop = MagicMock()
belief = Belief(name="test_belief", arguments=["a", "b"])
belief_changes = BeliefMessage(replace=[belief])
agent._apply_belief_changes(belief_changes)
assert agent.bdi_agent.call.called
agent._wake_bdi_loop.set.assert_called()
def test_apply_beliefs_empty_returns(agent):
"""Line: if not beliefs: return"""
agent._wake_bdi_loop = MagicMock()
agent._apply_belief_changes(BeliefMessage())
agent.bdi_agent.call.assert_not_called()
agent._wake_bdi_loop.set.assert_not_called()
def test_remove_belief_success_wakes_loop(agent):
"""Line: if result: wake set"""
agent._wake_bdi_loop = MagicMock()
agent.bdi_agent.call.return_value = True
agent._remove_belief("remove_me", ["x"])
assert agent.bdi_agent.call.called
call_args = agent.bdi_agent.call.call_args.args
trigger = call_args[0]
goaltype = call_args[1]
literal = call_args[2]
assert trigger == agentspeak.Trigger.removal
assert goaltype == agentspeak.GoalType.belief
assert literal.functor == "remove_me"
assert literal.args[0].functor == "x"
agent._wake_bdi_loop.set.assert_called()
def test_remove_belief_failure_does_not_wake(agent):
"""Line: else result is False"""
agent._wake_bdi_loop = MagicMock()
agent.bdi_agent.call.return_value = False
agent._remove_belief("not_there", ["y"])
assert agent.bdi_agent.call.called # removal was attempted
agent._wake_bdi_loop.set.assert_not_called()
def test_remove_all_with_name_wakes_loop(agent):
"""Cover _remove_all_with_name() removed counter + wake"""
agent._wake_bdi_loop = MagicMock()
fake_literal = agentspeak.Literal("delete_me", (agentspeak.Literal("arg1"),))
fake_key = ("delete_me", 1)
agent.bdi_agent.beliefs = {fake_key: {fake_literal}}
agent._remove_all_with_name("delete_me")
assert agent.bdi_agent.call.called
agent._wake_bdi_loop.set.assert_called()
@pytest.mark.asyncio
async def test_bdi_step_true_branch_hits_line_67(agent):
"""Force step() to return True once so line 67 is actually executed"""
# counter that isn't tied to MagicMock.call_count ordering
counter = {"i": 0}
def fake_step():
counter["i"] += 1
return counter["i"] == 1 # True only first time
# Important: wrap fake_step into another mock so `.called` still exists
agent.bdi_agent.step = MagicMock(side_effect=fake_step)
agent.bdi_agent.shortest_deadline = MagicMock(return_value=None)
agent._running = True
agent._wake_bdi_loop = asyncio.Event()
agent._wake_bdi_loop.set()
task = asyncio.create_task(agent._bdi_loop())
await asyncio.sleep(0.01)
task.cancel()
try:
await task
except asyncio.CancelledError:
pass
assert agent.bdi_agent.step.called
assert counter["i"] >= 1 # proves True branch ran
def test_replace_belief_calls_remove_all(agent):
"""Cover: if belief.replace: self._remove_all_with_name()"""
agent._remove_all_with_name = MagicMock()
agent._wake_bdi_loop = MagicMock()
belief = Belief(name="user_said", arguments=["Hello"])
belief_changes = BeliefMessage(replace=[belief])
agent._apply_belief_changes(belief_changes)
agent._remove_all_with_name.assert_called_with("user_said")
@pytest.mark.asyncio
async def test_send_to_llm_creates_prompt_and_sends(agent):
"""Cover entire _send_to_llm() including message send and logger.info"""
agent.bdi_agent = MagicMock() # ensure mocked BDI does not interfere
agent._wake_bdi_loop = MagicMock()
await agent._send_to_llm("hello world", "n1\nn2", "g1")
# send() was called
assert agent.send.called
sent_msg: InternalMessage = agent.send.call_args.args[0]
# Message routing values correct
assert sent_msg.to == settings.agent_settings.llm_name
assert "hello world" in sent_msg.body
# JSON contains split norms/goals
body = json.loads(sent_msg.body)
assert body["norms"] == ["n1", "n2"]
assert body["goals"] == ["g1"]
@pytest.mark.asyncio
async def test_deadline_sleep_branch(agent):
"""Specifically assert the if deadline: sleep → maybe_more_work=True branch"""
future_deadline = time.time() + 0.005
agent.bdi_agent.step.return_value = False
agent.bdi_agent.shortest_deadline.return_value = future_deadline
start_time = time.time()
agent._running = True
agent._wake_bdi_loop = asyncio.Event()
agent._wake_bdi_loop.set()
task = asyncio.create_task(agent._bdi_loop())
await asyncio.sleep(0.01)
task.cancel()
duration = time.time() - start_time
assert duration >= 0.004 # loop slept until deadline
@pytest.mark.asyncio
async def test_handle_new_program(agent):
agent._load_asl = AsyncMock()
agent.add_behavior = MagicMock()
# Mock existing loop task so it can be cancelled
mock_task = MagicMock()
mock_task.cancel = MagicMock()
agent._bdi_loop_task = mock_task
def close_coro(coro):
coro.close()
return MagicMock()
agent.add_behavior = MagicMock(side_effect=close_coro)
msg = InternalMessage(to="bdi_agent", thread="new_program", body="path/to/asl.asl")
await agent.handle_message(msg)
mock_task.cancel.assert_called_once()
agent._load_asl.assert_awaited_once_with("path/to/asl.asl")
agent.add_behavior.assert_called()
@pytest.mark.asyncio
async def test_handle_user_interrupts(agent, mock_settings):
mock_settings.agent_settings.user_interrupt_name = "user_interrupt_agent"
# force_phase_transition
agent._set_goal = MagicMock()
msg = InternalMessage(
to="bdi_agent",
sender=mock_settings.agent_settings.user_interrupt_name,
thread="force_phase_transition",
body="",
)
await agent.handle_message(msg)
agent._set_goal.assert_called_with("transition_phase")
# force_trigger
agent._force_trigger = MagicMock()
msg.thread = "force_trigger"
msg.body = "trigger_x"
await agent.handle_message(msg)
agent._force_trigger.assert_called_with("trigger_x")
# force_norm
agent._force_norm = MagicMock()
msg.thread = "force_norm"
msg.body = "norm_y"
await agent.handle_message(msg)
agent._force_norm.assert_called_with("norm_y")
# force_next_phase
agent._force_next_phase = MagicMock()
msg.thread = "force_next_phase"
msg.body = ""
await agent.handle_message(msg)
agent._force_next_phase.assert_called_once()
# unknown interrupt
agent.logger = MagicMock()
msg.thread = "unknown_thing"
await agent.handle_message(msg)
agent.logger.warning.assert_called()
@pytest.mark.asyncio
async def test_custom_action_reply_with_goal(agent):
agent._send_to_llm = MagicMock(side_effect=agent.send)
agent._add_custom_actions()
action_fn = agent.actions.actions[(".reply_with_goal", 3)]
mock_term = MagicMock(args=["msg", "norms", "goal"])
gen = action_fn(agent, mock_term, MagicMock())
next(gen)
agent._send_to_llm.assert_called_with("msg", "norms", "goal")
@pytest.mark.asyncio
async def test_custom_action_notify_norms(agent):
agent._add_custom_actions()
action_fn = agent.actions.actions[(".notify_norms", 1)]
mock_term = MagicMock(args=["norms_list"])
gen = action_fn(agent, mock_term, MagicMock())
next(gen)
agent.send.assert_called()
msg = agent.send.call_args[0][0]
assert msg.thread == "active_norms_update"
assert msg.body == "norms_list"
@pytest.mark.asyncio
async def test_custom_action_say(agent):
agent._add_custom_actions()
action_fn = agent.actions.actions[(".say", 1)]
mock_term = MagicMock(args=["hello"])
gen = action_fn(agent, mock_term, MagicMock())
next(gen)
assert agent.send.call_count == 2
msgs = [c[0][0] for c in agent.send.call_args_list]
assert any(m.to == settings.agent_settings.robot_speech_name for m in msgs)
assert any(
m.to == settings.agent_settings.llm_name and m.thread == "assistant_message" for m in msgs
)
@pytest.mark.asyncio
async def test_custom_action_gesture(agent):
agent._add_custom_actions()
# Test single
action_fn = agent.actions.actions[(".gesture", 2)]
mock_term = MagicMock(args=["single", "wave"])
gen = action_fn(agent, mock_term, MagicMock())
next(gen)
msg = agent.send.call_args[0][0]
assert "actuate/gesture/single" in msg.body
# Test tag
mock_term.args = ["tag", "happy"]
gen = action_fn(agent, mock_term, MagicMock())
next(gen)
msg = agent.send.call_args[0][0]
assert "actuate/gesture/tag" in msg.body
@pytest.mark.asyncio
async def test_custom_action_notify_user_said(agent):
agent._add_custom_actions()
action_fn = agent.actions.actions[(".notify_user_said", 1)]
mock_term = MagicMock(args=["hello"])
gen = action_fn(agent, mock_term, MagicMock())
next(gen)
msg = agent.send.call_args[0][0]
assert msg.to == settings.agent_settings.llm_name
assert msg.thread == "user_message"
@pytest.mark.asyncio
async def test_custom_action_notify_trigger_start_end(agent):
agent._add_custom_actions()
# Start
action_fn = agent.actions.actions[(".notify_trigger_start", 1)]
gen = action_fn(agent, MagicMock(args=["t1"]), MagicMock())
next(gen)
assert agent.send.call_args[0][0].thread == "trigger_start"
# End
action_fn = agent.actions.actions[(".notify_trigger_end", 1)]
gen = action_fn(agent, MagicMock(args=["t1"]), MagicMock())
next(gen)
assert agent.send.call_args[0][0].thread == "trigger_end"
@pytest.mark.asyncio
async def test_custom_action_notify_goal_start(agent):
agent._add_custom_actions()
action_fn = agent.actions.actions[(".notify_goal_start", 1)]
gen = action_fn(agent, MagicMock(args=["g1"]), MagicMock())
next(gen)
assert agent.send.call_args[0][0].thread == "goal_start"
@pytest.mark.asyncio
async def test_custom_action_notify_transition_phase(agent):
agent._add_custom_actions()
action_fn = agent.actions.actions[(".notify_transition_phase", 2)]
gen = action_fn(agent, MagicMock(args=["old", "new"]), MagicMock())
next(gen)
msg = agent.send.call_args[0][0]
assert msg.thread == "transition_phase"
assert "old" in msg.body and "new" in msg.body
def test_remove_belief_no_args(agent):
agent._wake_bdi_loop = MagicMock()
agent.bdi_agent.call.return_value = True
agent._remove_belief("fact", None)
assert agent.bdi_agent.call.called
def test_set_goal_with_args(agent):
agent._wake_bdi_loop = MagicMock()
agent._set_goal("goal", ["arg1", "arg2"])
assert agent.bdi_agent.call.called
def test_format_belief_string():
assert BDICoreAgent.format_belief_string("b") == "b"
assert BDICoreAgent.format_belief_string("b", ["a1", "a2"]) == "b(a1,a2)"
def test_force_norm(agent):
agent._add_belief = MagicMock()
agent._force_norm("be_polite")
agent._add_belief.assert_called_with("force_be_polite")
def test_force_trigger(agent):
agent._set_goal = MagicMock()
agent._force_trigger("trig")
agent._set_goal.assert_called_with("trig")
def test_force_next_phase(agent):
agent._set_goal = MagicMock()
agent._force_next_phase()
agent._set_goal.assert_called_with("force_transition_phase")

View File

@@ -1,297 +0,0 @@
import asyncio
import json
import sys
import uuid
from unittest.mock import AsyncMock, MagicMock, mock_open, patch
import pytest
from control_backend.agents.bdi.bdi_program_manager import BDIProgramManager
from control_backend.core.agent_system import InternalMessage
from control_backend.schemas.program import BasicNorm, Goal, Phase, Plan, Program
# Fix Windows Proactor loop for zmq
if sys.platform.startswith("win"):
asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())
def make_valid_program_json(norm="N1", goal="G1") -> str:
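    """Build a minimal single-phase Program containing one basic norm and one goal, serialized to JSON."""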
return Program(
phases=[
Phase(
id=uuid.uuid4(),
name="Basic Phase",
norms=[
BasicNorm(
id=uuid.uuid4(),
name=norm,
norm=norm,
),
],
goals=[
Goal(
id=uuid.uuid4(),
name=goal,
description="This description can be used to determine whether the goal "
"has been achieved.",
plan=Plan(
id=uuid.uuid4(),
name="Goal Plan",
steps=[],
),
can_fail=False,
),
],
triggers=[],
),
],
).model_dump_json()
@pytest.mark.asyncio
async def test_create_agentspeak_and_send_to_bdi(mock_settings):
manager = BDIProgramManager(name="program_manager_test")
manager.send = AsyncMock()
program = Program.model_validate_json(make_valid_program_json())
with patch("builtins.open", mock_open()) as mock_file:
await manager._create_agentspeak_and_send_to_bdi(program)
# Check file writing
mock_file.assert_called_with("src/control_backend/agents/bdi/agentspeak.asl", "w")
handle = mock_file()
handle.write.assert_called()
assert manager.send.await_count == 1
msg: InternalMessage = manager.send.await_args[0][0]
assert msg.thread == "new_program"
assert msg.to == mock_settings.agent_settings.bdi_core_name
assert msg.body == "src/control_backend/agents/bdi/agentspeak.asl"
@pytest.mark.asyncio
async def test_receive_programs_valid_and_invalid():
sub = AsyncMock()
sub.recv_multipart.side_effect = [
(b"program", b"{bad json"),
(b"program", make_valid_program_json().encode()),
]
manager = BDIProgramManager(name="program_manager_test")
manager._internal_pub_socket = AsyncMock()
manager.sub_socket = sub
manager._create_agentspeak_and_send_to_bdi = AsyncMock()
manager._send_clear_llm_history = AsyncMock()
manager._send_program_to_user_interrupt = AsyncMock()
manager._send_beliefs_to_semantic_belief_extractor = AsyncMock()
manager._send_goals_to_semantic_belief_extractor = AsyncMock()
try:
# Will give StopAsyncIteration when the predefined `sub.recv_multipart` side-effects run out
await manager._receive_programs()
except StopAsyncIteration:
pass
# Only valid Program should have triggered _send_to_bdi
assert manager._create_agentspeak_and_send_to_bdi.await_count == 1
forwarded: Program = manager._create_agentspeak_and_send_to_bdi.await_args[0][0]
assert forwarded.phases[0].norms[0].name == "N1"
assert forwarded.phases[0].goals[0].name == "G1"
# Verify history clear was triggered exactly once (for the valid program)
# The invalid program loop `continue`s before calling _send_clear_llm_history
assert manager._send_clear_llm_history.await_count == 1
@pytest.mark.asyncio
async def test_send_clear_llm_history(mock_settings):
# Ensure the mock returns a string for the agent name (as in the LLM agent tests)
mock_settings.agent_settings.llm_agent_name = "llm_agent"
manager = BDIProgramManager(name="program_manager_test")
manager.send = AsyncMock()
await manager._send_clear_llm_history()
assert manager.send.await_count == 2
msg: InternalMessage = manager.send.await_args_list[0][0][0]
# Verify the message content
assert msg.body == "clear_history"
@pytest.mark.asyncio
async def test_handle_message_transition_phase(mock_settings):
mock_settings.agent_settings.user_interrupt_name = "user_interrupt_agent"
manager = BDIProgramManager(name="program_manager_test")
manager.send = AsyncMock()
# Setup state
prog = Program.model_validate_json(make_valid_program_json(norm="N1", goal="G1"))
manager._initialize_internal_state(prog)
# A valid transition requires a second phase, so add one to the program
phase2_id = uuid.uuid4()
phase2 = Phase(id=phase2_id, name="Phase 2", norms=[], goals=[], triggers=[])
prog.phases.append(phase2)
manager._initialize_internal_state(prog)
current_phase_id = str(prog.phases[0].id)
next_phase_id = str(phase2_id)
payload = json.dumps({"old": current_phase_id, "new": next_phase_id})
msg = InternalMessage(to="me", sender="bdi", body=payload, thread="transition_phase")
await manager.handle_message(msg)
assert str(manager._phase.id) == next_phase_id
# Allow background tasks to run (add_behavior)
await asyncio.sleep(0)
# Check notifications sent
# 1. beliefs to extractor
# 2. goals to extractor
# 3. notification to user interrupt
assert manager.send.await_count >= 3
# Verify user interrupt notification
calls = manager.send.await_args_list
ui_msgs = [
c[0][0] for c in calls if c[0][0].to == mock_settings.agent_settings.user_interrupt_name
]
assert len(ui_msgs) > 0
assert ui_msgs[-1].body == next_phase_id
@pytest.mark.asyncio
async def test_handle_message_transition_phase_desync():
manager = BDIProgramManager(name="program_manager_test")
manager.logger = MagicMock()
prog = Program.model_validate_json(make_valid_program_json())
manager._initialize_internal_state(prog)
current_phase_id = str(prog.phases[0].id)
# Request transition from WRONG old phase
payload = json.dumps({"old": "wrong_id", "new": "some_new_id"})
msg = InternalMessage(to="me", sender="bdi", body=payload, thread="transition_phase")
await manager.handle_message(msg)
# Should warn and do nothing
manager.logger.warning.assert_called_once()
assert "Phase transition desync detected" in manager.logger.warning.call_args[0][0]
assert str(manager._phase.id) == current_phase_id
@pytest.mark.asyncio
async def test_handle_message_transition_phase_end(mock_settings):
mock_settings.agent_settings.user_interrupt_name = "user_interrupt_agent"
manager = BDIProgramManager(name="program_manager_test")
manager.send = AsyncMock()
prog = Program.model_validate_json(make_valid_program_json())
manager._initialize_internal_state(prog)
current_phase_id = str(prog.phases[0].id)
payload = json.dumps({"old": current_phase_id, "new": "end"})
msg = InternalMessage(to="me", sender="bdi", body=payload, thread="transition_phase")
await manager.handle_message(msg)
assert manager._phase is None
# Allow background tasks to run (add_behavior)
await asyncio.sleep(0)
# Verify notification to user interrupt
assert manager.send.await_count == 1
msg_sent = manager.send.await_args[0][0]
assert msg_sent.to == mock_settings.agent_settings.user_interrupt_name
assert msg_sent.body == "end"
@pytest.mark.asyncio
async def test_handle_message_achieve_goal(mock_settings):
mock_settings.agent_settings.text_belief_extractor_name = "text_belief_extractor_agent"
manager = BDIProgramManager(name="program_manager_test")
manager.send = AsyncMock()
prog = Program.model_validate_json(make_valid_program_json(goal="TargetGoal"))
manager._initialize_internal_state(prog)
goal_id = str(prog.phases[0].goals[0].id)
msg = InternalMessage(to="me", sender="ui", body=goal_id, thread="achieve_goal")
await manager.handle_message(msg)
# Should send achieved goals to text extractor
assert manager.send.await_count == 1
msg_sent = manager.send.await_args[0][0]
assert msg_sent.to == mock_settings.agent_settings.text_belief_extractor_name
assert msg_sent.thread == "achieved_goals"
# Verify body
from control_backend.schemas.belief_list import GoalList
gl = GoalList.model_validate_json(msg_sent.body)
assert len(gl.goals) == 1
assert gl.goals[0].name == "TargetGoal"
@pytest.mark.asyncio
async def test_handle_message_achieve_goal_not_found():
manager = BDIProgramManager(name="program_manager_test")
manager.send = AsyncMock()
manager.logger = MagicMock()
prog = Program.model_validate_json(make_valid_program_json())
manager._initialize_internal_state(prog)
msg = InternalMessage(to="me", sender="ui", body="non_existent_id", thread="achieve_goal")
await manager.handle_message(msg)
manager.send.assert_not_called()
manager.logger.debug.assert_called()
@pytest.mark.asyncio
async def test_setup(mock_settings):
manager = BDIProgramManager(name="program_manager_test")
manager.send = AsyncMock()
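# Close each behavior coroutine so mocking add_behavior doesn't leave un-awaited coroutine warnings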
def close_coro(coro):
coro.close()
return MagicMock()
manager.add_behavior = MagicMock(side_effect=close_coro)
mock_context = MagicMock()
mock_sub = MagicMock()
mock_context.socket.return_value = mock_sub
with patch(
"control_backend.agents.bdi.bdi_program_manager.Context.instance", return_value=mock_context
):
# We also need to mock file writing in _create_agentspeak_and_send_to_bdi
with patch("builtins.open", new_callable=MagicMock):
await manager.setup()
# Check logic
# 1. Sends default empty program to BDI
assert manager.send.await_count == 1
assert manager.send.await_args[0][0].to == mock_settings.agent_settings.bdi_core_name
# 2. Connects SUB socket
mock_sub.connect.assert_called_with(mock_settings.zmq_settings.internal_sub_address)
mock_sub.subscribe.assert_called_with("program")
# 3. Adds behavior
manager.add_behavior.assert_called()

View File

@@ -1,554 +0,0 @@
import json
import uuid
from unittest.mock import AsyncMock, MagicMock, patch
import httpx
import pytest
from control_backend.agents.bdi import TextBeliefExtractorAgent
from control_backend.agents.bdi.text_belief_extractor_agent import BeliefState
from control_backend.core.agent_system import InternalMessage
from control_backend.core.config import settings
from control_backend.schemas.belief_list import BeliefList
from control_backend.schemas.belief_message import Belief as InternalBelief
from control_backend.schemas.belief_message import BeliefMessage
from control_backend.schemas.chat_history import ChatHistory, ChatMessage
from control_backend.schemas.program import (
BaseGoal, # Changed from Goal
ConditionalNorm,
KeywordBelief,
LLMAction,
Phase,
Plan,
Program,
SemanticBelief,
Trigger,
)
@pytest.fixture
def llm():
llm = TextBeliefExtractorAgent.LLM(MagicMock(), 4)
# We must ensure _query_llm returns a dictionary so iterating it doesn't fail
llm._query_llm = AsyncMock(return_value={})
return llm
@pytest.fixture
def agent(llm):
with patch(
"control_backend.agents.bdi.text_belief_extractor_agent.TextBeliefExtractorAgent.LLM",
return_value=llm,
):
agent = TextBeliefExtractorAgent("text_belief_agent")
agent.send = AsyncMock()
return agent
@pytest.fixture
def sample_program():
return Program(
phases=[
Phase(
name="Some phase",
id=uuid.uuid4(),
norms=[
ConditionalNorm(
name="Some norm",
id=uuid.uuid4(),
norm="Use nautical terms.",
critical=False,
condition=SemanticBelief(
name="is_pirate",
id=uuid.uuid4(),
description="The user is a pirate. Perhaps because they say "
"they are, or because they speak like a pirate "
'with terms like "arr".',
),
),
],
goals=[],
triggers=[
Trigger(
name="Some trigger",
id=uuid.uuid4(),
condition=SemanticBelief(
name="no_more_booze",
id=uuid.uuid4(),
description="There is no more alcohol.",
),
plan=Plan(
name="Some plan",
id=uuid.uuid4(),
steps=[
LLMAction(
name="Some action",
id=uuid.uuid4(),
goal="Suggest eating chocolate instead.",
),
],
),
),
],
),
],
)
def make_msg(sender: str, body: str, thread: str | None = None) -> InternalMessage:
return InternalMessage(to="unused", sender=sender, body=body, thread=thread)
@pytest.mark.asyncio
async def test_handle_message_ignores_other_agents(agent):
msg = make_msg("unknown", "some data", None)
await agent.handle_message(msg)
agent.send.assert_not_called()  # noqa # the real `agent.send` has no such attribute, but it is mocked here
@pytest.mark.asyncio
async def test_handle_message_from_transcriber(agent, mock_settings):
transcription = "hello world"
msg = make_msg(mock_settings.agent_settings.transcription_name, transcription, None)
await agent.handle_message(msg)
agent.send.assert_awaited_once()  # noqa # the real `agent.send` has no such attribute, but it is mocked here
sent: InternalMessage = agent.send.call_args.args[0] # noqa
assert sent.to == mock_settings.agent_settings.bdi_core_name
assert sent.thread == "beliefs"
parsed = BeliefMessage.model_validate_json(sent.body)
replaced_last = parsed.replace.pop()
assert replaced_last.name == "user_said"
assert replaced_last.arguments == [transcription]
@pytest.mark.asyncio
async def test_query_llm():
mock_response = MagicMock()
mock_response.json.return_value = {
"choices": [
{
"message": {
"content": "null",
}
}
]
}
mock_client = AsyncMock()
mock_client.post.return_value = mock_response
mock_async_client = MagicMock()
mock_async_client.__aenter__.return_value = mock_client
mock_async_client.__aexit__.return_value = None
with patch(
"control_backend.agents.bdi.text_belief_extractor_agent.httpx.AsyncClient",
return_value=mock_async_client,
):
llm = TextBeliefExtractorAgent.LLM(MagicMock(), 4)
res = await llm._query_llm("hello world", {"type": "null"})
# Response content was set as "null", so should be deserialized as None
assert res is None
@pytest.mark.asyncio
async def test_retry_query_llm_success(llm):
llm._query_llm.return_value = None
res = await llm.query("hello world", {"type": "null"})
llm._query_llm.assert_called_once()
assert res is None
@pytest.mark.asyncio
async def test_retry_query_llm_success_after_failure(llm):
llm._query_llm.side_effect = [KeyError(), "real value"]
res = await llm.query("hello world", {"type": "string"})
assert llm._query_llm.call_count == 2
assert res == "real value"
@pytest.mark.asyncio
async def test_retry_query_llm_failures(llm):
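# With the default retry budget of three attempts, every try fails and the fourth value is never consumed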
llm._query_llm.side_effect = [KeyError(), KeyError(), KeyError(), "real value"]
res = await llm.query("hello world", {"type": "string"})
assert llm._query_llm.call_count == 3
assert res is None
@pytest.mark.asyncio
async def test_retry_query_llm_fail_immediately(llm):
llm._query_llm.side_effect = [KeyError(), "real value"]
res = await llm.query("hello world", {"type": "string"}, tries=1)
assert llm._query_llm.call_count == 1
assert res is None
@pytest.mark.asyncio
async def test_extracting_semantic_beliefs(agent):
"""
The Program Manager sends beliefs to this agent. Test whether the agent handles them correctly.
"""
assert len(agent.belief_inferrer.available_beliefs) == 0
beliefs = BeliefList(
beliefs=[
KeywordBelief(
id=uuid.uuid4(),
name="keyword_hello",
keyword="hello",
),
SemanticBelief(
id=uuid.uuid4(), name="semantic_hello_1", description="Some semantic belief 1"
),
SemanticBelief(
id=uuid.uuid4(), name="semantic_hello_2", description="Some semantic belief 2"
),
]
)
await agent.handle_message(
InternalMessage(
to=settings.agent_settings.text_belief_extractor_name,
sender=settings.agent_settings.bdi_program_manager_name,
body=beliefs.model_dump_json(),
thread="beliefs",
),
)
assert len(agent.belief_inferrer.available_beliefs) == 2
@pytest.mark.asyncio
async def test_handle_invalid_beliefs(agent, sample_program):
agent.belief_inferrer.available_beliefs.append(sample_program.phases[0].norms[0].condition)
agent.belief_inferrer.available_beliefs.append(sample_program.phases[0].triggers[0].condition)
assert len(agent.belief_inferrer.available_beliefs) == 2
await agent.handle_message(
InternalMessage(
to=settings.agent_settings.text_belief_extractor_name,
sender=settings.agent_settings.bdi_program_manager_name,
body=json.dumps({"phases": "Invalid"}),
thread="beliefs",
),
)
assert len(agent.belief_inferrer.available_beliefs) == 2
@pytest.mark.asyncio
async def test_handle_robot_response(agent):
initial_length = len(agent.conversation.messages)
response = "Hi, I'm Pepper. What's your name?"
await agent.handle_message(
InternalMessage(
to=settings.agent_settings.text_belief_extractor_name,
sender=settings.agent_settings.llm_name,
body=response,
),
)
assert len(agent.conversation.messages) == initial_length + 1
assert agent.conversation.messages[-1].role == "assistant"
assert agent.conversation.messages[-1].content == response
@pytest.mark.asyncio
async def test_simulated_real_turn_with_beliefs(agent, llm, sample_program):
"""Test sending user message to extract beliefs from."""
agent.belief_inferrer.available_beliefs.append(sample_program.phases[0].norms[0].condition)
agent.belief_inferrer.available_beliefs.append(sample_program.phases[0].triggers[0].condition)
# Send a user message with the belief that there's no more booze
llm._query_llm.return_value = {"is_pirate": None, "no_more_booze": True}
assert len(agent.conversation.messages) == 0
await agent.handle_message(
InternalMessage(
to=settings.agent_settings.text_belief_extractor_name,
sender=settings.agent_settings.transcription_name,
body="We're all out of schnaps.",
),
)
assert len(agent.conversation.messages) == 1
# A new belief should be inferred and sent to the BDI core, in addition to the user_said belief
assert agent.send.call_count == 2
# The second send carries the inferred belief change
message: InternalMessage = agent.send.call_args_list[1].args[0]
beliefs = BeliefMessage.model_validate_json(message.body)
assert len(beliefs.create) == 1
assert beliefs.create[0].name == "no_more_booze"
@pytest.mark.asyncio
async def test_simulated_real_turn_no_beliefs(agent, llm, sample_program):
"""Test a user message to extract beliefs from, but no beliefs are formed."""
agent.belief_inferrer.available_beliefs.append(sample_program.phases[0].norms[0].condition)
agent.belief_inferrer.available_beliefs.append(sample_program.phases[0].triggers[0].condition)
# Send a user message with no new beliefs
llm._query_llm.return_value = {"is_pirate": None, "no_more_booze": None}
await agent.handle_message(
InternalMessage(
to=settings.agent_settings.text_belief_extractor_name,
sender=settings.agent_settings.transcription_name,
body="Hello there!",
),
)
# Only the user_said belief should've been sent
agent.send.assert_called_once()
@pytest.mark.asyncio
async def test_simulated_real_turn_no_new_beliefs(agent, llm, sample_program):
"""
Test a user message to extract beliefs from, but no new beliefs are formed because they already
existed.
"""
agent.belief_inferrer.available_beliefs.append(sample_program.phases[0].norms[0].condition)
agent.belief_inferrer.available_beliefs.append(sample_program.phases[0].triggers[0].condition)
agent._current_beliefs = BeliefState(true={InternalBelief(name="is_pirate", arguments=None)})
# Send a user message that still implies the user is a pirate
llm._query_llm.return_value = {"is_pirate": True, "no_more_booze": None}
await agent.handle_message(
InternalMessage(
to=settings.agent_settings.text_belief_extractor_name,
sender=settings.agent_settings.transcription_name,
body="Arr, nice to meet you, matey.",
),
)
# Only the user_said belief should've been sent, as no beliefs have changed
agent.send.assert_called_once()
@pytest.mark.asyncio
async def test_simulated_real_turn_remove_belief(agent, llm, sample_program):
"""
Test a user message to extract beliefs from, but an existing belief is determined no longer to
hold.
"""
agent.belief_inferrer.available_beliefs.append(sample_program.phases[0].norms[0].condition)
agent.belief_inferrer.available_beliefs.append(sample_program.phases[0].triggers[0].condition)
agent._current_beliefs = BeliefState(
true={InternalBelief(name="no_more_booze", arguments=None)},
)
# Send a user message indicating there is booze again, so no_more_booze no longer holds
llm._query_llm.return_value = {"is_pirate": None, "no_more_booze": False}
await agent.handle_message(
InternalMessage(
to=settings.agent_settings.text_belief_extractor_name,
sender=settings.agent_settings.transcription_name,
body="I found an untouched barrel of wine!",
),
)
# Both user_said and belief change should've been sent
assert agent.send.call_count == 2
# Agent's current beliefs should've changed
assert any(b.name == "no_more_booze" for b in agent._current_beliefs.false)
@pytest.mark.asyncio
async def test_infer_goal_completions_sends_beliefs(agent, llm):
"""Test that inferred goal completions are sent to the BDI core."""
goal = BaseGoal(
id=uuid.uuid4(), name="Say Hello", description="The user said hello", can_fail=True
)
agent.goal_inferrer.goals = {goal}
# Mock goal inference: goal is achieved
llm.query = AsyncMock(return_value=True)
await agent._infer_goal_completions()
# Should send belief change to BDI core
agent.send.assert_awaited_once()
sent: InternalMessage = agent.send.call_args.args[0]
assert sent.to == settings.agent_settings.bdi_core_name
assert sent.thread == "beliefs"
parsed = BeliefMessage.model_validate_json(sent.body)
assert len(parsed.create) == 1
assert parsed.create[0].name == "achieved_say_hello"
@pytest.mark.asyncio
async def test_llm_failure_handling(agent, llm, sample_program):
"""
Check that the agent handles failures gracefully without crashing.
"""
llm._query_llm.side_effect = httpx.HTTPError("")
agent.belief_inferrer.available_beliefs.append(sample_program.phases[0].norms[0].condition)
agent.belief_inferrer.available_beliefs.append(sample_program.phases[0].triggers[0].condition)
belief_changes = await agent.belief_inferrer.infer_from_conversation(
ChatHistory(
messages=[ChatMessage(role="user", content="Good day!")],
),
)
assert len(belief_changes.true) == 0
assert len(belief_changes.false) == 0
def test_belief_state_bool():
# Empty
bs = BeliefState()
assert not bs
# True set
bs_true = BeliefState(true={InternalBelief(name="a", arguments=None)})
assert bs_true
# False set
bs_false = BeliefState(false={InternalBelief(name="a", arguments=None)})
assert bs_false
@pytest.mark.asyncio
async def test_handle_beliefs_message_validation_error(agent, mock_settings):
# Invalid JSON
mock_settings.agent_settings.bdi_program_manager_name = "bdi_program_manager_agent"
msg = InternalMessage(
to="me",
sender=mock_settings.agent_settings.bdi_program_manager_name,
thread="beliefs",
body="invalid json",
)
# Should log warning and return
agent.logger = MagicMock()
await agent.handle_message(msg)
agent.logger.warning.assert_called()
# Invalid Model
msg.body = json.dumps({"beliefs": [{"invalid": "obj"}]})
await agent.handle_message(msg)
agent.logger.warning.assert_called()
@pytest.mark.asyncio
async def test_handle_goals_message_validation_error(agent, mock_settings):
mock_settings.agent_settings.bdi_program_manager_name = "bdi_program_manager_agent"
msg = InternalMessage(
to="me",
sender=mock_settings.agent_settings.bdi_program_manager_name,
thread="goals",
body="invalid json",
)
agent.logger = MagicMock()
await agent.handle_message(msg)
agent.logger.warning.assert_called()
@pytest.mark.asyncio
async def test_handle_goal_achieved_message_validation_error(agent, mock_settings):
mock_settings.agent_settings.bdi_program_manager_name = "bdi_program_manager_agent"
msg = InternalMessage(
to="me",
sender=mock_settings.agent_settings.bdi_program_manager_name,
thread="achieved_goals",
body="invalid json",
)
agent.logger = MagicMock()
await agent.handle_message(msg)
agent.logger.warning.assert_called()
@pytest.mark.asyncio
async def test_goal_inferrer_infer_from_conversation(agent, llm):
# Setup goals
# Use BaseGoal object as typically received by the extractor
g1 = BaseGoal(id=uuid.uuid4(), name="g1", description="desc", can_fail=True)
# Use real GoalAchievementInferrer
from control_backend.agents.bdi.text_belief_extractor_agent import GoalAchievementInferrer
inferrer = GoalAchievementInferrer(llm)
inferrer.goals = {g1}
# Mock LLM response
llm._query_llm.return_value = True
completions = await inferrer.infer_from_conversation(ChatHistory(messages=[]))
assert completions
# The exact key depends on the slugify library, so only check the value
assert list(completions.values())[0] is True
def test_apply_conversation_message_limit(agent):
with patch("control_backend.agents.bdi.text_belief_extractor_agent.settings") as mock_s:
mock_s.behaviour_settings.conversation_history_length_limit = 2
agent.conversation.messages = []
agent._apply_conversation_message(ChatMessage(role="user", content="1"))
agent._apply_conversation_message(ChatMessage(role="assistant", content="2"))
agent._apply_conversation_message(ChatMessage(role="user", content="3"))
assert len(agent.conversation.messages) == 2
assert agent.conversation.messages[0].content == "2"
assert agent.conversation.messages[1].content == "3"
@pytest.mark.asyncio
async def test_handle_program_manager_reset(agent):
with patch("control_backend.agents.bdi.text_belief_extractor_agent.settings") as mock_s:
mock_s.agent_settings.bdi_program_manager_name = "pm"
agent.conversation.messages = [ChatMessage(role="user", content="hi")]
agent.belief_inferrer.available_beliefs = [
SemanticBelief(id=uuid.uuid4(), name="b", description="d")
]
msg = InternalMessage(to="me", sender="pm", thread="conversation_history", body="reset")
await agent.handle_message(msg)
assert len(agent.conversation.messages) == 0
assert len(agent.belief_inferrer.available_beliefs) == 0
def test_split_into_chunks():
from control_backend.agents.bdi.text_belief_extractor_agent import SemanticBeliefInferrer
items = [1, 2, 3, 4, 5]
chunks = SemanticBeliefInferrer._split_into_chunks(items, 2)
assert len(chunks) == 2
assert len(chunks[0]) + len(chunks[1]) == 5
@pytest.mark.asyncio
async def test_infer_beliefs_call(agent, llm):
from control_backend.agents.bdi.text_belief_extractor_agent import SemanticBeliefInferrer
inferrer = SemanticBeliefInferrer(llm)
sb = SemanticBelief(id=uuid.uuid4(), name="is_happy", description="User is happy")
llm.query = AsyncMock(return_value={"is_happy": True})
res = await inferrer._infer_beliefs(ChatHistory(messages=[]), [sb])
assert res == {"is_happy": True}
llm.query.assert_called_once()
@pytest.mark.asyncio
async def test_infer_goal_call(agent, llm):
from control_backend.agents.bdi.text_belief_extractor_agent import GoalAchievementInferrer
inferrer = GoalAchievementInferrer(llm)
goal = BaseGoal(id=uuid.uuid4(), name="g1", description="d")
llm.query = AsyncMock(return_value=True)
res = await inferrer._infer_goal(ChatHistory(messages=[]), goal)
assert res is True
llm.query.assert_called_once()

View File

@@ -0,0 +1,101 @@
import json
from unittest.mock import AsyncMock, MagicMock
import pytest
from control_backend.agents.belief_collector.behaviours.continuous_collect import (
ContinuousBeliefCollector,
)
def create_mock_message(sender_node: str, body: str) -> MagicMock:
"""Helper function to create a configured mock message."""
msg = MagicMock()
msg.sender.node = sender_node # MagicMock automatically creates nested mocks
msg.body = body
return msg
@pytest.fixture
def mock_agent(mocker):
"""Fixture to create a mock Agent."""
agent = MagicMock()
agent.jid = "belief_collector_agent@test"
return agent
@pytest.fixture
def continuous_collector(mock_agent, mocker):
"""Fixture to create an instance of ContinuousBeliefCollector with a mocked agent."""
# Patch asyncio.sleep to prevent tests from actually waiting
mocker.patch("asyncio.sleep", return_value=None)
collector = ContinuousBeliefCollector()
collector.agent = mock_agent
# Mock the receive method, we will control its return value in each test
collector.receive = AsyncMock()
return collector
@pytest.mark.asyncio
async def test_run_message_received(continuous_collector, mocker):
"""
Test that when a message is received, _process_message is called with that message.
"""
# Arrange
mock_msg = MagicMock()
continuous_collector.receive.return_value = mock_msg
mocker.patch.object(continuous_collector, "_process_message")
# Act
await continuous_collector.run()
# Assert
continuous_collector._process_message.assert_awaited_once_with(mock_msg)
@pytest.mark.asyncio
async def test_routes_to_handle_belief_text_by_type(continuous_collector, mocker):
msg = create_mock_message(
"anyone",
json.dumps({"type": "belief_extraction_text", "beliefs": {"user_said": [["hi"]]}}),
)
spy = mocker.patch.object(continuous_collector, "_handle_belief_text", new=AsyncMock())
await continuous_collector._process_message(msg)
spy.assert_awaited_once()
@pytest.mark.asyncio
async def test_routes_to_handle_belief_text_by_sender(continuous_collector, mocker):
msg = create_mock_message(
"belief_text_agent_mock", json.dumps({"beliefs": {"user_said": [["hi"]]}})
)
spy = mocker.patch.object(continuous_collector, "_handle_belief_text", new=AsyncMock())
await continuous_collector._process_message(msg)
spy.assert_awaited_once()
@pytest.mark.asyncio
async def test_routes_to_handle_emo_text(continuous_collector, mocker):
msg = create_mock_message("anyone", json.dumps({"type": "emotion_extraction_text"}))
spy = mocker.patch.object(continuous_collector, "_handle_emo_text", new=AsyncMock())
await continuous_collector._process_message(msg)
spy.assert_awaited_once()
@pytest.mark.asyncio
async def test_belief_text_happy_path_sends(continuous_collector, mocker):
payload = {"type": "belief_extraction_text", "beliefs": {"user_said": ["hello test", "No"]}}
continuous_collector.send = AsyncMock()
await continuous_collector._handle_belief_text(payload, "belief_text_agent_mock")
# make sure we attempted a send
continuous_collector.send.assert_awaited_once()
@pytest.mark.asyncio
async def test_belief_text_coerces_non_strings(continuous_collector, mocker):
payload = {"type": "belief_extraction_text", "beliefs": {"user_said": [["hi", 123]]}}
continuous_collector.send = AsyncMock()
await continuous_collector._handle_belief_text(payload, "origin")
continuous_collector.send.assert_awaited_once()

View File

@@ -0,0 +1,187 @@
import json
from unittest.mock import AsyncMock, MagicMock, patch
import pytest
from spade.message import Message
from control_backend.agents.bdi.behaviours.text_belief_extractor import BeliefFromText
@pytest.fixture
def mock_settings():
"""
Mocks the settings object that the behaviour imports.
We patch it at the source where it's imported by the module under test.
"""
# Create a mock object that mimics the nested structure
settings_mock = MagicMock()
settings_mock.agent_settings.transcription_agent_name = "transcriber"
settings_mock.agent_settings.belief_collector_agent_name = "collector"
settings_mock.agent_settings.host = "fake.host"
# Patch the settings object at the path where the behaviour module under test imports it.
with patch(
"control_backend.agents.bdi.behaviours.text_belief_extractor.settings", settings_mock
):
yield settings_mock
@pytest.fixture
def behavior(mock_settings):
"""
Creates an instance of the BeliefFromText behaviour and mocks its
agent, logger, send, and receive methods.
"""
b = BeliefFromText()
b.agent = MagicMock()
b.send = AsyncMock()
b.receive = AsyncMock()
return b
def create_mock_message(sender_node: str, body: str, thread: str) -> MagicMock:
"""Helper function to create a configured mock message."""
msg = MagicMock()
msg.sender.node = sender_node # MagicMock automatically creates nested mocks
msg.body = body
msg.thread = thread
return msg
@pytest.mark.asyncio
async def test_run_no_message(behavior):
"""
Tests the run() method when no message is received.
"""
# Arrange: Configure receive to return None
behavior.receive.return_value = None
# Act: Run the behavior
await behavior.run()
# Assert
# 1. Check that receive was called
behavior.receive.assert_called_once()
# 2. Check that no message was sent
behavior.send.assert_not_called()
@pytest.mark.asyncio
async def test_run_message_from_other_agent(behavior):
"""
Tests the run() method when a message is received from an
unknown agent (not the transcriber).
"""
# Arrange: Create a mock message from an unknown sender
mock_msg = create_mock_message("unknown", "some data", None)
behavior.receive.return_value = mock_msg
behavior._process_transcription_demo = MagicMock()
# Act
await behavior.run()
# Assert
# 1. Check that receive was called
behavior.receive.assert_called_once()
# 2. Check that _process_transcription_demo was not called
behavior._process_transcription_demo.assert_not_called()
@pytest.mark.asyncio
async def test_run_message_from_transcriber_demo(behavior, mock_settings, monkeypatch):
"""
Tests the main success path: receiving a message from the
transcription agent, which triggers _process_transcription_demo.
"""
# Arrange: Create a mock message from the transcriber
transcription_text = "hello world"
mock_msg = create_mock_message(
mock_settings.agent_settings.transcription_agent_name, transcription_text, None
)
behavior.receive.return_value = mock_msg
# Act
await behavior.run()
# Assert
# 1. Check that receive was called
behavior.receive.assert_called_once()
# 2. Check that send was called *once*
behavior.send.assert_called_once()
# 3. Deeply inspect the message that was sent
sent_msg: Message = behavior.send.call_args[0][0]
assert (
sent_msg.to
== mock_settings.agent_settings.belief_collector_agent_name
+ "@"
+ mock_settings.agent_settings.host
)
# Check thread
assert sent_msg.thread == "beliefs"
# Parse the received JSON string back into a dict
expected_dict = {
"beliefs": {"user_said": [transcription_text]},
"type": "belief_extraction_text",
}
sent_dict = json.loads(sent_msg.body)
# Assert that the dictionaries are equal
assert sent_dict == expected_dict
@pytest.mark.asyncio
async def test_process_transcription_success(behavior, mock_settings):
"""
Tests the (currently unused) _process_transcription method's
success path, using its hardcoded mock response.
"""
# Arrange
test_text = "I am feeling happy"
# This is the hardcoded response inside the method
expected_response_body = '{"mood": [["happy"]]}'
# Act
await behavior._process_transcription(test_text)
# Assert
# 1. Check that a message was sent
behavior.send.assert_called_once()
# 2. Inspect the sent message
sent_msg: Message = behavior.send.call_args[0][0]
expected_to = (
mock_settings.agent_settings.belief_collector_agent_name
+ "@"
+ mock_settings.agent_settings.host
)
assert str(sent_msg.to) == expected_to
assert sent_msg.thread == "beliefs"
assert sent_msg.body == expected_response_body
@pytest.mark.asyncio
async def test_process_transcription_json_decode_error(behavior, mock_settings):
"""
Tests the _process_transcription method's error handling
when the (mocked) response is invalid JSON.
We do this by patching json.loads to raise an error.
"""
# Arrange
test_text = "I am feeling happy"
# Patch json.loads to raise an error when called
with patch("json.loads", side_effect=json.JSONDecodeError("Mock error", "", 0)):
# Act
await behavior._process_transcription(test_text)
# Assert
# 1. Check that NO message was sent
behavior.send.assert_not_called()

View File

@@ -1,433 +0,0 @@
import asyncio
from unittest.mock import ANY, AsyncMock, MagicMock, patch
import pytest
from control_backend.agents.communication.ri_communication_agent import RICommunicationAgent
from control_backend.core.agent_system import InternalMessage
from control_backend.schemas.ri_message import PauseCommand, RIEndpoint
def speech_agent_path():
return "control_backend.agents.communication.ri_communication_agent.RobotSpeechAgent"
def gesture_agent_path():
return "control_backend.agents.communication.ri_communication_agent.RobotGestureAgent"
@pytest.fixture
def zmq_context(mocker):
mock_context = mocker.patch(
"control_backend.agents.communication.ri_communication_agent.Context.instance"
)
mock_context.return_value = MagicMock()
return mock_context
def negotiation_message(
actuation_port: int = 5556,
bind_main: bool = False,
bind_actuation: bool = False,
main_port: int = 5555,
):
return {
"endpoint": "negotiate/ports",
"data": [
{"id": "main", "port": main_port, "bind": bind_main},
{"id": "actuation", "port": actuation_port, "bind": bind_actuation},
],
}
@pytest.mark.asyncio
async def test_setup_success_connects_and_starts_robot(zmq_context):
fake_socket = zmq_context.return_value.socket.return_value
fake_socket.send_json = AsyncMock()
fake_socket.recv_json = AsyncMock(return_value=negotiation_message())
fake_socket.send_multipart = AsyncMock()
with (
patch(speech_agent_path(), autospec=True) as MockSpeech,
patch(gesture_agent_path(), autospec=True) as MockGesture,
):
MockSpeech.return_value.start = AsyncMock()
MockGesture.return_value.start = AsyncMock()
agent = RICommunicationAgent("ri_comm", address="tcp://localhost:5555", bind=False)
def close_coro(coro):
coro.close()
return MagicMock()
agent.add_behavior = MagicMock(side_effect=close_coro)
await agent.setup()
fake_socket.connect.assert_any_call("tcp://localhost:5555")
fake_socket.send_json.assert_any_call({"endpoint": "negotiate/ports", "data": {}})
MockSpeech.return_value.start.assert_awaited_once()
MockGesture.return_value.start.assert_awaited_once()
MockSpeech.assert_called_once_with(ANY, address="tcp://localhost:5556", bind=False)
MockGesture.assert_called_once_with(
ANY,
address="tcp://localhost:5556",
bind=False,
gesture_data=[],
single_gesture_data=[],
)
agent.add_behavior.assert_called_once()
assert agent.connected is True
@pytest.mark.asyncio
async def test_setup_binds_when_requested(zmq_context):
fake_socket = zmq_context.return_value.socket.return_value
fake_socket.send_json = AsyncMock()
fake_socket.recv_json = AsyncMock(return_value=negotiation_message(bind_main=True))
fake_socket.send_multipart = AsyncMock()
agent = RICommunicationAgent("ri_comm", address="tcp://localhost:5555", bind=True)
def close_coro(coro):
coro.close()
return MagicMock()
agent.add_behavior = MagicMock(side_effect=close_coro)
with (
patch(speech_agent_path(), autospec=True) as MockSpeech,
patch(gesture_agent_path(), autospec=True) as MockGesture,
):
MockSpeech.return_value.start = AsyncMock()
MockGesture.return_value.start = AsyncMock()
await agent.setup()
fake_socket.bind.assert_any_call("tcp://localhost:5555")
agent.add_behavior.assert_called_once()
@pytest.mark.asyncio
async def test_negotiate_invalid_endpoint_retries(zmq_context):
fake_socket = zmq_context.return_value.socket.return_value
fake_socket.send_json = AsyncMock()
fake_socket.recv_json = AsyncMock(return_value={"endpoint": "ping", "data": {}})
fake_socket.send_multipart = AsyncMock()
agent = RICommunicationAgent("ri_comm", address="tcp://localhost:5555", bind=False)
agent._req_socket = fake_socket
success = await agent._negotiate_connection(max_retries=1)
assert success is False
@pytest.mark.asyncio
async def test_negotiate_timeout(zmq_context):
fake_socket = zmq_context.return_value.socket.return_value
fake_socket.send_json = AsyncMock()
fake_socket.recv_json = AsyncMock(side_effect=asyncio.TimeoutError)
fake_socket.send_multipart = AsyncMock()
agent = RICommunicationAgent("ri_comm", address="tcp://localhost:5555", bind=False)
agent._req_socket = fake_socket
success = await agent._negotiate_connection(max_retries=1)
assert success is False
@pytest.mark.asyncio
async def test_handle_negotiation_response_updates_req_socket(zmq_context):
fake_socket = zmq_context.return_value.socket.return_value
agent = RICommunicationAgent("ri_comm", address="tcp://localhost:5555", bind=False)
agent._req_socket = fake_socket
with (
patch(speech_agent_path(), autospec=True) as MockSpeech,
patch(gesture_agent_path(), autospec=True) as MockGesture,
):
MockSpeech.return_value.start = AsyncMock()
MockGesture.return_value.start = AsyncMock()
await agent._handle_negotiation_response(
negotiation_message(
main_port=6000,
actuation_port=6001,
bind_main=False,
bind_actuation=False,
)
)
fake_socket.connect.assert_any_call("tcp://localhost:6000")
@pytest.mark.asyncio
async def test_handle_disconnection_publishes_and_reconnects():
pub_socket = AsyncMock()
pub_socket.close = MagicMock()
agent = RICommunicationAgent("ri_comm")
agent.pub_socket = pub_socket
agent.connected = True
agent._negotiate_connection = AsyncMock(return_value=True)
await agent._handle_disconnection()
pub_socket.send_multipart.assert_awaited()
assert agent.connected is True
@pytest.mark.asyncio
async def test_listen_loop_handles_non_ping(zmq_context):
fake_socket = zmq_context.return_value.socket.return_value
fake_socket.send_json = AsyncMock()
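# Deliver one non-ping response and stop the loop so _listen_loop exits after handling it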
async def recv_once():
agent._running = False
return {"endpoint": "negotiate/ports", "data": {}}
fake_socket.recv_json = recv_once
agent = RICommunicationAgent("ri_comm")
agent._req_socket = fake_socket
agent.pub_socket = AsyncMock()
agent.connected = True
agent._running = True
await agent._listen_loop()
fake_socket.send_json.assert_called()
@pytest.mark.asyncio
async def test_negotiate_unexpected_error(zmq_context):
fake_socket = zmq_context.return_value.socket.return_value
fake_socket.send_json = AsyncMock()
fake_socket.recv_json = AsyncMock(side_effect=Exception("boom"))
agent = RICommunicationAgent("ri_comm")
agent._req_socket = fake_socket
assert await agent._negotiate_connection(max_retries=1) is False
@pytest.mark.asyncio
async def test_negotiate_handle_response_error(zmq_context):
fake_socket = zmq_context.return_value.socket.return_value
fake_socket.send_json = AsyncMock()
fake_socket.recv_json = AsyncMock(return_value=negotiation_message())
agent = RICommunicationAgent("ri_comm")
agent._req_socket = fake_socket
agent._handle_negotiation_response = AsyncMock(side_effect=Exception("bad response"))
assert await agent._negotiate_connection(max_retries=1) is False
@pytest.mark.asyncio
async def test_setup_warns_on_failed_negotiate(zmq_context, mocker):
fake_socket = zmq_context.return_value.socket.return_value
fake_socket.send_json = AsyncMock()
fake_socket.recv_json = AsyncMock()
agent = RICommunicationAgent("ri_comm")
def swallow(coro):
coro.close()
agent.add_behavior = swallow
agent._negotiate_connection = AsyncMock(return_value=False)
await agent.setup()
assert agent.connected is False
@pytest.mark.asyncio
async def test_handle_negotiation_response_unhandled_id():
agent = RICommunicationAgent("ri_comm")
await agent._handle_negotiation_response(
{"data": [{"id": "other", "port": 5000, "bind": False}]}
)
@pytest.mark.asyncio
async def test_handle_negotiation_response_audio(zmq_context):
agent = RICommunicationAgent("ri_comm")
with patch(
"control_backend.agents.communication.ri_communication_agent.VADAgent", autospec=True
) as MockVAD:
MockVAD.return_value.start = AsyncMock()
await agent._handle_negotiation_response(
{"data": [{"id": "audio", "port": 7000, "bind": False}]}
)
MockVAD.assert_called_once_with(
audio_in_address="tcp://localhost:7000", audio_in_bind=False
)
MockVAD.return_value.start.assert_awaited_once()
@pytest.mark.asyncio
async def test_stop_closes_sockets():
req = MagicMock()
pub = MagicMock()
agent = RICommunicationAgent("ri_comm")
agent._req_socket = req
agent.pub_socket = pub
await agent.stop()
req.close.assert_called_once()
pub.close.assert_called_once()
@pytest.mark.asyncio
async def test_listen_loop_not_connected(monkeypatch):
agent = RICommunicationAgent("ri_comm")
agent._running = True
agent.connected = False
agent._req_socket = AsyncMock()
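# Replace asyncio.sleep so the idle wait stops the loop instead of blocking the test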
async def fake_sleep(duration):
agent._running = False
monkeypatch.setattr("asyncio.sleep", fake_sleep)
await agent._listen_loop()
@pytest.mark.asyncio
async def test_listen_loop_send_and_recv_timeout():
req = AsyncMock()
req.send_json = AsyncMock(side_effect=TimeoutError)
req.recv_json = AsyncMock(side_effect=TimeoutError)
agent = RICommunicationAgent("ri_comm")
agent._req_socket = req
agent.pub_socket = AsyncMock()
agent.connected = True
agent._running = True
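# The disconnection handler stops the loop, so the test ends after the first timeout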
async def stop_run():
agent._running = False
agent._handle_disconnection = AsyncMock(side_effect=stop_run)
await agent._listen_loop()
agent._handle_disconnection.assert_awaited()
@pytest.mark.asyncio
async def test_listen_loop_missing_endpoint(monkeypatch):
req = AsyncMock()
req.send_json = AsyncMock()
async def recv_once():
agent._running = False
return {"data": {}}
req.recv_json = recv_once
agent = RICommunicationAgent("ri_comm")
agent._req_socket = req
agent.pub_socket = AsyncMock()
agent.connected = True
agent._running = True
await agent._listen_loop()
@pytest.mark.asyncio
async def test_listen_loop_generic_exception():
req = AsyncMock()
req.send_json = AsyncMock()
req.recv_json = AsyncMock(side_effect=ValueError("boom"))
agent = RICommunicationAgent("ri_comm")
agent._req_socket = req
agent.pub_socket = AsyncMock()
agent.connected = True
agent._running = True
with pytest.raises(ValueError):
await agent._listen_loop()
@pytest.mark.asyncio
async def test_handle_disconnection_timeout(monkeypatch):
pub = AsyncMock()
pub.close = MagicMock()
pub.send_multipart = AsyncMock(side_effect=TimeoutError)
agent = RICommunicationAgent("ri_comm")
agent.pub_socket = pub
agent._negotiate_connection = AsyncMock(return_value=False)
await agent._handle_disconnection()
pub.send_multipart.assert_awaited()
@pytest.mark.asyncio
async def test_listen_loop_ping_sends_internal(zmq_context):
fake_socket = zmq_context.return_value.socket.return_value
fake_socket.send_json = AsyncMock()
pub_socket = AsyncMock()
agent = RICommunicationAgent("ri_comm")
agent._req_socket = fake_socket
agent.pub_socket = pub_socket
agent.connected = True
agent._running = True
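# Answer with a single ping, then stop the loop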
async def recv_once():
agent._running = False
return {"endpoint": "ping", "data": {}}
fake_socket.recv_json = recv_once
await agent._listen_loop()
pub_socket.send_multipart.assert_awaited()
@pytest.mark.asyncio
async def test_negotiate_req_socket_none_causes_retry(zmq_context):
agent = RICommunicationAgent("ri_comm")
agent._req_socket = None
result = await agent._negotiate_connection(max_retries=1)
assert result is False
@pytest.mark.asyncio
async def test_handle_message_pause_command(zmq_context):
"""Test handle_message with a valid PauseCommand."""
agent = RICommunicationAgent("ri_comm")
agent._req_socket = AsyncMock()
agent.logger = MagicMock()
agent._req_socket.recv_json.return_value = {"status": "ok"}
pause_cmd = PauseCommand(data=True)
msg = InternalMessage(to="ri_comm", sender="user_int", body=pause_cmd.model_dump_json())
await agent.handle_message(msg)
agent._req_socket.send_json.assert_awaited_once()
args = agent._req_socket.send_json.await_args[0][0]
assert args["endpoint"] == RIEndpoint.PAUSE.value
assert args["data"] is True
@pytest.mark.asyncio
async def test_handle_message_invalid_pause_command(zmq_context):
"""Test handle_message with invalid JSON."""
agent = RICommunicationAgent("ri_comm")
agent._req_socket = AsyncMock()
agent.logger = MagicMock()
msg = InternalMessage(to="ri_comm", sender="user_int", body="invalid json")
await agent.handle_message(msg)
agent.logger.warning.assert_called_with("Incorrect message format for PauseCommand.")
agent._req_socket.send_json.assert_not_called()

View File

@@ -1,324 +0,0 @@
"""Mocks `httpx` and tests chunking logic."""
from unittest.mock import AsyncMock, MagicMock, patch
import httpx
import pytest
from control_backend.agents.llm.llm_agent import LLMAgent, LLMInstructions
from control_backend.core.agent_system import InternalMessage
from control_backend.schemas.llm_prompt_message import LLMPromptMessage
@pytest.fixture
def mock_httpx_client():
with patch("httpx.AsyncClient") as mock_cls:
mock_client = AsyncMock()
mock_cls.return_value.__aenter__.return_value = mock_client
yield mock_client
@pytest.mark.asyncio
async def test_llm_processing_success(mock_httpx_client, mock_settings):
# Setup the mock response for the stream
mock_response = MagicMock()
mock_response.raise_for_status = MagicMock()
# Simulate stream lines
lines = [
b'data: {"choices": [{"delta": {"content": "Hello"}}]}',
b'data: {"choices": [{"delta": {"content": " world"}}]}',
b'data: {"choices": [{"delta": {"content": "."}}]}',
b"data: [DONE]",
]
async def aiter_lines_gen():
for line in lines:
yield line.decode()
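# Use side_effect so each call to aiter_lines() returns a fresh async generator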
mock_response.aiter_lines.side_effect = aiter_lines_gen
mock_stream_context = MagicMock()
mock_stream_context.__aenter__ = AsyncMock(return_value=mock_response)
mock_stream_context.__aexit__ = AsyncMock(return_value=None)
# Configure the client
mock_httpx_client.stream = MagicMock(return_value=mock_stream_context)
# Setup Agent
agent = LLMAgent("llm_agent")
agent.send = AsyncMock() # Mock the send method to verify replies
mock_logger = MagicMock()
agent.logger = mock_logger
# Simulate receiving a message from BDI
prompt = LLMPromptMessage(text="Hi", norms=[], goals=[])
msg = InternalMessage(
to="llm_agent",
sender=mock_settings.agent_settings.bdi_core_name,
body=prompt.model_dump_json(),
thread="prompt_message", # REQUIRED: thread must match handle_message logic
)
await agent.handle_message(msg)
# Verification
# "Hello world." constitutes one sentence/chunk based on punctuation split
# The agent should call send once with the full sentence, plus once more with the full reply
assert agent.send.called
# Check args. We expect at least one call sending "Hello world."
calls = agent.send.call_args_list
bodies = [c[0][0].body for c in calls]
assert any("Hello world." in b for b in bodies)
@pytest.mark.asyncio
async def test_llm_processing_errors(mock_httpx_client, mock_settings):
agent = LLMAgent("llm_agent")
agent.send = AsyncMock()
prompt = LLMPromptMessage(text="Hi", norms=[], goals=[])
msg = InternalMessage(
to="llm",
sender=mock_settings.agent_settings.bdi_core_name,
body=prompt.model_dump_json(),
thread="prompt_message",
)
# HTTP Error: stream method RAISES exception immediately
mock_httpx_client.stream = MagicMock(side_effect=httpx.HTTPError("Fail"))
await agent.handle_message(msg)
# Check that error message was sent
assert agent.send.called
assert "LLM service unavailable." in agent.send.call_args_list[0][0][0].body
# General Exception
agent.send.reset_mock()
mock_httpx_client.stream = MagicMock(side_effect=Exception("Boom"))
await agent.handle_message(msg)
assert "Error processing the request." in agent.send.call_args_list[0][0][0].body
@pytest.mark.asyncio
async def test_llm_json_error(mock_httpx_client, mock_settings):
# Test malformed JSON in stream
mock_response = MagicMock()
mock_response.raise_for_status = MagicMock()
async def aiter_lines_gen():
yield "data: {bad_json"
yield "data: [DONE]"
mock_response.aiter_lines.side_effect = aiter_lines_gen
mock_stream_context = MagicMock()
mock_stream_context.__aenter__ = AsyncMock(return_value=mock_response)
mock_stream_context.__aexit__ = AsyncMock(return_value=None)
mock_httpx_client.stream = MagicMock(return_value=mock_stream_context)
agent = LLMAgent("llm_agent")
agent.send = AsyncMock()
# Ensure logger is mocked
agent.logger = MagicMock()
prompt = LLMPromptMessage(text="Hi", norms=[], goals=[])
msg = InternalMessage(
to="llm",
sender=mock_settings.agent_settings.bdi_core_name,
body=prompt.model_dump_json(),
thread="prompt_message",
)
await agent.handle_message(msg)
agent.logger.error.assert_called() # Should log JSONDecodeError
def test_llm_instructions():
# Full custom
instr = LLMInstructions(norms=["N1", "N2"], goals=["G1", "G2"])
text = instr.build_developer_instruction()
assert "Norms to follow:\n- N1\n- N2" in text
assert "Goals to reach:\n- G1\n- G2" in text
# Defaults
instr_def = LLMInstructions()
text_def = instr_def.build_developer_instruction()
assert "Norms to follow" in text_def
assert "Goals to reach" in text_def
@pytest.mark.asyncio
async def test_handle_message_validation_error_branch_no_send(mock_httpx_client, mock_settings):
"""
Covers the ValidationError branch:
except ValidationError:
self.logger.debug("Prompt message from BDI core is invalid.")
Assert: no message is sent.
"""
agent = LLMAgent("llm_agent")
agent.send = AsyncMock()
# Invalid JSON that triggers ValidationError in LLMPromptMessage
invalid_json = '{"text": "Hi", "wrong_field": 123}' # field not in schema
msg = InternalMessage(
to="llm_agent",
sender=mock_settings.agent_settings.bdi_core_name,
body=invalid_json,
thread="prompt_message",
)
await agent.handle_message(msg)
# Should not send any reply
agent.send.assert_not_called()
@pytest.mark.asyncio
async def test_handle_message_ignored_sender_branch_no_send(mock_httpx_client, mock_settings):
"""
Covers the else branch for messages not from BDI core:
else:
self.logger.debug("Message ignored (not from BDI core.")
Assert: no message is sent.
"""
agent = LLMAgent("llm_agent")
agent.send = AsyncMock()
msg = InternalMessage(
to="llm_agent",
sender="some_other_agent", # Not BDI core
body='{"text": "Hi"}',
)
await agent.handle_message(msg)
# Should not send any reply
agent.send.assert_not_called()
@pytest.mark.asyncio
async def test_query_llm_yields_final_tail_chunk(mock_settings):
"""
Covers the branch: if current_chunk: yield current_chunk
Ensure that the last partial chunk is emitted.
"""
agent = LLMAgent("llm_agent")
agent.send = AsyncMock()
agent.logger = MagicMock()
agent.logger.llm = MagicMock()
# Patch _stream_query_llm to yield tokens that do NOT end with punctuation
async def fake_stream(messages):
yield "Hello"
yield " world" # No punctuation to trigger the normal chunking
agent._stream_query_llm = fake_stream
prompt = LLMPromptMessage(text="Hi", norms=[], goals=[])
# Collect chunks yielded
chunks = []
async for chunk in agent._query_llm(prompt.text, prompt.norms, prompt.goals):
chunks.append(chunk)
# The final chunk should be yielded
assert chunks[-1] == "Hello world"
assert any("Hello" in c for c in chunks)
@pytest.mark.asyncio
async def test_stream_query_llm_skips_non_data_lines(mock_httpx_client, mock_settings):
"""
Covers: if not line or not line.startswith("data: "): continue
Feed lines that are empty or do not start with 'data:' and check they are skipped.
"""
# Mock response
mock_response = MagicMock()
mock_response.raise_for_status = MagicMock()
lines = [
"", # empty line
"not data", # invalid prefix
'data: {"choices": [{"delta": {"content": "Hi"}}]}',
"data: [DONE]",
]
async def aiter_lines_gen():
for line in lines:
yield line
mock_response.aiter_lines.side_effect = aiter_lines_gen
# Proper async context manager for stream
mock_stream_context = MagicMock()
mock_stream_context.__aenter__ = AsyncMock(return_value=mock_response)
mock_stream_context.__aexit__ = AsyncMock(return_value=None)
# Make stream return the async context manager
mock_httpx_client.stream = MagicMock(return_value=mock_stream_context)
agent = LLMAgent("llm_agent")
agent.send = AsyncMock()
# Patch settings for local LLM URL
with patch("control_backend.agents.llm.llm_agent.settings") as mock_sett:
mock_sett.llm_settings.local_llm_url = "http://localhost"
mock_sett.llm_settings.local_llm_model = "test-model"
# Collect tokens
tokens = []
async for token in agent._stream_query_llm([]):
tokens.append(token)
# Only the valid 'data:' line should yield content
assert tokens == ["Hi"]
@pytest.mark.asyncio
async def test_clear_history_command(mock_settings):
"""Test that the 'clear_history' message clears the agent's memory."""
# setup LLM to have some history
mock_settings.agent_settings.bdi_program_manager_name = "bdi_program_manager_agent"
agent = LLMAgent("llm_agent")
agent.history = [
{"role": "user", "content": "Old conversation context"},
{"role": "assistant", "content": "Old response"},
]
assert len(agent.history) == 2
msg = InternalMessage(
to="llm_agent",
sender=mock_settings.agent_settings.bdi_program_manager_name,
body="clear_history",
)
await agent.handle_message(msg)
assert len(agent.history) == 0
@pytest.mark.asyncio
async def test_handle_assistant_and_user_messages(mock_settings):
agent = LLMAgent("llm_agent")
# Assistant message
msg_ast = InternalMessage(
to="llm_agent",
sender=mock_settings.agent_settings.bdi_core_name,
thread="assistant_message",
body="I said this",
)
await agent.handle_message(msg_ast)
assert agent.history[-1] == {"role": "assistant", "content": "I said this"}
# User message
msg_usr = InternalMessage(
to="llm_agent",
sender=mock_settings.agent_settings.bdi_core_name,
thread="user_message",
body="User said this",
)
await agent.handle_message(msg_usr)
assert agent.history[-1] == {"role": "user", "content": "User said this"}

View File

@@ -1,217 +0,0 @@
import asyncio
from unittest.mock import AsyncMock, MagicMock, patch
import numpy as np
import pytest
from control_backend.agents.perception.transcription_agent.speech_recognizer import (
MLXWhisperSpeechRecognizer,
OpenAIWhisperSpeechRecognizer,
SpeechRecognizer,
)
from control_backend.agents.perception.transcription_agent.transcription_agent import (
TranscriptionAgent,
)
@pytest.mark.asyncio
async def test_transcription_agent_flow(mock_zmq_context):
mock_sub = MagicMock()
mock_sub.recv = AsyncMock()
# Setup context to return this specific mock socket
mock_zmq_context.instance.return_value.socket.return_value = mock_sub
# Data: [Audio Bytes, Cancel Loop]
fake_audio = np.zeros(16000, dtype=np.float32).tobytes()
mock_sub.recv.side_effect = [fake_audio, asyncio.CancelledError()]
# Mock Recognizer
with patch.object(SpeechRecognizer, "best_type") as mock_best:
mock_recognizer = MagicMock()
mock_recognizer.recognize_speech.return_value = "Hello"
mock_best.return_value = mock_recognizer
agent = TranscriptionAgent("tcp://in")
agent.send = AsyncMock()
agent._running = True
def close_coro(coro):
coro.close()
return MagicMock()
agent.add_behavior = MagicMock(side_effect=close_coro)
await agent.setup()
try:
await agent._transcribing_loop()
except asyncio.CancelledError:
pass
# Check transcription happened
assert mock_recognizer.recognize_speech.called
# Check sending
assert agent.send.called
assert agent.send.call_args[0][0].body == "Hello"
await agent.stop()
@pytest.mark.asyncio
async def test_transcription_empty(mock_zmq_context):
mock_sub = MagicMock()
mock_sub.recv = AsyncMock()
mock_zmq_context.instance.return_value.socket.return_value = mock_sub
# Return valid audio, but recognizer returns empty string
fake_audio = np.zeros(10, dtype=np.float32).tobytes()
mock_sub.recv.side_effect = [fake_audio, asyncio.CancelledError()]
with patch.object(SpeechRecognizer, "best_type") as mock_best:
mock_recognizer = MagicMock()
mock_recognizer.recognize_speech.return_value = ""
mock_best.return_value = mock_recognizer
agent = TranscriptionAgent("tcp://in")
agent.send = AsyncMock()
await agent.setup()
try:
await agent._transcribing_loop()
except asyncio.CancelledError:
pass
# Should NOT send message
agent.send.assert_not_called()
def test_speech_recognizer_factory():
# Test Factory Logic
with patch("torch.mps.is_available", return_value=True):
assert isinstance(SpeechRecognizer.best_type(), MLXWhisperSpeechRecognizer)
with patch("torch.mps.is_available", return_value=False):
assert isinstance(SpeechRecognizer.best_type(), OpenAIWhisperSpeechRecognizer)
def test_openai_recognizer():
with patch("whisper.load_model") as load_mock:
with patch("whisper.transcribe") as trans_mock:
rec = OpenAIWhisperSpeechRecognizer()
rec.load_model()
load_mock.assert_called()
trans_mock.return_value = {"text": "Hi"}
res = rec.recognize_speech(np.zeros(10))
assert res == "Hi"
def test_mlx_recognizer():
# Fix: On Linux, 'mlx_whisper' isn't imported by the module, so it's missing from dir().
# We must use create=True to inject it into the module namespace during the test.
module_path = "control_backend.agents.perception.transcription_agent.speech_recognizer"
with patch("sys.platform", "darwin"):
with patch(f"{module_path}.mlx_whisper", create=True) as mlx_mock:
with patch(f"{module_path}.ModelHolder", create=True) as holder_mock:
# We also need to mock mlx.core if it's used for types/constants
with patch(f"{module_path}.mx", create=True):
rec = MLXWhisperSpeechRecognizer()
rec.load_model()
holder_mock.get_model.assert_called()
mlx_mock.transcribe.return_value = {"text": "Hi"}
res = rec.recognize_speech(np.zeros(10))
assert res == "Hi"
@pytest.mark.asyncio
async def test_transcription_loop_continues_after_error(mock_zmq_context):
mock_sub = MagicMock()
mock_sub.recv = AsyncMock()
mock_zmq_context.instance.return_value.socket.return_value = mock_sub
fake_audio = np.zeros(16000, dtype=np.float32).tobytes()
mock_sub.recv.side_effect = [
fake_audio, # first iteration → recognizer fails
asyncio.CancelledError(), # second iteration → stop loop
]
with patch.object(SpeechRecognizer, "best_type") as mock_best:
mock_recognizer = MagicMock()
mock_recognizer.recognize_speech.side_effect = RuntimeError("fail")
mock_best.return_value = mock_recognizer
agent = TranscriptionAgent("tcp://in")
agent._running = True # ← REQUIRED to enter the loop
agent.send = AsyncMock() # should never be called
def close_coro(coro):
coro.close()
return MagicMock()
agent.add_behavior = MagicMock(side_effect=close_coro) # match other tests
await agent.setup()
try:
await agent._transcribing_loop()
except asyncio.CancelledError:
pass
# recognizer failed, so we should never send anything
agent.send.assert_not_called()
# recv must have been called twice (audio then CancelledError)
assert mock_sub.recv.call_count == 2
@pytest.mark.asyncio
async def test_transcription_continue_branch_when_empty(mock_zmq_context):
mock_sub = MagicMock()
mock_sub.recv = AsyncMock()
mock_zmq_context.instance.return_value.socket.return_value = mock_sub
# First recv → audio chunk
# Second recv → Cancel loop → stop iteration
fake_audio = np.zeros(16000, dtype=np.float32).tobytes()
mock_sub.recv.side_effect = [fake_audio, asyncio.CancelledError()]
with patch.object(SpeechRecognizer, "best_type") as mock_best:
mock_recognizer = MagicMock()
mock_recognizer.recognize_speech.return_value = ""  # <-- triggers the continue branch
mock_best.return_value = mock_recognizer
agent = TranscriptionAgent("tcp://in")
# Make loop runnable
agent._running = True
agent.send = AsyncMock()
def close_coro(coro):
coro.close()
return MagicMock()
agent.add_behavior = MagicMock(side_effect=close_coro)
await agent.setup()
# Execute loop manually
try:
await agent._transcribing_loop()
except asyncio.CancelledError:
pass
# → Because of "continue", NO sending should occur
agent.send.assert_not_called()
# → Continue was hit, so we must have read exactly 2 times:
# - first audio
# - second CancelledError
assert mock_sub.recv.call_count == 2
# → recognizer was called once (first iteration)
assert mock_recognizer.recognize_speech.call_count == 1

View File

@@ -1,152 +0,0 @@
from unittest.mock import AsyncMock, MagicMock, patch
import pytest
from control_backend.agents.perception.vad_agent import VADAgent
from control_backend.core.agent_system import InternalMessage
from control_backend.schemas.program_status import PROGRAM_STATUS, ProgramStatus
@pytest.fixture(autouse=True)
def mock_zmq():
with patch("zmq.asyncio.Context") as mock:
mock.instance.return_value = MagicMock()
yield mock
@pytest.fixture
def agent():
return VADAgent("tcp://localhost:5555", False)
@pytest.mark.asyncio
async def test_handle_message_pause(agent):
agent._paused = MagicMock()
# It starts set (not paused)
msg = InternalMessage(to="vad", sender="user_interrupt_agent", body="PAUSE")
# We need to mock settings to match sender name
with patch("control_backend.agents.perception.vad_agent.settings") as mock_settings:
mock_settings.agent_settings.user_interrupt_name = "user_interrupt_agent"
await agent.handle_message(msg)
agent._paused.clear.assert_called_once()
assert agent._reset_needed is True
@pytest.mark.asyncio
async def test_handle_message_resume(agent):
agent._paused = MagicMock()
msg = InternalMessage(to="vad", sender="user_interrupt_agent", body="RESUME")
with patch("control_backend.agents.perception.vad_agent.settings") as mock_settings:
mock_settings.agent_settings.user_interrupt_name = "user_interrupt_agent"
await agent.handle_message(msg)
agent._paused.set.assert_called_once()
@pytest.mark.asyncio
async def test_handle_message_unknown_command(agent):
agent._paused = MagicMock()
msg = InternalMessage(to="vad", sender="user_interrupt_agent", body="UNKNOWN")
with patch("control_backend.agents.perception.vad_agent.settings") as mock_settings:
mock_settings.agent_settings.user_interrupt_name = "user_interrupt_agent"
agent.logger = MagicMock()
await agent.handle_message(msg)
agent._paused.clear.assert_not_called()
agent._paused.set.assert_not_called()
@pytest.mark.asyncio
async def test_handle_message_unknown_sender(agent):
agent._paused = MagicMock()
msg = InternalMessage(to="vad", sender="other_agent", body="PAUSE")
with patch("control_backend.agents.perception.vad_agent.settings") as mock_settings:
mock_settings.agent_settings.user_interrupt_name = "user_interrupt_agent"
await agent.handle_message(msg)
agent._paused.clear.assert_not_called()
@pytest.mark.asyncio
async def test_status_loop_waits_for_running(agent):
agent._running = True
agent.program_sub_socket = AsyncMock()
agent.program_sub_socket.close = MagicMock()
agent._reset_stream = AsyncMock()
# Sequence of messages:
# 1. Wrong topic
# 2. Right topic, wrong status (STARTING)
# 3. Right topic, RUNNING -> Should break loop
agent.program_sub_socket.recv_multipart.side_effect = [
(b"wrong_topic", b"whatever"),
(PROGRAM_STATUS, ProgramStatus.STARTING.value),
(PROGRAM_STATUS, ProgramStatus.RUNNING.value),
]
await agent._status_loop()
assert agent._reset_stream.await_count == 1
agent.program_sub_socket.close.assert_called_once()
@pytest.mark.asyncio
async def test_setup_success(agent, mock_zmq):
def close_coro(coro):
coro.close()
return MagicMock()
agent.add_behavior = MagicMock(side_effect=close_coro)
mock_context = mock_zmq.instance.return_value
mock_sub = MagicMock()
mock_pub = MagicMock()
# We expect multiple socket calls:
# 1. audio_in (SUB)
# 2. audio_out (PUB)
# 3. program_sub (SUB)
mock_context.socket.side_effect = [mock_sub, mock_pub, mock_sub]
with patch("control_backend.agents.perception.vad_agent.torch.hub.load") as mock_load:
mock_load.return_value = (MagicMock(), None)
with patch("control_backend.agents.perception.vad_agent.TranscriptionAgent") as MockTrans:
mock_trans_instance = MockTrans.return_value
mock_trans_instance.start = AsyncMock()
await agent.setup()
mock_trans_instance.start.assert_awaited_once()
assert agent.add_behavior.call_count == 2 # streaming_loop + status_loop
assert agent.audio_in_socket is not None
assert agent.audio_out_socket is not None
assert agent.program_sub_socket is not None
@pytest.mark.asyncio
async def test_reset_stream(agent):
mock_poller = MagicMock()
agent.audio_in_poller = mock_poller
# poll() returns data twice, then None
mock_poller.poll = AsyncMock(side_effect=[b"data", b"data", None])
agent._ready = MagicMock()
await agent._reset_stream()
assert mock_poller.poll.await_count == 3
agent._ready.set.assert_called_once()

View File

@@ -1,223 +0,0 @@
from unittest.mock import AsyncMock, MagicMock, patch
import numpy as np
import pytest
import zmq
from control_backend.agents.perception.vad_agent import VADAgent
from control_backend.core.config import settings
# We don't want to use real ZMQ in unit tests, for example because it can give errors when sockets
# aren't closed properly.
@pytest.fixture(autouse=True)
def mock_zmq():
with patch("zmq.asyncio.Context") as mock:
mock.instance.return_value = MagicMock()
yield mock
@pytest.fixture
def audio_out_socket():
return AsyncMock()
@pytest.fixture
def vad_agent(audio_out_socket):
return VADAgent("tcp://localhost:5555", False)
@pytest.fixture(autouse=True)
def patch_settings(monkeypatch):
# Patch the settings that the VAD agent's streaming loop reads
from control_backend.agents.perception import vad_agent
monkeypatch.setattr(
vad_agent.settings.behaviour_settings, "vad_prob_threshold", 0.5, raising=False
)
monkeypatch.setattr(
vad_agent.settings.behaviour_settings, "vad_non_speech_patience_chunks", 2, raising=False
)
monkeypatch.setattr(
vad_agent.settings.behaviour_settings, "vad_initial_since_speech", 0, raising=False
)
monkeypatch.setattr(vad_agent.settings.vad_settings, "sample_rate_hz", 16_000, raising=False)
async def simulate_streaming_with_probabilities(streaming, probabilities: list[float]):
"""
Simulates a streaming scenario with given VAD model probabilities for testing purposes.
:param streaming: The streaming component to be tested.
:param probabilities: A list of probabilities representing the outputs of the VAD model.
"""
model_item = MagicMock()
model_item.item.side_effect = probabilities
streaming.model = MagicMock(return_value=model_item)
# Prepare deterministic audio chunks and a poller that stops the loop when exhausted
chunk_bytes = np.empty(shape=512, dtype=np.float32).tobytes()
chunks = [chunk_bytes for _ in probabilities]
class DummyPoller:
def __init__(self, data, agent):
self.data = data
self.agent = agent
async def poll(self, timeout_ms=None):
if self.data:
return self.data.pop(0)
# Stop the loop cleanly once we've consumed all chunks
self.agent._running = False
return None
streaming.audio_in_poller = DummyPoller(chunks, streaming)
streaming._ready = AsyncMock()
streaming._running = True
await streaming._streaming_loop()
@pytest.mark.asyncio
async def test_voice_activity_detected(audio_out_socket, vad_agent):
"""
Test a scenario where there is voice activity detected between silences.
"""
speech_chunk_count = 5
probabilities = [0.0] * 5 + [1.0] * speech_chunk_count + [0.0] * 5
vad_agent.audio_out_socket = audio_out_socket
await simulate_streaming_with_probabilities(vad_agent, probabilities)
audio_out_socket.send.assert_called_once()
data = audio_out_socket.send.call_args[0][0]
assert isinstance(data, bytes)
assert len(data) == 512 * 4 * (speech_chunk_count + 1)
@pytest.mark.asyncio
async def test_voice_activity_short_pause(audio_out_socket, vad_agent):
"""
Test a scenario where there is a short pause between speech, checking whether it ignores the
short pause.
"""
speech_chunk_count = 5
probabilities = (
[0.0] * 5 + [1.0] * speech_chunk_count + [0.0] + [1.0] * speech_chunk_count + [0.0] * 5
)
vad_agent.audio_out_socket = audio_out_socket
await simulate_streaming_with_probabilities(vad_agent, probabilities)
audio_out_socket.send.assert_called_once()
data = audio_out_socket.send.call_args[0][0]
assert isinstance(data, bytes)
# Expecting 12 chunks (2*5 with speech, 1 pause between, 1 as padding)
assert len(data) == 512 * 4 * (speech_chunk_count * 2 + 1 + 1)
@pytest.mark.asyncio
async def test_no_data(audio_out_socket, vad_agent):
"""
Test a scenario where there is no data received. This should not cause errors.
"""
class DummyPoller:
async def poll(self, timeout_ms=None):
vad_agent._running = False
return None
vad_agent.audio_out_socket = audio_out_socket
vad_agent.audio_in_poller = DummyPoller()
vad_agent._ready = AsyncMock()
vad_agent._running = True
await vad_agent._streaming_loop()
audio_out_socket.send.assert_not_called()
assert len(vad_agent.audio_buffer) == 0
@pytest.mark.asyncio
async def test_streaming_loop_reset_needed(audio_out_socket, vad_agent):
"""Test that _reset_needed branch works as expected."""
vad_agent._reset_needed = True
vad_agent._ready.set()
vad_agent._paused.set()
vad_agent._running = True
vad_agent.audio_buffer = np.array([1.0], dtype=np.float32)
vad_agent.i_since_speech = 0
# Mock _reset_stream to stop the loop by setting _running=False
async def mock_reset():
vad_agent._running = False
vad_agent._reset_stream = mock_reset
# Needs a poller to avoid AssertionError
vad_agent.audio_in_poller = AsyncMock()
vad_agent.audio_in_poller.poll.return_value = None
await vad_agent._streaming_loop()
assert vad_agent._reset_needed is False
assert len(vad_agent.audio_buffer) == 0
assert vad_agent.i_since_speech == settings.behaviour_settings.vad_initial_since_speech
@pytest.mark.asyncio
async def test_streaming_loop_no_data_clears_buffer(audio_out_socket, vad_agent):
"""Test that if poll returns None, buffer is cleared if not empty."""
vad_agent.audio_buffer = np.array([1.0], dtype=np.float32)
vad_agent._ready.set()
vad_agent._paused.set()
vad_agent._running = True
class MockPoller:
async def poll(self, timeout_ms=None):
vad_agent._running = False # stop after one poll
return None
vad_agent.audio_in_poller = MockPoller()
await vad_agent._streaming_loop()
assert len(vad_agent.audio_buffer) == 0
assert vad_agent.i_since_speech == settings.behaviour_settings.vad_initial_since_speech
@pytest.mark.asyncio
async def test_vad_model_load_failure_stops_agent(vad_agent):
"""
Test that if loading the VAD model raises an Exception, it is caught,
the agent logs an exception, stops itself, and setup returns.
"""
# Patch torch.hub.load to raise an exception
with patch(
"control_backend.agents.perception.vad_agent.torch.hub.load",
side_effect=Exception("model fail"),
):
# Patch stop to an AsyncMock so we can check it was awaited
vad_agent.stop = AsyncMock()
await vad_agent.setup()
# Assert stop was called
vad_agent.stop.assert_awaited_once()
@pytest.mark.asyncio
async def test_audio_out_bind_failure_sets_none_and_logs(vad_agent, caplog):
"""
Test that if binding the output socket raises ZMQBindError,
audio_out_socket is set to None, None is returned, and an error is logged.
"""
mock_socket = MagicMock()
mock_socket.bind.side_effect = zmq.ZMQBindError()
with patch("control_backend.agents.perception.vad_agent.azmq.Context.instance") as mock_ctx:
mock_ctx.return_value.socket.return_value = mock_socket
with caplog.at_level("ERROR"):
port = vad_agent._connect_audio_out_socket()
assert port is None
assert vad_agent.audio_out_socket is None
assert caplog.text is not None

View File

@@ -1,24 +0,0 @@
import logging
from control_backend.agents.base import BaseAgent
class MyAgent(BaseAgent):
async def setup(self):
pass
async def handle_message(self, msg):
pass
def test_base_agent_logger_init():
# When defining a subclass, __init_subclass__ runs
# The BaseAgent in agents/base.py sets the logger
assert hasattr(MyAgent, "logger")
assert isinstance(MyAgent.logger, logging.Logger)
# The logger name depends on the package where BaseAgent is defined, not on this test module
# (whose __package__ may be None or the test package).
# base.py lives in 'src/control_backend/agents/', so its __package__ is 'control_backend.agents'
# and the subclass logger should therefore be named 'control_backend.agents.MyAgent'.
assert MyAgent.logger.name == "control_backend.agents.MyAgent"
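# --- Illustrative sketch (not part of the file above): one way a base class can attach a
# --- per-subclass logger via __init_subclass__, matching the naming this test expects.
# --- The class names and fallback package string below are assumptions for illustration only.
import logging


class SketchBaseAgent:
    def __init_subclass__(cls, **kwargs):
        super().__init_subclass__(**kwargs)
        # In the real base.py, __package__ evaluates to "control_backend.agents";
        # the fallback keeps this sketch runnable as a standalone script.
        package = __package__ or "control_backend.agents"
        cls.logger = logging.getLogger(f"{package}.{cls.__name__}")


class SketchAgent(SketchBaseAgent):
    pass


assert SketchAgent.logger.name.endswith(".SketchAgent")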

View File

@@ -3,7 +3,7 @@ from unittest.mock import AsyncMock, MagicMock
import pytest
import zmq
from control_backend.agents.perception.vad_agent import SocketPoller
from control_backend.agents.vad_agent import SocketPoller
@pytest.fixture
@@ -16,8 +16,8 @@ async def test_socket_poller_with_data(socket, mocker):
socket_data = b"test"
socket.recv.return_value = socket_data
mock_poller: MagicMock = mocker.patch("control_backend.agents.perception.vad_agent.azmq.Poller")
mock_poller.return_value.poll = AsyncMock(return_value=[(socket, zmq.POLLIN)])
mock_poller: MagicMock = mocker.patch("control_backend.agents.vad_agent.zmq.Poller")
mock_poller.return_value.poll.return_value = [(socket, zmq.POLLIN)]
poller = SocketPoller(socket)
# Calling `poll` twice to be able to check that the poller is reused
@@ -35,8 +35,8 @@ async def test_socket_poller_with_data(socket, mocker):
@pytest.mark.asyncio
async def test_socket_poller_no_data(socket, mocker):
mock_poller: MagicMock = mocker.patch("control_backend.agents.perception.vad_agent.azmq.Poller")
mock_poller.return_value.poll = AsyncMock(return_value=[])
mock_poller: MagicMock = mocker.patch("control_backend.agents.vad_agent.zmq.Poller")
mock_poller.return_value.poll.return_value = []
poller = SocketPoller(socket)
data = await poller.poll()

View File

@@ -0,0 +1,106 @@
from unittest.mock import AsyncMock, MagicMock
import numpy as np
import pytest
from control_backend.agents.vad_agent import Streaming
@pytest.fixture
def audio_in_socket():
return AsyncMock()
@pytest.fixture
def audio_out_socket():
return AsyncMock()
@pytest.fixture
def mock_agent(mocker):
"""Fixture to create a mock BDIAgent."""
agent = MagicMock()
agent.jid = "vad_agent@test"
return agent
@pytest.fixture
def streaming(audio_in_socket, audio_out_socket, mock_agent):
import torch
torch.hub.load.return_value = (..., ...) # Mock
streaming = Streaming(audio_in_socket, audio_out_socket)
streaming._ready = True
streaming.agent = mock_agent
return streaming
async def simulate_streaming_with_probabilities(streaming, probabilities: list[float]):
"""
Simulates a streaming scenario with given VAD model probabilities for testing purposes.
:param streaming: The streaming component to be tested.
:param probabilities: A list of probabilities representing the outputs of the VAD model.
"""
model_item = MagicMock()
model_item.item.side_effect = probabilities
streaming.model = MagicMock()
streaming.model.return_value = model_item
audio_in_poller = AsyncMock()
audio_in_poller.poll.return_value = np.empty(shape=512, dtype=np.float32)
streaming.audio_in_poller = audio_in_poller
for _ in probabilities:
await streaming.run()
@pytest.mark.asyncio
async def test_voice_activity_detected(audio_in_socket, audio_out_socket, streaming):
"""
Test a scenario where there is voice activity detected between silences.
:return:
"""
speech_chunk_count = 5
probabilities = [0.0] * 5 + [1.0] * speech_chunk_count + [0.0] * 5
await simulate_streaming_with_probabilities(streaming, probabilities)
audio_out_socket.send.assert_called_once()
data = audio_out_socket.send.call_args[0][0]
assert isinstance(data, bytes)
# each chunk holds 512 float32 samples of 4 bytes each; expecting 7 chunks (5 with speech, 2 as padding)
assert len(data) == 512 * 4 * (speech_chunk_count + 2)
@pytest.mark.asyncio
async def test_voice_activity_short_pause(audio_in_socket, audio_out_socket, streaming):
"""
Test a scenario where there is a short pause between speech, checking whether it ignores the
short pause.
"""
speech_chunk_count = 5
probabilities = (
[0.0] * 5 + [1.0] * speech_chunk_count + [0.0] + [1.0] * speech_chunk_count + [0.0] * 5
)
await simulate_streaming_with_probabilities(streaming, probabilities)
audio_out_socket.send.assert_called_once()
data = audio_out_socket.send.call_args[0][0]
assert isinstance(data, bytes)
# Expecting 13 chunks (2*5 with speech, 1 pause between, 2 as padding)
assert len(data) == 512 * 4 * (speech_chunk_count * 2 + 1 + 2)
@pytest.mark.asyncio
async def test_no_data(audio_in_socket, audio_out_socket, streaming):
"""
Test a scenario where there is no data received. This should not cause errors.
"""
audio_in_poller = AsyncMock()
audio_in_poller.poll.return_value = None
streaming.audio_in_poller = audio_in_poller
await streaming.run()
audio_out_socket.send.assert_not_called()
assert len(streaming.audio_buffer) == 0

View File

@@ -1,30 +1,11 @@
import numpy as np
import pytest
from control_backend.agents.perception.transcription_agent.speech_recognizer import (
from control_backend.agents.transcription.speech_recognizer import (
OpenAIWhisperSpeechRecognizer,
SpeechRecognizer,
)
@pytest.fixture(autouse=True)
def patch_sr_settings(monkeypatch):
# Patch the *module-local* settings that SpeechRecognizer imported
from control_backend.agents.perception.transcription_agent import speech_recognizer as sr
# Provide real numbers for everything _estimate_max_tokens() reads
monkeypatch.setattr(sr.settings.vad_settings, "sample_rate_hz", 16_000, raising=False)
monkeypatch.setattr(
sr.settings.behaviour_settings, "transcription_words_per_minute", 450, raising=False
)
monkeypatch.setattr(
sr.settings.behaviour_settings, "transcription_words_per_token", 0.75, raising=False
)
monkeypatch.setattr(
sr.settings.behaviour_settings, "transcription_token_buffer", 10, raising=False
)
def test_estimate_max_tokens():
"""Inputting one minute of audio, assuming 450 words per minute and adding a 10 token padding,
expecting 610 tokens."""
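# Worked check of the 610-token expectation, using the values referenced just above
# (450 words per minute, 0.75 words per token, 10-token buffer); this mirrors the
# arithmetic only, not the recognizer's actual implementation:
#   1 minute of audio -> 450 words -> 450 / 0.75 = 600 tokens -> 600 + 10 = 610 tokens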
@@ -55,6 +36,4 @@ def test_get_decode_options():
assert isinstance(options["sample_len"], int)
# When disabled, it should not limit output length based on input size
recognizer = OpenAIWhisperSpeechRecognizer(limit_output_length=False)
options = recognizer._get_decode_options(audio)
assert "sample_len" not in options
assert "sample_rate" not in options

View File

@@ -1,311 +0,0 @@
import asyncio
import json
from unittest.mock import AsyncMock, MagicMock
import pytest
from control_backend.agents.user_interrupt.user_interrupt_agent import UserInterruptAgent
from control_backend.core.agent_system import InternalMessage
from control_backend.core.config import settings
from control_backend.schemas.program import (
ConditionalNorm,
Goal,
KeywordBelief,
Phase,
Plan,
Program,
Trigger,
)
from control_backend.schemas.ri_message import RIEndpoint
@pytest.fixture
def agent():
agent = UserInterruptAgent(name="user_interrupt_agent")
agent.send = AsyncMock()
agent.logger = MagicMock()
agent.sub_socket = AsyncMock()
agent.pub_socket = AsyncMock()
return agent
@pytest.mark.asyncio
async def test_send_to_speech_agent(agent):
"""Verify speech command format."""
await agent._send_to_speech_agent("Hello World")
agent.send.assert_awaited_once()
sent_msg: InternalMessage = agent.send.call_args.args[0]
assert sent_msg.to == settings.agent_settings.robot_speech_name
body = json.loads(sent_msg.body)
assert body["data"] == "Hello World"
assert body["is_priority"] is True
@pytest.mark.asyncio
async def test_send_to_gesture_agent(agent):
"""Verify gesture command format."""
await agent._send_to_gesture_agent("wave_hand")
agent.send.assert_awaited_once()
sent_msg: InternalMessage = agent.send.call_args.args[0]
assert sent_msg.to == settings.agent_settings.robot_gesture_name
body = json.loads(sent_msg.body)
assert body["data"] == "wave_hand"
assert body["is_priority"] is True
assert body["endpoint"] == RIEndpoint.GESTURE_SINGLE.value
@pytest.mark.asyncio
async def test_send_to_bdi_belief(agent):
"""Verify belief update format."""
context_str = "some_goal"
await agent._send_to_bdi_belief(context_str)
assert agent.send.await_count == 1
sent_msg = agent.send.call_args.args[0]
assert sent_msg.to == settings.agent_settings.bdi_core_name
assert sent_msg.thread == "beliefs"
assert "achieved_some_goal" in sent_msg.body
@pytest.mark.asyncio
async def test_receive_loop_routing_success(agent):
"""
Test that the loop correctly:
1. Receives 'button_pressed' topic from ZMQ
2. Parses the JSON payload to find 'type' and 'context'
3. Calls the correct handler method based on 'type'
"""
# Prepare JSON payloads as bytes
payload_speech = json.dumps({"type": "speech", "context": "Hello Speech"}).encode()
payload_gesture = json.dumps({"type": "gesture", "context": "Hello Gesture"}).encode()
# override calls _send_to_bdi (for trigger/norm) OR _send_to_bdi_belief (for goal).
# To test routing, we need to populate the maps
agent._goal_map["Hello Override"] = "some_goal_slug"
payload_override = json.dumps({"type": "override", "context": "Hello Override"}).encode()
agent.sub_socket.recv_multipart.side_effect = [
(b"button_pressed", payload_speech),
(b"button_pressed", payload_gesture),
(b"button_pressed", payload_override),
asyncio.CancelledError, # Stop the infinite loop
]
agent._send_to_speech_agent = AsyncMock()
agent._send_to_gesture_agent = AsyncMock()
agent._send_to_bdi_belief = AsyncMock()
try:
await agent._receive_button_event()
except asyncio.CancelledError:
pass
await asyncio.sleep(0)
# Speech
agent._send_to_speech_agent.assert_awaited_once_with("Hello Speech")
# Gesture
agent._send_to_gesture_agent.assert_awaited_once_with("Hello Gesture")
# Override (since we mapped it to a goal)
agent._send_to_bdi_belief.assert_awaited_once_with("some_goal_slug")
assert agent._send_to_speech_agent.await_count == 1
assert agent._send_to_gesture_agent.await_count == 1
assert agent._send_to_bdi_belief.await_count == 1
@pytest.mark.asyncio
async def test_receive_loop_unknown_type(agent):
"""Test that unknown 'type' values in the JSON log a warning and do not crash."""
# Prepare a payload with an unknown type
payload_unknown = json.dumps({"type": "unknown_thing", "context": "some_data"}).encode()
agent.sub_socket.recv_multipart.side_effect = [
(b"button_pressed", payload_unknown),
asyncio.CancelledError,
]
agent._send_to_speech_agent = AsyncMock()
agent._send_to_gesture_agent = AsyncMock()
try:
await agent._receive_button_event()
except asyncio.CancelledError:
pass
await asyncio.sleep(0)
# Ensure no handlers were called
agent._send_to_speech_agent.assert_not_called()
agent._send_to_gesture_agent.assert_not_called()
agent.logger.warning.assert_called()
@pytest.mark.asyncio
async def test_create_mapping(agent):
# Create a program with a trigger, goal, and conditional norm
import uuid
trigger_id = uuid.uuid4()
goal_id = uuid.uuid4()
norm_id = uuid.uuid4()
cond = KeywordBelief(id=uuid.uuid4(), name="k1", keyword="key")
plan = Plan(id=uuid.uuid4(), name="p1", steps=[])
trigger = Trigger(id=trigger_id, name="my_trigger", condition=cond, plan=plan)
goal = Goal(id=goal_id, name="my_goal", description="desc", plan=plan)
cn = ConditionalNorm(id=norm_id, name="my_norm", norm="be polite", condition=cond)
phase = Phase(id=uuid.uuid4(), name="phase1", norms=[cn], goals=[goal], triggers=[trigger])
prog = Program(phases=[phase])
# Call create_mapping via handle_message
msg = InternalMessage(to="me", thread="new_program", body=prog.model_dump_json())
await agent.handle_message(msg)
# Check maps
assert str(trigger_id) in agent._trigger_map
assert agent._trigger_map[str(trigger_id)] == "trigger_my_trigger"
assert str(goal_id) in agent._goal_map
assert agent._goal_map[str(goal_id)] == "my_goal"
assert str(norm_id) in agent._cond_norm_map
assert agent._cond_norm_map[str(norm_id)] == "norm_be_polite"
@pytest.mark.asyncio
async def test_create_mapping_invalid_json(agent):
# Pass invalid json to handle_message thread "new_program"
msg = InternalMessage(to="me", thread="new_program", body="invalid json")
await agent.handle_message(msg)
# Should log error and maps should remain empty or cleared
agent.logger.error.assert_called()
@pytest.mark.asyncio
async def test_handle_message_trigger_start(agent):
# Setup reverse map manually
agent._trigger_reverse_map["trigger_slug"] = "ui_id_123"
msg = InternalMessage(to="me", thread="trigger_start", body="trigger_slug")
await agent.handle_message(msg)
agent.pub_socket.send_multipart.assert_awaited_once()
args = agent.pub_socket.send_multipart.call_args[0][0]
assert args[0] == b"experiment"
payload = json.loads(args[1])
assert payload["type"] == "trigger_update"
assert payload["id"] == "ui_id_123"
assert payload["achieved"] is True
@pytest.mark.asyncio
async def test_handle_message_trigger_end(agent):
agent._trigger_reverse_map["trigger_slug"] = "ui_id_123"
msg = InternalMessage(to="me", thread="trigger_end", body="trigger_slug")
await agent.handle_message(msg)
agent.pub_socket.send_multipart.assert_awaited_once()
payload = json.loads(agent.pub_socket.send_multipart.call_args[0][0][1])
assert payload["type"] == "trigger_update"
assert payload["achieved"] is False
@pytest.mark.asyncio
async def test_handle_message_transition_phase(agent):
msg = InternalMessage(to="me", thread="transition_phase", body="phase_id_123")
await agent.handle_message(msg)
agent.pub_socket.send_multipart.assert_awaited_once()
payload = json.loads(agent.pub_socket.send_multipart.call_args[0][0][1])
assert payload["type"] == "phase_update"
assert payload["id"] == "phase_id_123"
@pytest.mark.asyncio
async def test_handle_message_goal_start(agent):
agent._goal_reverse_map["goal_slug"] = "goal_id_123"
msg = InternalMessage(to="me", thread="goal_start", body="goal_slug")
await agent.handle_message(msg)
agent.pub_socket.send_multipart.assert_awaited_once()
payload = json.loads(agent.pub_socket.send_multipart.call_args[0][0][1])
assert payload["type"] == "goal_update"
assert payload["id"] == "goal_id_123"
assert payload["active"] is True
@pytest.mark.asyncio
async def test_handle_message_active_norms_update(agent):
agent._cond_norm_reverse_map["norm_active"] = "id_1"
agent._cond_norm_reverse_map["norm_inactive"] = "id_2"
# Body is like: "('norm_active', 'other')"
# The split logic handles quotes etc.
msg = InternalMessage(to="me", thread="active_norms_update", body="'norm_active', 'other'")
await agent.handle_message(msg)
agent.pub_socket.send_multipart.assert_awaited_once()
payload = json.loads(agent.pub_socket.send_multipart.call_args[0][0][1])
assert payload["type"] == "cond_norms_state_update"
norms = {n["id"]: n["active"] for n in payload["norms"]}
assert norms["id_1"] is True
assert norms["id_2"] is False
@pytest.mark.asyncio
async def test_send_experiment_control(agent):
# Test next_phase
await agent._send_experiment_control_to_bdi_core("next_phase")
agent.send.assert_awaited()
msg = agent.send.call_args[0][0]
assert msg.thread == "force_next_phase"
# Test reset_phase
await agent._send_experiment_control_to_bdi_core("reset_phase")
msg = agent.send.call_args[0][0]
assert msg.thread == "reset_current_phase"
# Test reset_experiment
await agent._send_experiment_control_to_bdi_core("reset_experiment")
msg = agent.send.call_args[0][0]
assert msg.thread == "reset_experiment"
@pytest.mark.asyncio
async def test_send_pause_command(agent):
await agent._send_pause_command("true")
# Sends to RI and VAD
assert agent.send.await_count == 2
msgs = [call.args[0] for call in agent.send.call_args_list]
ri_msg = next(m for m in msgs if m.to == settings.agent_settings.ri_communication_name)
assert json.loads(ri_msg.body)["endpoint"] == "" # PAUSE endpoint
assert json.loads(ri_msg.body)["data"] is True
vad_msg = next(m for m in msgs if m.to == settings.agent_settings.vad_name)
assert vad_msg.body == "PAUSE"
agent.send.reset_mock()
await agent._send_pause_command("false")
assert agent.send.await_count == 2
vad_msg = next(
m for m in agent.send.call_args_list if m.args[0].to == settings.agent_settings.vad_name
).args[0]
assert vad_msg.body == "RESUME"

View File

@@ -1,63 +0,0 @@
from unittest.mock import patch
import pytest
from fastapi import FastAPI
from fastapi.testclient import TestClient
from starlette.responses import StreamingResponse
from control_backend.api.v1.endpoints import logs
@pytest.fixture
def client():
"""TestClient with logs router included."""
app = FastAPI()
app.include_router(logs.router)
return TestClient(app)
@pytest.mark.asyncio
async def test_log_stream_endpoint_lines(client):
"""Call /logs/stream with a mocked ZMQ socket to cover all lines."""
# Dummy socket to mock ZMQ behavior
class DummySocket:
def __init__(self):
self.subscribed = []
self.connected = False
self.recv_count = 0
def subscribe(self, topic):
self.subscribed.append(topic)
def connect(self, addr):
self.connected = True
async def recv_multipart(self):
# Return one message, then stop generator
if self.recv_count == 0:
self.recv_count += 1
return (b"INFO", b"test message")
else:
raise StopAsyncIteration
dummy_socket = DummySocket()
# Patch Context.instance().socket() to return dummy socket
with patch("control_backend.api.v1.endpoints.logs.Context.instance") as mock_context:
mock_context.return_value.socket.return_value = dummy_socket
# Call the endpoint directly
response = await logs.log_stream()
assert isinstance(response, StreamingResponse)
# Fetch one chunk from the generator
gen = response.body_iterator
chunk = await gen.__anext__()
if isinstance(chunk, bytes):
chunk = chunk.decode("utf-8")
assert "data:" in chunk
# Optional: assert subscribe/connect were called
assert dummy_socket.subscribed # at least some log levels subscribed
assert dummy_socket.connected # connect was called
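# --- Illustrative sketch (an assumption for clarity, not the real handler in
# --- control_backend.api.v1.endpoints.logs) of the kind of SSE generator the test
# --- above expects the /logs/stream endpoint to return:
async def sketch_sse_log_stream(sub_socket):
    while True:
        level, message = await sub_socket.recv_multipart()
        # Server-Sent Events frame: a "data:" line terminated by a blank line.
        yield f"data: [{level.decode()}] {message.decode()}\n\n"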

View File

@@ -1,45 +0,0 @@
import json
import pytest
from fastapi.testclient import TestClient
from control_backend.api.v1.endpoints import message
@pytest.fixture
def client():
"""FastAPI TestClient for the message router."""
from fastapi import FastAPI
app = FastAPI()
app.include_router(message.router)
return TestClient(app)
def test_receive_message_post(client, monkeypatch):
"""Test POST /message endpoint sends message to pub socket."""
# Dummy pub socket to capture sent messages
class DummyPubSocket:
def __init__(self):
self.sent = []
async def send_multipart(self, msg):
self.sent.append(msg)
dummy_socket = DummyPubSocket()
# Patch app.state.endpoints_pub_socket
client.app.state.endpoints_pub_socket = dummy_socket
data = {"message": "Hello world"}
response = client.post("/message", json=data)
assert response.status_code == 202
assert response.json() == {"status": "Message received"}
# Ensure the message was sent via pub_socket
assert len(dummy_socket.sent) == 1
topic, body = dummy_socket.sent[0]
parsed = json.loads(body.decode("utf-8"))
assert parsed["message"] == "Hello world"

View File

@@ -1,137 +0,0 @@
import json
import uuid
from unittest.mock import AsyncMock
import pytest
from fastapi import FastAPI
from fastapi.testclient import TestClient
from control_backend.api.v1.endpoints import program
from control_backend.schemas.program import BasicNorm, Goal, Phase, Plan, Program
@pytest.fixture
def app():
"""Create a FastAPI app with the /program route and mock socket."""
app = FastAPI()
app.include_router(program.router)
return app
@pytest.fixture
def client(app):
"""Create a TestClient."""
return TestClient(app)
def make_valid_program_dict():
"""Helper to create a valid Program JSON structure."""
# Converting to JSON using Pydantic because it knows how to convert a UUID object
program_json_str = Program(
phases=[
Phase(
id=uuid.uuid4(),
name="Basic Phase",
norms=[
BasicNorm(
id=uuid.uuid4(),
name="Some norm",
norm="Do normal.",
),
],
goals=[
Goal(
id=uuid.uuid4(),
name="Some goal",
description="This description can be used to determine whether the goal "
"has been achieved.",
plan=Plan(
id=uuid.uuid4(),
name="Goal Plan",
steps=[],
),
can_fail=False,
),
],
triggers=[],
),
],
).model_dump_json()
# Converting back to a dict because that's what's expected
return json.loads(program_json_str)
def test_receive_program_success(client):
"""Valid Program JSON should be parsed and sent through the socket."""
mock_pub_socket = AsyncMock()
client.app.state.endpoints_pub_socket = mock_pub_socket
program_dict = make_valid_program_dict()
response = client.post("/program", json=program_dict)
assert response.status_code == 202
assert response.json() == {"status": "Program parsed"}
# Verify socket call
mock_pub_socket.send_multipart.assert_awaited_once()
args, kwargs = mock_pub_socket.send_multipart.await_args
assert args[0][0] == b"program"
sent_bytes = args[0][1]
sent_obj = json.loads(sent_bytes.decode())
# Converting to JSON using Pydantic because it knows how to handle UUIDs
expected_obj = json.loads(Program.model_validate(program_dict).model_dump_json())
assert sent_obj == expected_obj
def test_receive_program_invalid_json(client):
"""
Invalid JSON (malformed) -> FastAPI never calls endpoint.
It returns a 422 Unprocessable Entity.
"""
mock_pub_socket = AsyncMock()
client.app.state.endpoints_pub_socket = mock_pub_socket
# FastAPI only accepts valid JSON bodies, so send raw string
response = client.post("/program", content="{invalid json}")
assert response.status_code == 422
mock_pub_socket.send_multipart.assert_not_called()
def test_receive_program_invalid_deep_structure(client):
"""
Valid JSON but schema invalid -> Pydantic throws validation error -> 422.
"""
mock_pub_socket = AsyncMock()
client.app.state.endpoints_pub_socket = mock_pub_socket
# Missing "value" in norms element
bad_program = {
"phases": [
{
"id": "phase1",
"name": "deepfail",
"nextPhaseId": "phase2",
"phaseData": {
"norms": [
{"id": "n1", "name": "norm"} # INVALID: missing "value"
],
"goals": [
{"id": "g1", "name": "goal", "description": "desc", "achieved": False}
],
"triggers": [
{"id": "t1", "label": "trigger", "type": "keyword", "value": ["start"]}
],
},
}
]
}
response = client.post("/program", json=bad_program)
assert response.status_code == 422
mock_pub_socket.send_multipart.assert_not_called()

Some files were not shown because too many files have changed in this diff.