chore: merged dev into branch and updated logging

This commit is contained in:
Björn Otgaar
2025-11-06 13:52:12 +01:00
34 changed files with 461 additions and 419 deletions

40
.logging_config.yaml Normal file
View File

@@ -0,0 +1,40 @@
version: 1

# Custom levels are registered by setup_logging() before dictConfig runs.
custom_levels:
  OBSERVATION: 25
  ACTION: 26

formatters:
  # Console output
  colored:
    (): "colorlog.ColoredFormatter"
    format: "{log_color}{asctime} | {levelname:11} | {name:70} | {message}"
    style: "{"
    datefmt: "%H:%M:%S"
  # User-facing UI (structured JSON)
  json_experiment:
    (): "pythonjsonlogger.jsonlogger.JsonFormatter"
    format: "{name} {levelname} {levelno} {message} {created} {relativeCreated}"
    style: "{"

handlers:
  console:
    class: logging.StreamHandler
    level: DEBUG
    formatter: colored
    stream: ext://sys.stdout
  ui:
    class: zmq.log.handlers.PUBHandler
    level: DEBUG
    formatter: json_experiment

# Level of external libraries
root:
  level: WARN
  handlers: [console]

loggers:
  control_backend:
    level: DEBUG
    handlers: [ui]

View File

@@ -1,7 +0,0 @@
{
"python.testing.pytestArgs": [
"test"
],
"python.testing.unittestEnabled": false,
"python.testing.pytestEnabled": true
}

View File

@@ -5,6 +5,7 @@ description = "Add your description here"
readme = "README.md"
requires-python = ">=3.13"
dependencies = [
"colorlog>=6.10.1",
"fastapi[all]>=0.115.6",
"mlx-whisper>=0.4.3 ; sys_platform == 'darwin'",
"numpy>=2.3.3",
@@ -16,6 +17,8 @@ dependencies = [
"pytest-asyncio>=1.2.0",
"pytest-cov>=7.0.0",
"pytest-mock>=3.15.1",
"python-json-logger>=4.0.0",
"pyyaml>=6.0.3",
"pyzmq>=27.1.0",
"silero-vad>=6.0.0",
"spade>=4.1.0",

View File

@@ -0,0 +1,7 @@
from .base import BaseAgent as BaseAgent
from .belief_collector.belief_collector import BeliefCollectorAgent as BeliefCollectorAgent
from .llm.llm import LLMAgent as LLMAgent
from .ri_command_agent import RICommandAgent as RICommandAgent
from .ri_communication_agent import RICommunicationAgent as RICommunicationAgent
from .transcription.transcription_agent import TranscriptionAgent as TranscriptionAgent
from .vad_agent import VADAgent as VADAgent

View File

@@ -0,0 +1,18 @@
import logging
from spade.agent import Agent
class BaseAgent(Agent):
    """
    Common ancestor for all of our SPADE agents.

    Guarantees that every concrete agent class owns a ``logger`` attribute,
    namespaced under this package and the subclass name.
    """

    logger: logging.Logger

    def __init_subclass__(cls, **kwargs) -> None:
        # Runs once per subclass definition: attach a logger named
        # "<this package>.<SubclassName>" to the new class.
        super().__init_subclass__(**kwargs)
        package_logger = logging.getLogger(__package__)
        cls.logger = package_logger.getChild(cls.__name__)

View File

@@ -0,0 +1,2 @@
from .bdi_core import BDICoreAgent as BDICoreAgent
from .text_extractor import TBeliefExtractorAgent as TBeliefExtractorAgent

View File

@@ -5,12 +5,11 @@ from spade.behaviour import OneShotBehaviour
from spade.message import Message
from spade_bdi.bdi import BDIAgent
from control_backend.agents.bdi.behaviours.belief_setter import BeliefSetterBehaviour
from control_backend.agents.bdi.behaviours.receive_llm_resp_behaviour import (
ReceiveLLMResponseBehaviour,
)
from control_backend.core.config import settings
from .behaviours.belief_setter import BeliefSetterBehaviour
from .behaviours.receive_llm_resp_behaviour import ReceiveLLMResponseBehaviour
class BDICoreAgent(BDIAgent):
"""
@@ -20,18 +19,18 @@ class BDICoreAgent(BDIAgent):
It has the BeliefSetter behaviour and can ask and receive requests from the LLM agent.
"""
logger = logging.getLogger("bdi_core_agent")
logger = logging.getLogger(__package__).getChild(__name__)
async def setup(self) -> None:
"""
Initializes belief behaviors and message routing.
"""
self.logger.info("BDICoreAgent setup started")
self.logger.info("BDICoreAgent setup started.")
self.add_behaviour(BeliefSetterBehaviour())
self.add_behaviour(ReceiveLLMResponseBehaviour())
self.logger.info("BDICoreAgent setup complete")
self.logger.info("BDICoreAgent setup complete.")
def add_custom_actions(self, actions) -> None:
"""
@@ -45,7 +44,7 @@ class BDICoreAgent(BDIAgent):
Example: .reply("Hello LLM!")
"""
message_text = agentspeak.grounded(term.args[0], intention.scope)
self.logger.info("Reply action sending: %s", message_text)
self.logger.debug("Reply action sending: %s", message_text)
self._send_to_llm(str(message_text))
yield
@@ -63,6 +62,6 @@ class BDICoreAgent(BDIAgent):
)
await self.send(msg)
self.agent.logger.info("Message sent to LLM: %s", text)
self.agent.logger.info("Message sent to LLM agent: %s", text)
self.add_behaviour(SendBehaviour())

View File

@@ -1,5 +1,4 @@
import json
import logging
from spade.agent import Message
from spade.behaviour import CyclicBehaviour
@@ -11,32 +10,40 @@ from control_backend.core.config import settings
class BeliefSetterBehaviour(CyclicBehaviour):
"""
This is the behaviour that the BDI agent runs. This behaviour waits for incoming
message and processes it based on sender.
message and updates the agent's beliefs accordingly.
"""
agent: BDIAgent
logger = logging.getLogger("BDI/Belief Setter")
async def run(self):
msg = await self.receive(timeout=0.1)
if msg:
self.logger.info(f"Received message {msg.body}")
self._process_message(msg)
"""Polls for messages and processes them."""
msg = await self.receive()
self.agent.logger.debug(
"Received message from %s with thread '%s' and body: %s",
msg.sender,
msg.thread,
msg.body,
)
self._process_message(msg)
def _process_message(self, message: Message):
"""Routes the message to the correct processing function based on the sender."""
sender = message.sender.node # removes host from jid and converts to str
self.logger.debug("Sender: %s", sender)
self.agent.logger.debug("Processing message from sender: %s", sender)
match sender:
case settings.agent_settings.belief_collector_agent_name:
self.logger.debug("Processing message from belief collector.")
self.agent.logger.debug(
"Message is from the belief collector agent. Processing as belief message."
)
self._process_belief_message(message)
case _:
self.logger.debug("Not the belief agent, discarding message")
self.agent.logger.debug("Not the belief agent, discarding message")
pass
def _process_belief_message(self, message: Message):
if not message.body:
self.agent.logger.debug("Ignoring message with empty body from %s", message.sender.node)
return
match message.thread:
@@ -44,23 +51,35 @@ class BeliefSetterBehaviour(CyclicBehaviour):
try:
beliefs: dict[str, list[str]] = json.loads(message.body)
self._set_beliefs(beliefs)
except json.JSONDecodeError as e:
self.logger.error("Could not decode beliefs into JSON format: %s", e)
except json.JSONDecodeError:
self.agent.logger.error(
"Could not decode beliefs from JSON. Message body: '%s'",
message.body,
exc_info=True,
)
case _:
pass
def _set_beliefs(self, beliefs: dict[str, list[str]]):
"""Remove previous values for beliefs and update them with the provided values."""
"""Removes previous values for beliefs and updates them with the provided values."""
if self.agent.bdi is None:
self.logger.warning("Cannot set beliefs, since agent's BDI is not yet initialized.")
self.agent.logger.warning("Cannot set beliefs; agent's BDI is not yet initialized.")
return
if not beliefs:
self.agent.logger.debug("Received an empty set of beliefs. No beliefs were updated.")
return
# Set new beliefs (outdated beliefs are automatically removed)
for belief, arguments in beliefs.items():
self.agent.logger.debug("Setting belief %s with arguments %s", belief, arguments)
self.agent.bdi.set_belief(belief, *arguments)
# Special case: if there's a new user message, flag that we haven't responded yet
if belief == "user_said":
self.agent.bdi.set_belief("new_message")
self.agent.logger.debug(
"Detected 'user_said' belief, also setting 'new_message' belief."
)
self.logger.info("Set belief %s with arguments %s", belief, arguments)
self.agent.logger.info("Successfully updated %d beliefs.", len(beliefs))

View File

@@ -1,5 +1,3 @@
import logging
from spade.behaviour import CyclicBehaviour
from spade.message import Message
@@ -12,18 +10,14 @@ class ReceiveLLMResponseBehaviour(CyclicBehaviour):
Adds behavior to receive responses from the LLM Agent.
"""
logger = logging.getLogger("BDI/LLM Receiver")
async def run(self):
msg = await self.receive(timeout=2)
if not msg:
return
msg = await self.receive()
sender = msg.sender.node
match sender:
case settings.agent_settings.llm_agent_name:
content = msg.body
self.logger.info("Received LLM response: %s", content)
self.agent.logger.info("Received LLM response: %s", content)
speech_command = SpeechCommand(data=content)
@@ -35,9 +29,9 @@ class ReceiveLLMResponseBehaviour(CyclicBehaviour):
body=speech_command.model_dump_json(),
)
self.logger.debug("Sending message: %s", message)
self.agent.logger.debug("Sending message: %s", message)
await self.send(message)
case _:
self.logger.debug("Not from the llm, discarding message")
self.agent.logger.debug("Discarding message from %s", sender)
pass

View File

@@ -1,6 +1,4 @@
import asyncio
import json
import logging
from spade.behaviour import CyclicBehaviour
from spade.message import Message
@@ -9,8 +7,6 @@ from control_backend.core.config import settings
class BeliefFromText(CyclicBehaviour):
logger = logging.getLogger("Belief From Text")
# TODO: LLM prompt nog hardcoded
llm_instruction_prompt = """
You are an information extraction assistent for a BDI agent. Your task is to extract values \
@@ -39,17 +35,15 @@ class BeliefFromText(CyclicBehaviour):
beliefs = {"mood": ["X"], "car": ["Y"]}
async def run(self):
msg = await self.receive(timeout=0.1)
if msg:
sender = msg.sender.node
match sender:
case settings.agent_settings.transcription_agent_name:
self.logger.info("Received text from transcriber.")
await self._process_transcription_demo(msg.body)
case _:
self.logger.info("Received message from other agent.")
pass
await asyncio.sleep(1)
msg = await self.receive()
sender = msg.sender.node
match sender:
case settings.agent_settings.transcription_agent_name:
self.logger.debug("Received text from transcriber: %s", msg.body)
await self._process_transcription_demo(msg.body)
case _:
self.logger.info("Discarding message from %s", sender)
pass
async def _process_transcription(self, text: str):
text_prompt = f"Text: {text}"
@@ -75,10 +69,10 @@ class BeliefFromText(CyclicBehaviour):
belief_message.thread = "beliefs"
await self.send(belief_message)
self.logger.info("Sent beliefs to BDI.")
self.agent.logger.info("Sent beliefs to BDI.")
except json.JSONDecodeError:
# Parsing failed, so the response is in the wrong format, log warning
self.logger.warning("Received LLM response in incorrect format.")
self.agent.logger.warning("Received LLM response in incorrect format.")
async def _process_transcription_demo(self, txt: str):
"""
@@ -97,4 +91,4 @@ class BeliefFromText(CyclicBehaviour):
belief_msg.thread = "beliefs"
await self.send(belief_msg)
self.logger.info("Sent beliefs to Belief Collector.")
self.logger.info("Sent %d beliefs to the belief collector.", len(belief["beliefs"]))

View File

@@ -1,9 +1,8 @@
from spade.agent import Agent
from control_backend.agents.base import BaseAgent
from control_backend.agents.bdi.behaviours.text_belief_extractor import BeliefFromText
from .behaviours.text_belief_extractor import BeliefFromText
class TBeliefExtractor(Agent):
class TBeliefExtractorAgent(BaseAgent):
async def setup(self):
self.b = BeliefFromText()
self.add_behaviour(self.b)
self.add_behaviour(BeliefFromText())

View File

@@ -1,13 +1,11 @@
import json
import logging
from json import JSONDecodeError
from spade.agent import Message
from spade.behaviour import CyclicBehaviour
from control_backend.core.config import settings
logger = logging.getLogger(__name__)
class ContinuousBeliefCollector(CyclicBehaviour):
"""
@@ -16,18 +14,17 @@ class ContinuousBeliefCollector(CyclicBehaviour):
"""
async def run(self):
msg = await self.receive(timeout=0.1) # Wait for 0.1s
if msg:
await self._process_message(msg)
msg = await self.receive()
await self._process_message(msg)
async def _process_message(self, msg: Message):
sender_node = self._sender_node(msg)
sender_node = msg.sender.node
# Parse JSON payload
try:
payload = json.loads(msg.body)
except Exception as e:
logger.warning(
except JSONDecodeError as e:
self.agent.logger.warning(
"BeliefCollector: failed to parse JSON from %s. Body=%r Error=%s",
sender_node,
msg.body,
@@ -39,32 +36,19 @@ class ContinuousBeliefCollector(CyclicBehaviour):
# Prefer explicit 'type' field
if msg_type == "belief_extraction_text" or sender_node == "belief_text_agent_mock":
logger.info(
"BeliefCollector: message routed to _handle_belief_text (sender=%s)", sender_node
self.agent.logger.debug(
"Message routed to _handle_belief_text (sender=%s)", sender_node
)
await self._handle_belief_text(payload, sender_node)
# This is not implemented yet, but we keep the structure for future use
elif msg_type == "emotion_extraction_text" or sender_node == "emo_text_agent_mock":
logger.info(
"BeliefCollector: message routed to _handle_emo_text (sender=%s)", sender_node
)
self.agent.logger.debug("Message routed to _handle_emo_text (sender=%s)", sender_node)
await self._handle_emo_text(payload, sender_node)
else:
logger.info(
"BeliefCollector: unrecognized message (sender=%s, type=%r). Ignoring.",
sender_node,
msg_type,
self.agent.logger.warning(
"Unrecognized message (sender=%s, type=%r). Ignoring.", sender_node, msg_type
)
@staticmethod
def _sender_node(msg: Message) -> str:
"""
Extracts the 'node' (localpart) of the sender JID.
E.g., 'agent@host/resource' -> 'agent'
"""
s = str(msg.sender) if msg.sender is not None else "no_sender"
return s.split("@", 1)[0] if "@" in s else s
async def _handle_belief_text(self, payload: dict, origin: str):
"""
Expected payload:
@@ -78,21 +62,13 @@ class ContinuousBeliefCollector(CyclicBehaviour):
beliefs = payload.get("beliefs", {})
if not beliefs:
logger.info("BeliefCollector: no beliefs to process.")
self.agent.logger.debug("Received empty beliefs set.")
return
if not isinstance(beliefs, dict):
logger.warning("BeliefCollector: 'beliefs' is not a dict: %r", beliefs)
return
if not all(isinstance(v, list) for v in beliefs.values()):
logger.warning("BeliefCollector: 'beliefs' values are not all lists: %r", beliefs)
return
logger.info("BeliefCollector: forwarding %d beliefs.", len(beliefs))
self.agent.logger.debug("Forwarding %d beliefs.", len(beliefs))
for belief_name, belief_list in beliefs.items():
for belief in belief_list:
logger.info(" - %s %s", belief_name, str(belief))
self.agent.logger.debug(" - %s %s", belief_name, str(belief))
await self._send_beliefs_to_bdi(beliefs, origin=origin)
@@ -113,4 +89,4 @@ class ContinuousBeliefCollector(CyclicBehaviour):
msg.body = json.dumps(beliefs)
await self.send(msg)
logger.info("BeliefCollector: sent %d belief(s) to BDI at %s", len(beliefs), to_jid)
self.agent.logger.info("Sent %d belief(s) to BDI core.", len(beliefs))

View File

@@ -1,15 +1,11 @@
import logging
from spade.agent import Agent
from control_backend.agents.base import BaseAgent
from .behaviours.continuous_collect import ContinuousBeliefCollector
logger = logging.getLogger(__name__)
class BeliefCollectorAgent(Agent):
class BeliefCollectorAgent(BaseAgent):
async def setup(self):
logger.info("BeliefCollectorAgent starting (%s)", self.jid)
self.logger.info("BeliefCollectorAgent starting (%s)", self.jid)
# Attach the continuous collector behaviour (listens and forwards to BDI)
self.add_behaviour(ContinuousBeliefCollector())
logger.info("BeliefCollectorAgent ready.")
self.logger.info("BeliefCollectorAgent ready.")

View File

@@ -1,31 +1,24 @@
"""
LLM Agent module for routing text queries from the BDI Core Agent to a local LLM
service and returning its responses back to the BDI Core Agent.
"""
import json
import logging
import re
from collections.abc import AsyncGenerator
import httpx
from spade.agent import Agent
from spade.behaviour import CyclicBehaviour
from spade.message import Message
from control_backend.agents.llm.llm_instructions import LLMInstructions
from control_backend.agents import BaseAgent
from control_backend.core.config import settings
from .llm_instructions import LLMInstructions
class LLMAgent(Agent):
class LLMAgent(BaseAgent):
"""
Agent responsible for processing user text input and querying a locally
hosted LLM for text generation. Receives messages from the BDI Core Agent
and responds with processed LLM output.
"""
logger = logging.getLogger("llm_agent")
class ReceiveMessageBehaviour(CyclicBehaviour):
"""
Cyclic behaviour to continuously listen for incoming messages from
@@ -37,12 +30,10 @@ class LLMAgent(Agent):
Receives SPADE messages and processes only those originating from the
configured BDI agent.
"""
msg = await self.receive(timeout=1)
if not msg:
return
msg = await self.receive()
sender = msg.sender.node
self.agent.logger.info(
self.agent.logger.debug(
"Received message: %s from %s",
msg.body,
sender,
@@ -165,7 +156,6 @@ class LLMAgent(Agent):
Sets up the SPADE behaviour to filter and process messages from the
BDI Core Agent.
"""
self.logger.info("LLMAgent setup complete")
behaviour = self.ReceiveMessageBehaviour()
self.add_behaviour(behaviour)
self.logger.info("LLMAgent setup complete")

View File

@@ -1,19 +1,16 @@
import json
import logging
import spade.agent
import zmq
from spade.agent import Agent
from spade.behaviour import CyclicBehaviour
from zmq.asyncio import Context
from control_backend.agents import BaseAgent
from control_backend.core.config import settings
from control_backend.schemas.ri_message import SpeechCommand
logger = logging.getLogger(__name__)
class RICommandAgent(Agent):
class RICommandAgent(BaseAgent):
subsocket: zmq.Socket
pubsocket: zmq.Socket
address = ""
@@ -51,7 +48,7 @@ class RICommandAgent(Agent):
# Send to the robot.
await self.agent.pubsocket.send_json(message.model_dump())
except Exception as e:
logger.error("Error processing message: %s", e)
self.agent.logger.error("Error processing message: %s", e)
class SendPythonCommandsBehaviour(CyclicBehaviour):
"""Behaviour for sending commands received from other Python agents."""
@@ -63,13 +60,13 @@ class RICommandAgent(Agent):
speech_command = SpeechCommand.model_validate_json(message.body)
await self.agent.pubsocket.send_json(speech_command.model_dump())
except Exception as e:
logger.error("Error processing message: %s", e)
self.agent.logger.error("Error processing message: %s", e)
async def setup(self):
"""
Setup the command agent
"""
logger.info("Setting up %s", self.jid)
self.logger.info("Setting up %s", self.jid)
context = Context.instance()
@@ -90,4 +87,4 @@ class RICommandAgent(Agent):
self.add_behaviour(commands_behaviour)
self.add_behaviour(self.SendPythonCommandsBehaviour())
logger.info("Finished setting up %s", self.jid)
self.logger.info("Finished setting up %s", self.jid)

View File

@@ -1,19 +1,18 @@
import asyncio
import json
import logging
import zmq.asyncio
from spade.agent import Agent
from spade.behaviour import CyclicBehaviour
from zmq.asyncio import Context
from control_backend.agents.ri_command_agent import RICommandAgent
from control_backend.agents import BaseAgent
from control_backend.core.config import settings
logger = logging.getLogger(__name__)
from .ri_command_agent import RICommandAgent
class RICommunicationAgent(Agent):
class RICommunicationAgent(BaseAgent):
req_socket: zmq.Socket
_address = ""
_bind = True
connected = False
@@ -48,34 +47,38 @@ class RICommunicationAgent(Agent):
self.agent._req_socket.send_json(message), timeout=seconds_to_wait_total / 2
)
except TimeoutError:
logger.debug(
self.agent.logger.debug(
"Waited too long to send message - "
"we probably dont have any receivers... but let's check!"
)
# Wait up to three seconds for a reply:)
try:
logger.debug(f"waiting for message for {seconds_to_wait_total / 2} seconds.")
self.agent.logger.debug(
f"waiting for message for{seconds_to_wait_total / 2} seconds."
)
message = await asyncio.wait_for(
self.agent._req_socket.recv_json(), timeout=seconds_to_wait_total / 2
)
# We didnt get a reply :(
except TimeoutError:
logger.info("No ping retrieved in 3 seconds, killing myself.")
self.agent.logger.info("No ping retrieved in 3 seconds, killing myself.")
# Tell UI we're disconnected.
topic = b"ping"
data = json.dumps(False).encode()
if self.agent.pub_socket is None:
logger.error("communication agent pub socket not correctly initialized.")
self.agent.logger.error(
"communication agent pub socket not correctly initialized."
)
else:
try:
await asyncio.wait_for(
self.agent.pub_socket.send_multipart([topic, data]), 5
)
except TimeoutError:
logger.error(
self.agent.logger.error(
"Initial connection ping for router timed"
" out in ri_communication_agent."
)
@@ -83,9 +86,9 @@ class RICommunicationAgent(Agent):
# Try to reboot.
self.agent.setup()
logger.debug('Received message "%s"', message)
self.agent.logger.debug('Received message "%s"', message)
if "endpoint" not in message:
logger.error("No received endpoint in message, excepted ping endpoint.")
self.agent.logger.error("No received endpoint in message, excepted ping endpoint.")
return
# See what endpoint we received
@@ -97,7 +100,7 @@ class RICommunicationAgent(Agent):
await self.agent.pub_socket.send_multipart([topic, data])
await asyncio.sleep(1)
case _:
logger.info(
self.agent.logger.info(
"Received message with topic different than ping, while ping expected."
)
@@ -122,7 +125,7 @@ class RICommunicationAgent(Agent):
"""
Try to set up the communication agent; we have 5 retries in case we don't have a response yet.
"""
logger.info("Setting up %s", self.jid)
self.logger.info("Setting up %s", self.jid)
# Bind request socket
await self.setup_sockets()
@@ -142,7 +145,7 @@ class RICommunicationAgent(Agent):
received_message = await asyncio.wait_for(self._req_socket.recv_json(), timeout=1.0)
except TimeoutError:
logger.warning(
self.logger.warning(
"No connection established in 20 seconds (attempt %d/%d)",
retries + 1,
max_retries,
@@ -151,7 +154,7 @@ class RICommunicationAgent(Agent):
continue
except Exception as e:
logger.error("Unexpected error during negotiation: %s", e)
self.logger.error("Unexpected error during negotiation: %s", e)
retries += 1
continue
@@ -159,7 +162,7 @@ class RICommunicationAgent(Agent):
endpoint = received_message.get("endpoint")
if endpoint != "negotiate/ports":
# TODO: Should this send a message back?
logger.error(
self.logger.error(
"Invalid endpoint '%s' received (attempt %d/%d)",
endpoint,
retries + 1,
@@ -198,10 +201,10 @@ class RICommunicationAgent(Agent):
)
await ri_commands_agent.start()
case _:
logger.warning("Unhandled negotiation id: %s", id)
self.logger.warning("Unhandled negotiation id: %s", id)
except Exception as e:
logger.error("Error unpacking negotiation data: %s", e)
self.logger.error("Error unpacking negotiation data: %s", e)
retries += 1
continue
@@ -209,7 +212,7 @@ class RICommunicationAgent(Agent):
break
else:
logger.error("Failed to set up RICommunicationAgent after %d retries", max_retries)
self.logger.error("Failed to set up RICommunicationAgent after %d retries", max_retries)
return
# Set up ping behaviour
@@ -220,13 +223,13 @@ class RICommunicationAgent(Agent):
topic = b"ping"
data = json.dumps(True).encode()
if self.pub_socket is None:
logger.error("communication agent pub socket not correctly initialized.")
self.logger.error("communication agent pub socket not correctly initialized.")
else:
try:
await asyncio.wait_for(self.pub_socket.send_multipart([topic, data]), 5)
except TimeoutError:
logger.error(
self.logger.error(
"Initial connection ping for router timed out in ri_communication_agent."
)
self.connected = True
logger.info("Finished setting up %s", self.jid)
self.logger.info("Finished setting up %s", self.jid)

View File

@@ -1,2 +0,0 @@
from .speech_recognizer import SpeechRecognizer as SpeechRecognizer
from .transcription_agent import TranscriptionAgent as TranscriptionAgent

View File

@@ -1,20 +1,18 @@
import asyncio
import logging
import numpy as np
import zmq
import zmq.asyncio as azmq
from spade.agent import Agent
from spade.behaviour import CyclicBehaviour
from spade.message import Message
from control_backend.agents.transcription.speech_recognizer import SpeechRecognizer
from control_backend.agents import BaseAgent
from control_backend.core.config import settings
logger = logging.getLogger(__name__)
from .speech_recognizer import SpeechRecognizer
class TranscriptionAgent(Agent):
class TranscriptionAgent(BaseAgent):
"""
An agent which listens to audio fragments with voice, transcribes them, and sends the
transcription to other agents.
@@ -59,10 +57,10 @@ class TranscriptionAgent(Agent):
audio = np.frombuffer(audio, dtype=np.float32)
speech = await self._transcribe(audio)
if not speech:
logger.info("Nothing transcribed.")
self.agent.logger.info("Nothing transcribed.")
return
logger.info("Transcribed speech: %s", speech)
self.agent.logger.info("Transcribed speech: %s", speech)
await self._share_transcription(speech)
@@ -77,7 +75,7 @@ class TranscriptionAgent(Agent):
self.audio_in_socket.connect(self.audio_in_address)
async def setup(self):
logger.info("Setting up %s", self.jid)
self.logger.info("Setting up %s", self.jid)
self._connect_audio_in_socket()
@@ -85,4 +83,4 @@ class TranscriptionAgent(Agent):
transcribing.warmup()
self.add_behaviour(transcribing)
logger.info("Finished setting up %s", self.jid)
self.logger.info("Finished setting up %s", self.jid)

View File

@@ -1,16 +1,13 @@
import logging
import numpy as np
import torch
import zmq
import zmq.asyncio as azmq
from spade.agent import Agent
from spade.behaviour import CyclicBehaviour
from control_backend.agents.transcription import TranscriptionAgent
from control_backend.agents import BaseAgent
from control_backend.core.config import settings
logger = logging.getLogger(__name__)
from .transcription.transcription_agent import TranscriptionAgent
class SocketPoller[T]:
@@ -61,7 +58,7 @@ class Streaming(CyclicBehaviour):
discarded = 0
while await self.audio_in_poller.poll(1) is not None:
discarded += 1
logging.info(f"Discarded {discarded} audio packets before starting.")
self.agent.logger.info(f"Discarded {discarded} audio packets before starting.")
self._ready = True
async def run(self) -> None:
@@ -71,7 +68,9 @@ class Streaming(CyclicBehaviour):
data = await self.audio_in_poller.poll()
if data is None:
if len(self.audio_buffer) > 0:
logger.debug("No audio data received. Discarding buffer until new data arrives.")
self.agent.logger.debug(
"No audio data received. Discarding buffer until new data arrives."
)
self.audio_buffer = np.array([], dtype=np.float32)
self.i_since_speech = 100
return
@@ -82,7 +81,7 @@ class Streaming(CyclicBehaviour):
if prob > 0.5:
if self.i_since_speech > 3:
logger.debug("Speech started.")
self.agent.logger.debug("Speech started.")
self.audio_buffer = np.append(self.audio_buffer, chunk)
self.i_since_speech = 0
return
@@ -95,7 +94,7 @@ class Streaming(CyclicBehaviour):
# Speech probably ended. Make sure we have a usable amount of data.
if len(self.audio_buffer) >= 3 * len(chunk):
logger.debug("Speech ended.")
self.agent.logger.debug("Speech ended.")
await self.audio_out_socket.send(self.audio_buffer[: -2 * len(chunk)].tobytes())
# At this point, we know that the speech has ended.
@@ -103,7 +102,7 @@ class Streaming(CyclicBehaviour):
self.audio_buffer = chunk
class VADAgent(Agent):
class VADAgent(BaseAgent):
"""
An agent which listens to an audio stream, does Voice Activity Detection (VAD), and sends
fragments with detected speech to other agents over ZeroMQ.
@@ -148,12 +147,12 @@ class VADAgent(Agent):
self.audio_out_socket = azmq.Context.instance().socket(zmq.PUB)
return self.audio_out_socket.bind_to_random_port("tcp://*", max_tries=100)
except zmq.ZMQBindError:
logger.error("Failed to bind an audio output socket after 100 tries.")
self.logger.error("Failed to bind an audio output socket after 100 tries.")
self.audio_out_socket = None
return None
async def setup(self):
logger.info("Setting up %s", self.jid)
self.logger.info("Setting up %s", self.jid)
self._connect_audio_in_socket()
@@ -170,4 +169,4 @@ class VADAgent(Agent):
transcriber = TranscriptionAgent(audio_out_address)
await transcriber.start()
logger.info("Finished setting up %s", self.jid)
self.logger.info("Finished setting up %s", self.jid)

View File

@@ -0,0 +1,32 @@
import logging
import zmq
from fastapi import APIRouter
from fastapi.responses import StreamingResponse
from zmq.asyncio import Context
from control_backend.core.config import settings
logger = logging.getLogger(__name__)
router = APIRouter()
# DO NOT LOG INSIDE THIS FUNCTION
@router.get("/logs/stream")
async def log_stream():
    """
    Stream log records from the internal ZMQ bus as server-sent events.

    Subscribes to one topic per known log-level name (the ZMQ ``PUBHandler``
    publishes records under their level name) and forwards each received
    message as an SSE ``data:`` frame.
    """
    context = Context.instance()
    socket = context.socket(zmq.SUB)

    # Subscribe to every known level-name topic before connecting.
    for level in logging.getLevelNamesMapping():
        socket.subscribe(topic=level)

    socket.connect(settings.zmq_settings.internal_sub_address)

    async def gen():
        # Close the socket when the client disconnects; otherwise every
        # request leaks a subscriber socket for the process lifetime.
        try:
            while True:
                _, message = await socket.recv_multipart()
                yield f"data: {message.decode().strip()}\n\n"
        finally:
            socket.close()

    return StreamingResponse(gen(), media_type="text/event-stream")

View File

@@ -1,6 +1,6 @@
from fastapi.routing import APIRouter
from control_backend.api.v1.endpoints import message, robot, sse
from control_backend.api.v1.endpoints import logs, message, robot, sse
api_router = APIRouter()
@@ -9,3 +9,5 @@ api_router.include_router(message.router, tags=["Messages"])
api_router.include_router(sse.router, tags=["SSE"])
api_router.include_router(robot.router, prefix="/robot", tags=["Pings", "Commands"])
api_router.include_router(logs.router, tags=["Logs"])

View File

@@ -0,0 +1 @@
from .setup_logging import setup_logging as setup_logging

View File

@@ -0,0 +1,59 @@
import logging
import logging.config
import os
import yaml
import zmq
from control_backend.core.config import settings
def add_logging_level(level_name: str, level_num: int, method_name: str | None = None) -> None:
"""
Adds a logging level to the `logging` module and the
currently configured logging class.
"""
if not method_name:
method_name = level_name.lower()
if hasattr(logging, level_name):
raise AttributeError(f"{level_name} already defined in logging module")
if hasattr(logging, method_name):
raise AttributeError(f"{method_name} already defined in logging module")
if hasattr(logging.getLoggerClass(), method_name):
raise AttributeError(f"{method_name} already defined in logger class")
def log_for_level(self, message, *args, **kwargs):
if self.isEnabledFor(level_num):
self._log(level_num, message, args, **kwargs)
def log_to_root(message, *args, **kwargs):
logging.log(level_num, message, *args, **kwargs)
logging.addLevelName(level_num, level_name)
setattr(logging, level_name, level_num)
setattr(logging.getLoggerClass(), method_name, log_for_level)
setattr(logging, method_name, log_to_root)
def setup_logging(path: str = ".logging_config.yaml") -> None:
    """
    Configure the `logging` module from a YAML file.

    Registers any ``custom_levels`` declared in the config, injects a live ZMQ
    PUB socket into the ``ui`` handler (a socket object cannot be expressed in
    YAML), and finally applies the config via ``logging.config.dictConfig``.

    Falls back to the default logging configuration — with a warning — when
    the file is missing, empty, or cannot be parsed.

    Args:
        path: Path to the YAML logging configuration file.
    """
    if not os.path.exists(path):
        logging.warning("Logging config file not found. Using default logging configuration.")
        return

    with open(path) as f:
        try:
            config = yaml.safe_load(f.read())
        except (AttributeError, yaml.YAMLError) as e:
            logging.warning(f"Could not load logging configuration: {e}")
            # Bail out here: dictConfig({}) would raise ValueError because the
            # mandatory 'version' key is missing, crashing instead of falling back.
            return

    if not config:
        logging.warning("Logging config file is empty. Using default logging configuration.")
        return

    # Register project-specific levels before dictConfig references them.
    for level_name, level_num in (config.get("custom_levels") or {}).items():
        add_logging_level(level_name, level_num)

    # The ZMQ PUBHandler needs a live socket supplied at runtime.
    handlers = config.get("handlers") or {}
    if handlers.get("ui"):
        pub_socket = zmq.Context.instance().socket(zmq.PUB)
        pub_socket.connect(settings.zmq_settings.internal_pub_address)
        config["handlers"]["ui"]["interface_or_socket"] = pub_socket

    logging.config.dictConfig(config)

View File

@@ -1,6 +1,3 @@
# Standard library imports
# External imports
import contextlib
import logging
import threading
@@ -10,17 +7,18 @@ from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
from zmq.asyncio import Context
from control_backend.agents.bdi.bdi_core import BDICoreAgent
from control_backend.agents.bdi.text_extractor import TBeliefExtractor
from control_backend.agents.belief_collector.belief_collector import BeliefCollectorAgent
from control_backend.agents.llm.llm import LLMAgent
from control_backend.agents.ri_communication_agent import RICommunicationAgent
from control_backend.agents.vad_agent import VADAgent
from control_backend.agents import (
BeliefCollectorAgent,
LLMAgent,
RICommunicationAgent,
VADAgent,
)
from control_backend.agents.bdi import BDICoreAgent, TBeliefExtractorAgent
from control_backend.api.v1.router import api_router
from control_backend.core.config import settings
from control_backend.logging import setup_logging
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.DEBUG)
def setup_sockets():
@@ -44,7 +42,13 @@ def setup_sockets():
@contextlib.asynccontextmanager
async def lifespan(app: FastAPI):
logger.info("%s starting up.", app.title)
"""
Application lifespan context manager to handle startup and shutdown events.
"""
# --- APPLICATION STARTUP ---
setup_logging()
logger.info("%s is starting up.", app.title)
logger.warning("testing extra", extra={"extra1": "one", "extra2": "two"})
# Initiate sockets
proxy_thread = threading.Thread(target=setup_sockets)
@@ -57,52 +61,83 @@ async def lifespan(app: FastAPI):
endpoints_pub_socket.connect(settings.zmq_settings.internal_pub_address)
app.state.endpoints_pub_socket = endpoints_pub_socket
# Initiate agents
ri_communication_agent = RICommunicationAgent(
jid=settings.agent_settings.ri_communication_agent_name
+ "@"
+ settings.agent_settings.host,
password=settings.agent_settings.ri_communication_agent_name,
address="tcp://*:5555",
bind=True,
)
await ri_communication_agent.start()
# --- Initialize Agents ---
logger.info("Initializing and starting agents.")
agents_to_start = {
"RICommunicationAgent": (
RICommunicationAgent,
{
"name": settings.agent_settings.ri_communication_agent_name,
"jid": f"{settings.agent_settings.ri_communication_agent_name}"
f"@{settings.agent_settings.host}",
"password": settings.agent_settings.ri_communication_agent_name,
"address": "tcp://*:5555",
"bind": True,
},
),
"LLMAgent": (
LLMAgent,
{
"name": settings.agent_settings.llm_agent_name,
"jid": f"{settings.agent_settings.llm_agent_name}@{settings.agent_settings.host}",
"password": settings.agent_settings.llm_agent_name,
},
),
"BDICoreAgent": (
BDICoreAgent,
{
"name": settings.agent_settings.bdi_core_agent_name,
"jid": f"{settings.agent_settings.bdi_core_agent_name}@"
f"{settings.agent_settings.host}",
"password": settings.agent_settings.bdi_core_agent_name,
"asl": "src/control_backend/agents/bdi/rules.asl",
},
),
"BeliefCollectorAgent": (
BeliefCollectorAgent,
{
"name": settings.agent_settings.belief_collector_agent_name,
"jid": f"{settings.agent_settings.belief_collector_agent_name}@"
f"{settings.agent_settings.host}",
"password": settings.agent_settings.belief_collector_agent_name,
},
),
"TBeliefExtractor": (
TBeliefExtractorAgent,
{
"name": settings.agent_settings.text_belief_extractor_agent_name,
"jid": f"{settings.agent_settings.text_belief_extractor_agent_name}@"
f"{settings.agent_settings.host}",
"password": settings.agent_settings.text_belief_extractor_agent_name,
},
),
"VADAgent": (
VADAgent,
{"audio_in_address": "tcp://localhost:5558", "audio_in_bind": False},
),
}
llm_agent = LLMAgent(
settings.agent_settings.llm_agent_name + "@" + settings.agent_settings.host,
settings.agent_settings.llm_agent_name,
)
await llm_agent.start()
for name, (agent_class, kwargs) in agents_to_start.items():
try:
logger.debug("Starting agent: %s", name)
agent_instance = agent_class(**{k: v for k, v in kwargs.items() if k != "name"})
await agent_instance.start()
logger.info("Agent '%s' started successfully.", name)
except Exception as e:
logger.error("Failed to start agent '%s': %s", name, e, exc_info=True)
# Consider if the application should continue if an agent fails to start.
raise
bdi_core = BDICoreAgent(
settings.agent_settings.bdi_core_agent_name + "@" + settings.agent_settings.host,
settings.agent_settings.bdi_core_agent_name,
"src/control_backend/agents/bdi/rules.asl",
)
await bdi_core.start()
belief_collector = BeliefCollectorAgent(
settings.agent_settings.belief_collector_agent_name + "@" + settings.agent_settings.host,
settings.agent_settings.belief_collector_agent_name,
)
await belief_collector.start()
text_belief_extractor = TBeliefExtractor(
settings.agent_settings.text_belief_extractor_agent_name
+ "@"
+ settings.agent_settings.host,
settings.agent_settings.text_belief_extractor_agent_name,
)
await text_belief_extractor.start()
_temp_vad_agent = VADAgent("tcp://localhost:5558", False)
await _temp_vad_agent.start()
logger.info("VAD agent started, now making ready...")
await _temp_vad_agent.streaming_behaviour.reset()
logger.info("Application startup complete.")
yield
logger.info("%s shutting down.", app.title)
# --- APPLICATION SHUTDOWN ---
logger.info("%s is shutting down.", app.title)
# Potential shutdown logic goes here
logger.info("Application shutdown complete.")
# if __name__ == "__main__":

View File

@@ -49,6 +49,7 @@ async def test_real_audio(mocker):
vad_streamer = Streaming(audio_in_socket, audio_out_socket)
vad_streamer._ready = True
vad_streamer.agent = MagicMock()
for _ in audio_chunks:
await vad_streamer.run()

View File

@@ -45,22 +45,6 @@ def create_mock_message(sender_node: str, body: str, thread: str) -> MagicMock:
return msg
@pytest.mark.asyncio
async def test_run_no_message_received(belief_setter, mocker):
"""
Test that when no message is received, _process_message is not called.
"""
# Arrange
belief_setter.receive.return_value = None
mocker.patch.object(belief_setter, "_process_message")
# Act
await belief_setter.run()
# Assert
belief_setter._process_message.assert_not_called()
@pytest.mark.asyncio
async def test_run_message_received(belief_setter, mocker):
"""
@@ -137,12 +121,10 @@ def test_process_belief_message_invalid_json(belief_setter, mocker, caplog):
mock_set_beliefs = mocker.patch.object(belief_setter, "_set_beliefs")
# Act
with caplog.at_level(logging.ERROR):
belief_setter._process_belief_message(msg)
belief_setter._process_belief_message(msg)
# Assert
mock_set_beliefs.assert_not_called()
assert "Could not decode beliefs into JSON format" in caplog.text
def test_process_belief_message_wrong_thread(belief_setter, mocker):
@@ -199,10 +181,6 @@ def test_set_beliefs_success(belief_setter, mock_agent, caplog):
mock_agent.bdi.set_belief.assert_has_calls(expected_calls, any_order=True)
assert mock_agent.bdi.set_belief.call_count == 2
# Check logs
assert "Set belief is_hot with arguments ['kitchen']" in caplog.text
assert "Set belief door_opened with arguments ['front_door', 'back_door']" in caplog.text
# def test_responded_unset(belief_setter, mock_agent):
# # Arrange

View File

@@ -8,6 +8,14 @@ from control_backend.agents.belief_collector.behaviours.continuous_collect impor
)
def create_mock_message(sender_node: str, body: str) -> MagicMock:
"""Helper function to create a configured mock message."""
msg = MagicMock()
msg.sender.node = sender_node # MagicMock automatically creates nested mocks
msg.body = body
return msg
@pytest.fixture
def mock_agent(mocker):
"""Fixture to create a mock Agent."""
@@ -29,22 +37,6 @@ def continuous_collector(mock_agent, mocker):
return collector
@pytest.mark.asyncio
async def test_run_no_message_received(continuous_collector, mocker):
"""
Test that when no message is received, _process_message is not called.
"""
# Arrange
continuous_collector.receive.return_value = None
mocker.patch.object(continuous_collector, "_process_message")
# Act
await continuous_collector.run()
# Assert
continuous_collector._process_message.assert_not_called()
@pytest.mark.asyncio
async def test_run_message_received(continuous_collector, mocker):
"""
@@ -62,48 +54,12 @@ async def test_run_message_received(continuous_collector, mocker):
continuous_collector._process_message.assert_awaited_once_with(mock_msg)
@pytest.mark.asyncio
async def test_process_message_invalid(continuous_collector, mocker):
"""
Test that when an invalid JSON message is received, a warning is logged and processing stops.
"""
# Arrange
invalid_json = "this is not json"
msg = MagicMock()
msg.body = invalid_json
msg.sender = "belief_text_agent_mock@test"
logger_mock = mocker.patch(
"control_backend.agents.belief_collector.behaviours.continuous_collect.logger"
)
# Act
await continuous_collector._process_message(msg)
# Assert
logger_mock.warning.assert_called_once()
def test_get_sender_from_message(continuous_collector):
"""
Test that _sender_node correctly extracts the sender node from the message JID.
"""
# Arrange
msg = MagicMock()
msg.sender = "agent_node@host/resource"
# Act
sender_node = continuous_collector._sender_node(msg)
# Assert
assert sender_node == "agent_node"
@pytest.mark.asyncio
async def test_routes_to_handle_belief_text_by_type(continuous_collector, mocker):
msg = MagicMock()
msg.body = json.dumps({"type": "belief_extraction_text", "beliefs": {"user_said": [["hi"]]}})
msg.sender = "anyone@test"
msg = create_mock_message(
"anyone",
json.dumps({"type": "belief_extraction_text", "beliefs": {"user_said": [["hi"]]}}),
)
spy = mocker.patch.object(continuous_collector, "_handle_belief_text", new=AsyncMock())
await continuous_collector._process_message(msg)
spy.assert_awaited_once()
@@ -111,9 +67,9 @@ async def test_routes_to_handle_belief_text_by_type(continuous_collector, mocker
@pytest.mark.asyncio
async def test_routes_to_handle_belief_text_by_sender(continuous_collector, mocker):
msg = MagicMock()
msg.body = json.dumps({"beliefs": {"user_said": [["hi"]]}}) # no type
msg.sender = "belief_text_agent_mock@test"
msg = create_mock_message(
"belief_text_agent_mock", json.dumps({"beliefs": {"user_said": [["hi"]]}})
)
spy = mocker.patch.object(continuous_collector, "_handle_belief_text", new=AsyncMock())
await continuous_collector._process_message(msg)
spy.assert_awaited_once()
@@ -121,117 +77,22 @@ async def test_routes_to_handle_belief_text_by_sender(continuous_collector, mock
@pytest.mark.asyncio
async def test_routes_to_handle_emo_text(continuous_collector, mocker):
msg = MagicMock()
msg.body = json.dumps({"type": "emotion_extraction_text"})
msg.sender = "anyone@test"
msg = create_mock_message("anyone", json.dumps({"type": "emotion_extraction_text"}))
spy = mocker.patch.object(continuous_collector, "_handle_emo_text", new=AsyncMock())
await continuous_collector._process_message(msg)
spy.assert_awaited_once()
@pytest.mark.asyncio
async def test_unrecognized_message_logs_info(continuous_collector, mocker):
msg = MagicMock()
msg.body = json.dumps({"type": "something_else"})
msg.sender = "x@test"
logger_mock = mocker.patch(
"control_backend.agents.belief_collector.behaviours.continuous_collect.logger"
)
await continuous_collector._process_message(msg)
logger_mock.info.assert_any_call(
"BeliefCollector: unrecognized message (sender=%s, type=%r). Ignoring.",
"x",
"something_else",
)
@pytest.mark.asyncio
async def test_belief_text_no_beliefs(continuous_collector, mocker):
msg_payload = {"type": "belief_extraction_text"} # no 'beliefs'
logger_mock = mocker.patch(
"control_backend.agents.belief_collector.behaviours.continuous_collect.logger"
)
await continuous_collector._handle_belief_text(msg_payload, "origin_node")
logger_mock.info.assert_any_call("BeliefCollector: no beliefs to process.")
@pytest.mark.asyncio
async def test_belief_text_beliefs_not_dict(continuous_collector, mocker):
payload = {"type": "belief_extraction_text", "beliefs": ["not", "a", "dict"]}
logger_mock = mocker.patch(
"control_backend.agents.belief_collector.behaviours.continuous_collect.logger"
)
await continuous_collector._handle_belief_text(payload, "origin")
logger_mock.warning.assert_any_call(
"BeliefCollector: 'beliefs' is not a dict: %r", ["not", "a", "dict"]
)
@pytest.mark.asyncio
async def test_belief_text_values_not_lists(continuous_collector, mocker):
payload = {"type": "belief_extraction_text", "beliefs": {"user_said": "not-a-list"}}
logger_mock = mocker.patch(
"control_backend.agents.belief_collector.behaviours.continuous_collect.logger"
)
await continuous_collector._handle_belief_text(payload, "origin")
logger_mock.warning.assert_any_call(
"BeliefCollector: 'beliefs' values are not all lists: %r", {"user_said": "not-a-list"}
)
@pytest.mark.asyncio
async def test_belief_text_happy_path_logs_items_and_sends(continuous_collector, mocker):
async def test_belief_text_happy_path_sends(continuous_collector, mocker):
payload = {"type": "belief_extraction_text", "beliefs": {"user_said": ["hello test", "No"]}}
continuous_collector.send = AsyncMock()
logger_mock = mocker.patch(
"control_backend.agents.belief_collector.behaviours.continuous_collect.logger"
)
await continuous_collector._handle_belief_text(payload, "belief_text_agent_mock")
logger_mock.info.assert_any_call("BeliefCollector: forwarding %d beliefs.", 1)
# and the item logs:
logger_mock.info.assert_any_call(" - %s %s", "user_said", "hello test")
logger_mock.info.assert_any_call(" - %s %s", "user_said", "No")
# make sure we attempted a send
continuous_collector.send.assert_awaited_once()
@pytest.mark.asyncio
async def test_send_beliefs_noop_on_empty(continuous_collector):
continuous_collector.send = AsyncMock()
await continuous_collector._send_beliefs_to_bdi([], origin="o")
continuous_collector.send.assert_not_awaited()
# @pytest.mark.asyncio
# async def test_send_beliefs_sends_json_packet(continuous_collector):
# # Patch .send and capture the message body
# sent = {}
#
# async def _fake_send(msg):
# sent["body"] = msg.body
# sent["to"] = str(msg.to)
#
# continuous_collector.send = AsyncMock(side_effect=_fake_send)
# beliefs = ["user_said hello", "user_said No"]
# await continuous_collector._send_beliefs_to_bdi(beliefs, origin="origin_node")
#
# assert "belief_packet" in json.loads(sent["body"])["type"]
# assert json.loads(sent["body"])["beliefs"] == beliefs
def test_sender_node_no_sender_returns_literal(continuous_collector):
msg = MagicMock()
msg.sender = None
assert continuous_collector._sender_node(msg) == "no_sender"
def test_sender_node_without_at(continuous_collector):
msg = MagicMock()
msg.sender = "localpartonly"
assert continuous_collector._sender_node(msg) == "localpartonly"
@pytest.mark.asyncio
async def test_belief_text_coerces_non_strings(continuous_collector, mocker):
payload = {"type": "belief_extraction_text", "beliefs": {"user_said": [["hi", 123]]}}

View File

@@ -17,12 +17,21 @@ def audio_out_socket():
@pytest.fixture
def streaming(audio_in_socket, audio_out_socket):
def mock_agent(mocker):
"""Fixture to create a mock BDIAgent."""
agent = MagicMock()
agent.jid = "vad_agent@test"
return agent
@pytest.fixture
def streaming(audio_in_socket, audio_out_socket, mock_agent):
import torch
torch.hub.load.return_value = (..., ...) # Mock
streaming = Streaming(audio_in_socket, audio_out_socket)
streaming._ready = True
streaming.agent = mock_agent
return streaming

View File

@@ -1,7 +1,9 @@
import numpy as np
from control_backend.agents.transcription import SpeechRecognizer
from control_backend.agents.transcription.speech_recognizer import OpenAIWhisperSpeechRecognizer
from control_backend.agents.transcription.speech_recognizer import (
OpenAIWhisperSpeechRecognizer,
SpeechRecognizer,
)
def test_estimate_max_tokens():

View File

@@ -8,6 +8,9 @@ def pytest_configure(config):
collected. It mocks heavy or unavailable modules to prevent ImportErrors.
"""
# --- Mock spade and spade-bdi ---
mock_agentspeak = MagicMock()
mock_httpx = MagicMock()
mock_pydantic = MagicMock()
mock_spade = MagicMock()
mock_spade.agent = MagicMock()
mock_spade.behaviour = MagicMock()
@@ -19,6 +22,9 @@ def pytest_configure(config):
mock_spade.behaviour.CyclicBehaviour = type("CyclicBehaviour", (object,), {})
mock_spade_bdi.bdi.BDIAgent = type("BDIAgent", (object,), {})
sys.modules["agentspeak"] = mock_agentspeak
sys.modules["httpx"] = mock_httpx
sys.modules["pydantic"] = mock_pydantic
sys.modules["spade"] = mock_spade
sys.modules["spade.agent"] = mock_spade.agent
sys.modules["spade.behaviour"] = mock_spade.behaviour

31
uv.lock generated
View File

@@ -313,6 +313,18 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/a7/06/3d6badcf13db419e25b07041d9c7b4a2c331d3f4e7134445ec5df57714cd/coloredlogs-15.0.1-py2.py3-none-any.whl", hash = "sha256:612ee75c546f53e92e70049c9dbfcc18c935a2b9a53b66085ce9ef6a6e5c0934", size = 46018, upload-time = "2021-06-11T10:22:42.561Z" },
]
[[package]]
name = "colorlog"
version = "6.10.1"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "colorama", marker = "sys_platform == 'win32'" },
]
sdist = { url = "https://files.pythonhosted.org/packages/a2/61/f083b5ac52e505dfc1c624eafbf8c7589a0d7f32daa398d2e7590efa5fda/colorlog-6.10.1.tar.gz", hash = "sha256:eb4ae5cb65fe7fec7773c2306061a8e63e02efc2c72eba9d27b0fa23c94f1321", size = 17162, upload-time = "2025-10-16T16:14:11.978Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/6d/c1/e419ef3723a074172b68aaa89c9f3de486ed4c2399e2dbd8113a4fdcaf9e/colorlog-6.10.1-py3-none-any.whl", hash = "sha256:2d7e8348291948af66122cff006c9f8da6255d224e7cf8e37d8de2df3bad8c9c", size = 11743, upload-time = "2025-10-16T16:14:10.512Z" },
]
[[package]]
name = "coverage"
version = "7.11.0"
@@ -616,6 +628,8 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/ee/43/3cecdc0349359e1a527cbf2e3e28e5f8f06d3343aaf82ca13437a9aa290f/greenlet-3.2.4-cp313-cp313-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:23768528f2911bcd7e475210822ffb5254ed10d71f4028387e5a99b4c6699671", size = 610497, upload-time = "2025-08-07T13:18:31.636Z" },
{ url = "https://files.pythonhosted.org/packages/b8/19/06b6cf5d604e2c382a6f31cafafd6f33d5dea706f4db7bdab184bad2b21d/greenlet-3.2.4-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:00fadb3fedccc447f517ee0d3fd8fe49eae949e1cd0f6a611818f4f6fb7dc83b", size = 1121662, upload-time = "2025-08-07T13:42:41.117Z" },
{ url = "https://files.pythonhosted.org/packages/a2/15/0d5e4e1a66fab130d98168fe984c509249c833c1a3c16806b90f253ce7b9/greenlet-3.2.4-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:d25c5091190f2dc0eaa3f950252122edbbadbb682aa7b1ef2f8af0f8c0afefae", size = 1149210, upload-time = "2025-08-07T13:18:24.072Z" },
{ url = "https://files.pythonhosted.org/packages/1c/53/f9c440463b3057485b8594d7a638bed53ba531165ef0ca0e6c364b5cc807/greenlet-3.2.4-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:6e343822feb58ac4d0a1211bd9399de2b3a04963ddeec21530fc426cc121f19b", size = 1564759, upload-time = "2025-11-04T12:42:19.395Z" },
{ url = "https://files.pythonhosted.org/packages/47/e4/3bb4240abdd0a8d23f4f88adec746a3099f0d86bfedb623f063b2e3b4df0/greenlet-3.2.4-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:ca7f6f1f2649b89ce02f6f229d7c19f680a6238af656f61e0115b24857917929", size = 1634288, upload-time = "2025-11-04T12:42:21.174Z" },
{ url = "https://files.pythonhosted.org/packages/0b/55/2321e43595e6801e105fcfdee02b34c0f996eb71e6ddffca6b10b7e1d771/greenlet-3.2.4-cp313-cp313-win_amd64.whl", hash = "sha256:554b03b6e73aaabec3745364d6239e9e012d64c68ccd0b8430c64ccc14939a8b", size = 299685, upload-time = "2025-08-07T13:24:38.824Z" },
{ url = "https://files.pythonhosted.org/packages/22/5c/85273fd7cc388285632b0498dbbab97596e04b154933dfe0f3e68156c68c/greenlet-3.2.4-cp314-cp314-macosx_11_0_universal2.whl", hash = "sha256:49a30d5fda2507ae77be16479bdb62a660fa51b1eb4928b524975b3bde77b3c0", size = 273586, upload-time = "2025-08-07T13:16:08.004Z" },
{ url = "https://files.pythonhosted.org/packages/d1/75/10aeeaa3da9332c2e761e4c50d4c3556c21113ee3f0afa2cf5769946f7a3/greenlet-3.2.4-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:299fd615cd8fc86267b47597123e3f43ad79c9d8a22bebdce535e53550763e2f", size = 686346, upload-time = "2025-08-07T13:42:59.944Z" },
@@ -623,6 +637,8 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/dc/8b/29aae55436521f1d6f8ff4e12fb676f3400de7fcf27fccd1d4d17fd8fecd/greenlet-3.2.4-cp314-cp314-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:b4a1870c51720687af7fa3e7cda6d08d801dae660f75a76f3845b642b4da6ee1", size = 694659, upload-time = "2025-08-07T13:53:17.759Z" },
{ url = "https://files.pythonhosted.org/packages/92/2e/ea25914b1ebfde93b6fc4ff46d6864564fba59024e928bdc7de475affc25/greenlet-3.2.4-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:061dc4cf2c34852b052a8620d40f36324554bc192be474b9e9770e8c042fd735", size = 695355, upload-time = "2025-08-07T13:18:34.517Z" },
{ url = "https://files.pythonhosted.org/packages/72/60/fc56c62046ec17f6b0d3060564562c64c862948c9d4bc8aa807cf5bd74f4/greenlet-3.2.4-cp314-cp314-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:44358b9bf66c8576a9f57a590d5f5d6e72fa4228b763d0e43fee6d3b06d3a337", size = 657512, upload-time = "2025-08-07T13:18:33.969Z" },
{ url = "https://files.pythonhosted.org/packages/23/6e/74407aed965a4ab6ddd93a7ded3180b730d281c77b765788419484cdfeef/greenlet-3.2.4-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:2917bdf657f5859fbf3386b12d68ede4cf1f04c90c3a6bc1f013dd68a22e2269", size = 1612508, upload-time = "2025-11-04T12:42:23.427Z" },
{ url = "https://files.pythonhosted.org/packages/0d/da/343cd760ab2f92bac1845ca07ee3faea9fe52bee65f7bcb19f16ad7de08b/greenlet-3.2.4-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:015d48959d4add5d6c9f6c5210ee3803a830dce46356e3bc326d6776bde54681", size = 1680760, upload-time = "2025-11-04T12:42:25.341Z" },
{ url = "https://files.pythonhosted.org/packages/e3/a5/6ddab2b4c112be95601c13428db1d8b6608a8b6039816f2ba09c346c08fc/greenlet-3.2.4-cp314-cp314-win_amd64.whl", hash = "sha256:e37ab26028f12dbb0ff65f29a8d3d44a765c61e729647bf2ddfbbed621726f01", size = 303425, upload-time = "2025-08-07T13:32:27.59Z" },
]
@@ -1330,6 +1346,7 @@ name = "pepperplus-cb"
version = "0.1.0"
source = { virtual = "." }
dependencies = [
{ name = "colorlog" },
{ name = "fastapi", extra = ["all"] },
{ name = "mlx-whisper", marker = "sys_platform == 'darwin'" },
{ name = "numpy" },
@@ -1341,6 +1358,8 @@ dependencies = [
{ name = "pytest-asyncio" },
{ name = "pytest-cov" },
{ name = "pytest-mock" },
{ name = "python-json-logger" },
{ name = "pyyaml" },
{ name = "pyzmq" },
{ name = "silero-vad" },
{ name = "spade" },
@@ -1368,6 +1387,7 @@ test = [
[package.metadata]
requires-dist = [
{ name = "colorlog", specifier = ">=6.10.1" },
{ name = "fastapi", extras = ["all"], specifier = ">=0.115.6" },
{ name = "mlx-whisper", marker = "sys_platform == 'darwin'", specifier = ">=0.4.3" },
{ name = "numpy", specifier = ">=2.3.3" },
@@ -1379,6 +1399,8 @@ requires-dist = [
{ name = "pytest-asyncio", specifier = ">=1.2.0" },
{ name = "pytest-cov", specifier = ">=7.0.0" },
{ name = "pytest-mock", specifier = ">=3.15.1" },
{ name = "python-json-logger", specifier = ">=4.0.0" },
{ name = "pyyaml", specifier = ">=6.0.3" },
{ name = "pyzmq", specifier = ">=27.1.0" },
{ name = "silero-vad", specifier = ">=6.0.0" },
{ name = "spade", specifier = ">=4.1.0" },
@@ -1815,6 +1837,15 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/5f/ed/539768cf28c661b5b068d66d96a2f155c4971a5d55684a514c1a0e0dec2f/python_dotenv-1.1.1-py3-none-any.whl", hash = "sha256:31f23644fe2602f88ff55e1f5c79ba497e01224ee7737937930c448e4d0e24dc", size = 20556, upload-time = "2025-06-24T04:21:06.073Z" },
]
[[package]]
name = "python-json-logger"
version = "4.0.0"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/29/bf/eca6a3d43db1dae7070f70e160ab20b807627ba953663ba07928cdd3dc58/python_json_logger-4.0.0.tar.gz", hash = "sha256:f58e68eb46e1faed27e0f574a55a0455eecd7b8a5b88b85a784519ba3cff047f", size = 17683, upload-time = "2025-10-06T04:15:18.984Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/51/e5/fecf13f06e5e5f67e8837d777d1bc43fac0ed2b77a676804df5c34744727/python_json_logger-4.0.0-py3-none-any.whl", hash = "sha256:af09c9daf6a813aa4cc7180395f50f2a9e5fa056034c9953aec92e381c5ba1e2", size = 15548, upload-time = "2025-10-06T04:15:17.553Z" },
]
[[package]]
name = "python-multipart"
version = "0.0.20"