refactor: rename all agents and improve structure pt1

ref: N25B-257
Author: Björn Otgaar
Date: 2025-11-12 11:04:49 +01:00
parent 781a05328f
commit 0e45383027
37 changed files with 199 additions and 201 deletions

View File

@@ -1,2 +0,0 @@
from .bdi_core import BDICoreAgent as BDICoreAgent
from .text_extractor import TBeliefExtractorAgent as TBeliefExtractorAgent

View File

@@ -1,67 +0,0 @@
import logging
import agentspeak
from spade.behaviour import OneShotBehaviour
from spade.message import Message
from spade_bdi.bdi import BDIAgent
from control_backend.core.config import settings
from .behaviours.belief_setter import BeliefSetterBehaviour
from .behaviours.receive_llm_resp_behaviour import ReceiveLLMResponseBehaviour
class BDICoreAgent(BDIAgent):
"""
This is the brain agent that performs belief inference with AgentSpeak.
Inference is a continuous process that runs automatically in the background.
This class contains all the actions that can be called from AgentSpeak plans.
It runs the BeliefSetter behaviour and can send requests to, and receive responses from, the LLM agent.
"""
logger = logging.getLogger(__package__).getChild(__name__)
async def setup(self) -> None:
"""
Initializes belief behaviors and message routing.
"""
self.logger.info("BDICoreAgent setup started.")
self.add_behaviour(BeliefSetterBehaviour())
self.add_behaviour(ReceiveLLMResponseBehaviour())
self.logger.info("BDICoreAgent setup complete.")
def add_custom_actions(self, actions) -> None:
"""
Registers custom AgentSpeak actions callable from plans.
"""
@actions.add(".reply", 1)
def _reply(agent: "BDICoreAgent", term, intention):
"""
Sends text to the LLM (AgentSpeak action).
Example: .reply("Hello LLM!")
"""
message_text = agentspeak.grounded(term.args[0], intention.scope)
self.logger.debug("Reply action sending: %s", message_text)
self._send_to_llm(str(message_text))
yield
def _send_to_llm(self, text: str):
"""
Sends a text query to the LLM Agent asynchronously.
"""
class SendBehaviour(OneShotBehaviour):
async def run(self) -> None:
msg = Message(
to=settings.agent_settings.llm_agent_name + "@" + settings.agent_settings.host,
body=text,
)
await self.send(msg)
self.agent.logger.info("Message sent to LLM agent: %s", text)
self.add_behaviour(SendBehaviour())
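
A minimal usage sketch for starting this agent, assuming spade_bdi's BDIAgent(jid, password, asl) constructor; the JID, password, and plan-file path below are hypothetical:

if __name__ == "__main__":
    import asyncio

    async def main() -> None:
        # Hypothetical XMPP credentials and AgentSpeak plan file.
        agent = BDICoreAgent("bdi_core@localhost", "secret", "bdi_core.asl")
        await agent.start()
        try:
            await asyncio.sleep(30)  # let the background behaviours run for a while
        finally:
            await agent.stop()

    asyncio.run(main())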

View File

@@ -1,85 +0,0 @@
import json
from spade.agent import Message
from spade.behaviour import CyclicBehaviour
from spade_bdi.bdi import BDIAgent
from control_backend.core.config import settings
class BeliefSetterBehaviour(CyclicBehaviour):
"""
This is the behaviour that the BDI agent runs. It waits for incoming
messages and updates the agent's beliefs accordingly.
"""
agent: BDIAgent
async def run(self):
"""Polls for messages and processes them."""
msg = await self.receive()
if msg is None:
    return
self.agent.logger.debug(
"Received message from %s with thread '%s' and body: %s",
msg.sender,
msg.thread,
msg.body,
)
self._process_message(msg)
def _process_message(self, message: Message):
"""Routes the message to the correct processing function based on the sender."""
sender = message.sender.node # removes host from jid and converts to str
self.agent.logger.debug("Processing message from sender: %s", sender)
match sender:
case settings.agent_settings.belief_collector_agent_name:
self.agent.logger.debug(
"Message is from the belief collector agent. Processing as belief message."
)
self._process_belief_message(message)
case _:
self.agent.logger.debug("Not the belief agent, discarding message")
pass
def _process_belief_message(self, message: Message):
if not message.body:
self.agent.logger.debug("Ignoring message with empty body from %s", message.sender.node)
return
match message.thread:
case "beliefs":
try:
beliefs: dict[str, list[str]] = json.loads(message.body)
self._set_beliefs(beliefs)
except json.JSONDecodeError:
self.agent.logger.error(
"Could not decode beliefs from JSON. Message body: '%s'",
message.body,
exc_info=True,
)
case _:
pass
def _set_beliefs(self, beliefs: dict[str, list[str]]):
"""Removes previous values for beliefs and updates them with the provided values."""
if self.agent.bdi is None:
self.agent.logger.warning("Cannot set beliefs; agent's BDI is not yet initialized.")
return
if not beliefs:
self.agent.logger.debug("Received an empty set of beliefs. No beliefs were updated.")
return
# Set new beliefs (outdated beliefs are automatically removed)
for belief, arguments in beliefs.items():
self.agent.logger.debug("Setting belief %s with arguments %s", belief, arguments)
self.agent.bdi.set_belief(belief, *arguments)
# Special case: if there's a new user message, flag that we haven't responded yet
if belief == "user_said":
self.agent.bdi.set_belief("new_message")
self.agent.logger.debug(
"Detected 'user_said' belief, also setting 'new_message' belief."
)
self.agent.logger.info("Successfully updated %d beliefs.", len(beliefs))

View File

@@ -1,37 +0,0 @@
from spade.behaviour import CyclicBehaviour
from spade.message import Message
from control_backend.core.config import settings
from control_backend.schemas.ri_message import SpeechCommand
class ReceiveLLMResponseBehaviour(CyclicBehaviour):
"""
Receives responses from the LLM agent and forwards them to the RI command agent as speech commands.
"""
async def run(self):
msg = await self.receive()
if msg is None:
    return
sender = msg.sender.node
match sender:
case settings.agent_settings.llm_agent_name:
content = msg.body
self.agent.logger.info("Received LLM response: %s", content)
speech_command = SpeechCommand(data=content)
message = Message(
to=settings.agent_settings.ri_command_agent_name
+ "@"
+ settings.agent_settings.host,
sender=str(self.agent.jid),
body=speech_command.model_dump_json(),
)
self.agent.logger.debug("Sending message: %s", message)
await self.send(message)
case _:
self.agent.logger.debug("Discarding message from %s", sender)
pass
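
A small illustration of the payload built above (the exact SpeechCommand schema is defined in control_backend.schemas.ri_message; only the data field is assumed here):

from control_backend.schemas.ri_message import SpeechCommand

# Serializes to a JSON object containing at least {"data": "Hello!"}.
payload = SpeechCommand(data="Hello!").model_dump_json()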

View File

@@ -1,104 +0,0 @@
import json
import logging
from spade.behaviour import CyclicBehaviour
from spade.message import Message
from control_backend.core.config import settings
class BeliefFromText(CyclicBehaviour):
logger = logging.getLogger(__name__)
# TODO: LLM prompt is still hardcoded
llm_instruction_prompt = """
You are an information extraction assistant for a BDI agent. Your task is to extract values \
from a user's text to bind a list of ungrounded beliefs. Rules:
You will receive a JSON object with "beliefs" (a list of ungrounded AgentSpeak beliefs) \
and "text" (user's transcript).
Analyze the text to find values that semantically match the variables (X, Y, Z) in the beliefs.
A single piece of text might contain multiple instances that match a belief.
Respond ONLY with a single JSON object.
The JSON object's keys should be the belief functors (e.g., "weather").
The value for each key must be a list of lists.
Each inner list must contain the extracted arguments (as strings) for one instance \
of that belief.
CRITICAL: If no information in the text matches a belief, DO NOT include that key \
in your response.
"""
# In on_start, the agent should receive a message containing the beliefs to look out for
# and set up the LLM with the instruction prompt:
# async def on_start(self):
# msg = await self.receive(timeout=0.1)
# self.beliefs = dict parsed from the message
# send instruction prompt to LLM
beliefs: dict[str, list[str]] = {"mood": ["X"], "car": ["Y"]}
async def run(self):
msg = await self.receive()
if msg is None:
return
sender = msg.sender.node
match sender:
case settings.agent_settings.transcription_agent_name:
self.logger.debug("Received text from transcriber: %s", msg.body)
await self._process_transcription_demo(msg.body)
case _:
self.logger.info("Discarding message from %s", sender)
pass
async def _process_transcription(self, text: str):
text_prompt = f"Text: {text}"
beliefs_prompt = "These are the beliefs to be bound:\n"
for belief, values in self.beliefs.items():
beliefs_prompt += f"{belief}({', '.join(values)})\n"
prompt = text_prompt + beliefs_prompt
self.logger.info(prompt)
# prompt_msg = Message(to="LLMAgent@whatever")
# response = self.send(prompt_msg)
# Mock response; the response contains beliefs in JSON format and parses to dict[str, list[list[str]]]
response = '{"mood": [["happy"]]}'
# Verify by trying to parse
try:
json.loads(response)
belief_message = Message()
belief_message.to = (
settings.agent_settings.belief_collector_agent_name
+ "@"
+ settings.agent_settings.host
)
belief_message.body = response
belief_message.thread = "beliefs"
await self.send(belief_message)
self.agent.logger.info("Sent beliefs to BDI.")
except json.JSONDecodeError:
# Parsing failed, so the response is in the wrong format, log warning
self.logger.warning("Received LLM response in an incorrect format.")
async def _process_transcription_demo(self, txt: str):
"""
Demo version of processing the transcription input into beliefs. For the demo only the belief
'user_said' is relevant, so this function simply builds a dict with the key "user_said" and
the value txt, and passes it to the belief collector agent.
"""
belief = {"beliefs": {"user_said": [txt]}, "type": "belief_extraction_text"}
payload = json.dumps(belief)
belief_msg = Message()
belief_msg.to = (
settings.agent_settings.belief_collector_agent_name + "@" + settings.agent_settings.host
)
belief_msg.body = payload
belief_msg.thread = "beliefs"
await self.send(belief_msg)
self.logger.info("Sent %d beliefs to the belief collector.", len(belief["beliefs"]))

View File

@@ -1,3 +0,0 @@
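// When a new user message arrives, drop the new_message flag and forward the text to the LLM via the custom .reply action.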
+new_message : user_said(Message) <-
-new_message;
.reply(Message).

View File

@@ -1,8 +0,0 @@
from control_backend.agents.base import BaseAgent
from .behaviours.text_belief_extractor import BeliefFromText
class TBeliefExtractorAgent(BaseAgent):
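"""Agent that extracts beliefs from transcribed text via the BeliefFromText behaviour."""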
async def setup(self):
self.add_behaviour(BeliefFromText())