feat: implemented basic belief-from-text extractor

Communication with other agents has been tested with mock data, since the other agents involved (the transcriber and the belief collector) are not yet implemented.
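
For illustration, the data shapes involved (taken from the hardcoded placeholder beliefs and the mocked LLM response in the diff):

    beliefs = {"mood": ["X"], "car": ["Y"]}   # belief functor -> variables to bind
    llm_response = '{"mood": [["happy"]]}'    # functor -> list of argument lists, forwarded as JSON to the BDI core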

ref: N25B-208
Storm
2025-10-24 17:25:25 +02:00
parent 9b36982bf2
commit bece44bf7d
4 changed files with 119 additions and 1 deletion

src/control_backend/agents/bdi/behaviours/text_belief_extractor.py

@@ -0,0 +1,76 @@
import asyncio
import json
import logging

from spade.behaviour import CyclicBehaviour
from spade.message import Message

from control_backend.core.config import settings
class BeliefFromText(CyclicBehaviour):
    logger = logging.getLogger("Belief From Text")

    # TODO: LLM prompt is still hardcoded
    llm_instruction_prompt = """
You are an information extraction assistant for a BDI agent. Your task is to extract values from
a user's text to bind a list of ungrounded beliefs. Rules:
You will receive a JSON object with "beliefs" (a list of ungrounded AgentSpeak beliefs) and "text" (the user's transcript).
Analyze the text to find values that semantically match the variables (X, Y, Z) in the beliefs.
A single piece of text might contain multiple instances that match a belief.
Respond ONLY with a single JSON object.
The JSON object's keys should be the belief functors (e.g., "weather").
The value for each key must be a list of lists.
Each inner list must contain the extracted arguments (as strings) for one instance of that belief.
CRITICAL: If no information in the text matches a belief, DO NOT include that key in your response.
"""

    # on_start: the agent receives a message containing the beliefs to look out for
    # and sets up the LLM with the instruction prompt.
    # async def on_start(self):
    #     msg = await self.receive(timeout=0.1)
    #     self.beliefs = dict parsed from the message
    #     send the instruction prompt to the LLM

    # Hardcoded placeholder beliefs until on_start is implemented.
    beliefs: dict[str, list[str]]
    beliefs = {
        "mood": ["X"],
        "car": ["Y"],
    }

    async def run(self):
        msg = await self.receive(timeout=0.1)
        if msg:
            sender = msg.sender.node
            match sender:
                # TODO: Change to the Transcriber agent name once it is implemented
                case settings.agent_settings.test_agent_name:
                    self.logger.info("Received text from transcriber.")
                    await self._process_transcription(msg.body)
                case _:
                    self.logger.info("Received message from another agent.")
        await asyncio.sleep(1)

    async def _process_transcription(self, text: str):
        text_prompt = f"Text: {text}"
        beliefs_prompt = "These are the beliefs to be bound:\n"
        for belief, values in self.beliefs.items():
            beliefs_prompt += f"{belief}({', '.join(values)})\n"
        prompt = text_prompt + "\n" + beliefs_prompt
        self.logger.info(prompt)
        # prompt_msg = Message(to="LLMAgent@whatever")
        # response = self.send(prompt_msg)
        # Mock response; the real response contains the beliefs in JSON format and parses to dict[str, list[list[str]]]
        response = '{"mood": [["happy"]]}'
        # Verify the format by trying to parse it
        try:
            json.loads(response)
            belief_message = Message(to=settings.agent_settings.bdi_core_agent_name + '@' + settings.agent_settings.host, body=response)
            belief_message.thread = "beliefs"
            await self.send(belief_message)
            self.logger.info("Sent beliefs to BDI.")
        except json.JSONDecodeError:
            # Parsing failed, so the response is in the wrong format; log a warning
            # (possibly let the LLM respond again?)
            self.logger.warning("Received LLM response in incorrect format.")
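
Not part of this commit, but for context: since the forwarded message sets thread = "beliefs", the receiving BDI agent could select these messages with SPADE's standard template matching. A minimal sketch, assuming the usual spade.template.Template API and a hypothetical collector behaviour:

    from spade.template import Template

    beliefs_template = Template()
    beliefs_template.thread = "beliefs"
    # bdi_agent.add_behaviour(belief_collector_behaviour, beliefs_template)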

src/control_backend/agents/bdi/test_agent.py

@@ -0,0 +1,26 @@
import spade
from spade.agent import Agent
from spade.behaviour import OneShotBehaviour
from spade.message import Message
from spade.template import Template
from control_backend.core.config import AgentSettings, settings
class SenderAgent(Agent):
    class InformBehav(OneShotBehaviour):
        async def run(self):
            msg = Message(to=settings.agent_settings.text_belief_extractor_agent_name + '@' + settings.agent_settings.host)  # Instantiate the message
            msg.body = "This is a test input to extract beliefs from.\n"  # Set the message content
            await self.send(msg)
            print("Message sent!")
            # set exit_code for the behaviour
            self.exit_code = "Job Finished!"
            # stop agent from behaviour
            await self.agent.stop()

    async def setup(self):
        print("SenderAgent started")
        self.b = self.InformBehav()
        self.add_behaviour(self.b)

src/control_backend/agents/bdi/text_extractor.py

@@ -0,0 +1,10 @@
import spade
from spade.agent import Agent
import logging
from control_backend.agents.bdi.behaviours.text_belief_extractor import BeliefFromText
class TBeliefExtractor(Agent):
    async def setup(self):
        self.b = BeliefFromText()
        self.add_behaviour(self.b)


@@ -13,6 +13,8 @@ import zmq
# Internal imports
from control_backend.agents.bdi.bdi_core import BDICoreAgent
from control_backend.agents.bdi.text_extractor import TBeliefExtractor
from control_backend.agents.bdi.test_agent import SenderAgent
from control_backend.api.v1.router import api_router
from control_backend.core.config import AgentSettings, settings
from control_backend.core.zmq_context import context
@@ -32,8 +34,12 @@ async def lifespan(app: FastAPI):
logger.info("Internal publishing socket bound to %s", internal_comm_socket)
# Initiate agents
bdi_core = BDICoreAgent(settings.agent_settings.bdi_core_agent_name + '@' + settings.agent_settings.host, settings.agent_settings.bdi_core_agent_name, "src/control_backend/agents/bdi/rules.asl")
bdi_core = BDICoreAgent(settings.agent_settings.bdi_core_agent_name + '@' + settings.agent_settings.host, "pohpu7-huqsyH-qutduk", "src/control_backend/agents/bdi/rules.asl")
await bdi_core.start()
text_belief_extractor = TBeliefExtractor(settings.agent_settings.text_belief_extractor_agent_name + '@' + settings.agent_settings.host, "pohpu7-huqsyH-qutduk")
await text_belief_extractor.start()
test_agent = SenderAgent(settings.agent_settings.test_agent_name + '@' + settings.agent_settings.host, "pohpu7-huqsyH-qutduk")
await test_agent.start()
yield