Merge branch 'feat/belief-from-text' into 'dev'

Implemented a basic belief-from-text extractor, including a demo version.

See merge request ics/sp/2025/n25b/pepperplus-cb!13
This commit was merged in pull request #13.
This commit is contained in:
Twirre
2025-10-29 14:31:36 +00:00
4 changed files with 126 additions and 8 deletions

View File

@@ -0,0 +1,96 @@
import asyncio
import json
import logging
from spade.behaviour import CyclicBehaviour
from spade.message import Message
from control_backend.core.config import settings
class BeliefFromText(CyclicBehaviour):
logger = logging.getLogger("Belief From Text")
# TODO: LLM prompt nog hardcoded
llm_instruction_prompt = """
You are an information extraction assistent for a BDI agent. Your task is to extract values from a user's text to bind a list of ungrounded beliefs. Rules:
You will receive a JSON object with "beliefs" (a list of ungrounded AgentSpeak beliefs) and "text" (user's transcript).
Analyze the text to find values that sematically match the variables (X,Y,Z) in the beliefs.
A single piece of text might contain multiple instances that match a belief.
Respond ONLY with a single JSON object.
The JSON object's keys should be the belief functors (e.g., "weather").
The value for each key must be a list of lists.
Each inner list must contain the extracted arguments (as strings) for one instance of that belief.
CRITICAL: If no information in the text matches a belief, DO NOT include that key in your response.
"""
# on_start agent receives message containing the beliefs to look out for and sets up the LLM with instruction prompt
#async def on_start(self):
# msg = await self.receive(timeout=0.1)
# self.beliefs = dict uit message
# send instruction prompt to LLM
beliefs: dict[str, list[str]]
beliefs = {
"mood": ["X"],
"car": ["Y"]
}
async def run(self):
msg = await self.receive(timeout=0.1)
if msg:
sender = msg.sender.node
match sender:
# TODO: Change to Transcriber agent name once implemented
case settings.agent_settings.test_agent_name:
self.logger.info("Received text from transcriber.")
await self._process_transcription_demo(msg.body)
case _:
self.logger.info("Received message from other agent.")
pass
await asyncio.sleep(1)
async def _process_transcription(self, text: str):
text_prompt = f"Text: {text}"
beliefs_prompt = "These are the beliefs to be bound:\n"
for belief, values in self.beliefs.items():
beliefs_prompt += f"{belief}({', '.join(values)})\n"
prompt = text_prompt + beliefs_prompt
self.logger.info(prompt)
#prompt_msg = Message(to="LLMAgent@whatever")
#response = self.send(prompt_msg)
# Mock response; response is beliefs in JSON format, it parses do dict[str,list[list[str]]]
response = '{"mood": [["happy"]]}'
# Verify by trying to parse
try:
json.loads(response)
belief_message = Message(
to=settings.agent_settings.bdi_core_agent_name + '@' + settings.agent_settings.host,
body=response)
belief_message.thread = "beliefs"
await self.send(belief_message)
self.logger.info("Sent beliefs to BDI.")
except json.JSONDecodeError:
# Parsing failed, so the response is in the wrong format, log warning
self.logger.warning("Received LLM response in incorrect format.")
async def _process_transcription_demo(self, txt: str):
"""
Demo version to process the transcription input to beliefs. For the demo only the belief
'user_said' is relevant, so this function simply makes a dict with key: "user_said",
value: txt and passes this to the Belief Collector agent.
"""
belief = {"user_said": [txt]}
payload = json.dumps(belief)
# TODO: Change to belief collector
belief_msg = Message(to=settings.agent_settings.bdi_core_agent_name
+ '@' + settings.agent_settings.host,
body=payload)
belief_msg.thread = "beliefs"
await self.send(belief_msg)
self.logger.info("Sent beliefs to Belief Collector.")

View File

@@ -0,0 +1,9 @@
from spade.agent import Agent
from control_backend.agents.bdi.behaviours.text_belief_extractor import BeliefFromText
class TBeliefExtractor(Agent):
    """SPADE agent that hosts the belief-from-text extraction behaviour."""

    async def setup(self):
        """Create the BeliefFromText behaviour and attach it to this agent."""
        behaviour = BeliefFromText()
        self.b = behaviour
        self.add_behaviour(behaviour)

View File

@@ -10,6 +10,7 @@ class AgentSettings(BaseModel):
# Interior of AgentSettings (class header outside this view): per-agent XMPP
# names plus the shared host they connect to.
host: str = "localhost"  # XMPP server host shared by all agents
bdi_core_agent_name: str = "bdi_core"  # BDI reasoning core agent
belief_collector_agent_name: str = "belief_collector"  # collects beliefs for the BDI
text_belief_extractor_agent_name: str = "text_belief_extractor"  # BeliefFromText host agent
vad_agent_name: str = "vad_agent"  # voice-activity-detection agent
llm_agent_name: str = "llm_agent"  # LLM gateway agent
test_agent_name: str = "test_agent"  # stand-in sender used until the transcriber exists
@@ -19,8 +20,8 @@ class AgentSettings(BaseModel):
class LLMSettings(BaseModel):
    """Configuration for the local LLM chat-completions endpoint."""

    # NOTE(review): the original defined both fields twice (a remote
    # 145.107.82.68 / gpt-oss-120b pair followed by this localhost /
    # gpt-oss-20b pair). The earlier pair was dead code — the later
    # assignment wins in a class body — so the shadowed duplicates were
    # removed; effective values are unchanged.
    local_llm_url: str = "http://localhost:1234/v1/chat/completions"
    local_llm_model: str = "openai/gpt-oss-20b"
class Settings(BaseSettings):
app_title: str = "PepperPlus"

View File

@@ -13,6 +13,7 @@ from control_backend.agents.ri_communication_agent import RICommunicationAgent
from control_backend.agents.bdi.bdi_core import BDICoreAgent
from control_backend.agents.vad_agent import VADAgent
from control_backend.agents.llm.llm import LLMAgent
from control_backend.agents.bdi.text_extractor import TBeliefExtractor
from control_backend.api.v1.router import api_router
from control_backend.core.config import settings
from control_backend.core.zmq_context import context
@@ -41,15 +42,26 @@ async def lifespan(app: FastAPI):
bind=True,
)
await ri_communication_agent.start()
llm_agent = LLMAgent(settings.agent_settings.llm_agent_name + '@' + settings.agent_settings.host,
settings.agent_settings.llm_agent_name)
llm_agent = LLMAgent(
settings.agent_settings.llm_agent_name + '@' + settings.agent_settings.host,
settings.agent_settings.llm_agent_name,
)
await llm_agent.start()
bdi_core = BDICoreAgent(settings.agent_settings.bdi_core_agent_name + '@' + settings.agent_settings.host,
settings.agent_settings.bdi_core_agent_name, "src/control_backend/agents/bdi/rules.asl")
bdi_core = BDICoreAgent(
settings.agent_settings.bdi_core_agent_name + '@' + settings.agent_settings.host,
settings.agent_settings.bdi_core_agent_name,
"src/control_backend/agents/bdi/rules.asl",
)
await bdi_core.start()
text_belief_extractor = TBeliefExtractor(
settings.agent_settings.text_belief_extractor_agent_name + '@' + settings.agent_settings.host,
settings.agent_settings.text_belief_extractor_agent_name,
)
await text_belief_extractor.start()
_temp_vad_agent = VADAgent("tcp://localhost:5558", False)
await _temp_vad_agent.start()