Implemented basic belief-from-text extractor, including demo version #13

Merged
s.o.h.luijkx merged 7 commits from feat/belief-from-text into dev 2025-10-29 14:31:37 +00:00
5 changed files with 47 additions and 30 deletions
Showing only changes of commit 889ec1db51

View File

@@ -1,8 +1,10 @@
import asyncio
from spade.behaviour import CyclicBehaviour
import logging
from spade.message import Message
import json
import logging
from spade.behaviour import CyclicBehaviour
from spade.message import Message
from control_backend.core.config import settings
@@ -11,8 +13,7 @@ class BeliefFromText(CyclicBehaviour):
# TODO: LLM prompt is still hardcoded
llm_instruction_prompt = """
You are an information extraction assistant for a BDI agent. Your task is to extract values from
a user's text to bind a list of ungrounded beliefs. Rules:
You are an information extraction assistant for a BDI agent. Your task is to extract values from a user's text to bind a list of ungrounded beliefs. Rules:
You will receive a JSON object with "beliefs" (a list of ungrounded AgentSpeak beliefs) and "text" (user's transcript).
Analyze the text to find values that semantically match the variables (X, Y, Z) in the beliefs.
A single piece of text might contain multiple instances that match a belief.
@@ -22,14 +23,14 @@ class BeliefFromText(CyclicBehaviour):
Each inner list must contain the extracted arguments (as strings) for one instance of that belief.
CRITICAL: If no information in the text matches a belief, DO NOT include that key in your response.
"""
# on_start: the agent receives a message containing the beliefs to look out for and sets up the LLM with the instruction prompt
#async def on_start(self):
# msg = await self.receive(timeout=0.1)
# self.beliefs = dict from message
# send instruction prompt to LLM
beliefs: dict[str,list[str]]
beliefs: dict[str, list[str]]
beliefs = {
"mood": ["X"],
"car": ["Y"]
@@ -48,14 +49,14 @@ class BeliefFromText(CyclicBehaviour):
self.logger.info("Received message from other agent.")
pass
await asyncio.sleep(1)
async def _process_transcription(self,text: str):
async def _process_transcription(self, text: str):
text_prompt = f"Text: {text}"
beliefs_prompt = "These are the beliefs to be bound:\n"
for belief, values in self.beliefs.items():
beliefs_prompt += f"{belief}({', '.join(values)})\n"
prompt = text_prompt + beliefs_prompt
self.logger.info(prompt)
#prompt_msg = Message(to="LLMAgent@whatever")
@@ -66,26 +67,30 @@ class BeliefFromText(CyclicBehaviour):
# Verify by trying to parse
try:
json.loads(response)
belief_message = Message(to=settings.agent_settings.bdi_core_agent_name + '@' + settings.agent_settings.host, body=response)
belief_message = Message(
to=settings.agent_settings.bdi_core_agent_name + '@' + settings.agent_settings.host,
body=response)
belief_message.thread = "beliefs"
await self.send(belief_message)
self.logger.info("Sent beliefs to BDI.")
except:
#loading failed so the response is in wrong format, throw warning (let LLM respond to ask again?)
except json.JSONDecodeError:
# Parsing failed, so the response is in the wrong format, log warning
self.logger.warning("Received LLM response in incorrect format.")
async def _process_transcription_demo(self, txt: str):
"""
Demo version to process the transcription input to beliefs. For the demo only the belief 'user_said' is relevant so
this function simply makes a dict with key: "user_said", value: txt and passes this to the Belief Collector agent.
Demo version to process the transcription input to beliefs. For the demo only the belief
'user_said' is relevant, so this function simply makes a dict with key: "user_said",
value: txt and passes this to the Belief Collector agent.
"""
belief = {"user_said": [[txt]]}
payload = json.dumps(belief)
# TODO: Change to belief collector
belief_msg = Message(to=settings.agent_settings.bdi_core_agent_name + '@' + settings.agent_settings.host, body=payload)
belief_msg = Message(to=settings.agent_settings.bdi_core_agent_name
+ '@' + settings.agent_settings.host,
body=payload)
belief_msg.thread = "beliefs"
await self.send(belief_msg)
self.logger.info("Sent beliefs to Belief Collector.")

View File

@@ -1,9 +1,10 @@
import spade
from spade.agent import Agent
from spade.behaviour import OneShotBehaviour
from spade.message import Message
from control_backend.core.config import settings
class SenderAgent(Agent):
class InformBehav(OneShotBehaviour):
async def run(self):

View File

@@ -1,9 +1,8 @@
import spade
from spade.agent import Agent
import logging
from control_backend.agents.bdi.behaviours.text_belief_extractor import BeliefFromText
class TBeliefExtractor(Agent):
async def setup(self):
self.b = BeliefFromText()
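The diff view cuts off here; a typical way a SPADE agent would finish wiring this up is sketched below. Only add_behaviour() is standard SPADE API; the Template filter and the "transcription" thread name are assumptions, not shown in this PR.

# Hypothetical completion of setup(); the thread filter is assumed.
from spade.agent import Agent
from spade.template import Template
from control_backend.agents.bdi.behaviours.text_belief_extractor import BeliefFromText

class TBeliefExtractor(Agent):
    async def setup(self):
        self.b = BeliefFromText()
        template = Template()
        template.thread = "transcription"  # assumed thread used by the transcription sender
        self.add_behaviour(self.b, template)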

View File

@@ -10,6 +10,7 @@ class AgentSettings(BaseModel):
host: str = "localhost"
bdi_core_agent_name: str = "bdi_core"
belief_collector_agent_name: str = "belief_collector"
text_belief_extractor_agent_name: str = "text_belief_extractor"
vad_agent_name: str = "vad_agent"
llm_agent_name: str = "llm_agent"
test_agent_name: str = "test_agent"
@@ -19,8 +20,8 @@ class AgentSettings(BaseModel):
class LLMSettings(BaseModel):
local_llm_url: str = "http://145.107.82.68:1234/v1/chat/completions"
local_llm_model: str = "openai/gpt-oss-120b"
local_llm_url: str = "http://localhost:1234/v1/chat/completions"
local_llm_model: str = "openai/gpt-oss-20b"
class Settings(BaseSettings):
app_title: str = "PepperPlus"
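A quick sanity check that the new agent name and the changed local LLM defaults resolve as intended; this is a sketch, and the llm_settings attribute name on Settings is an assumption since it is not visible in this diff.

# Sketch; only the field names shown in the diff are known, the rest is assumed.
from control_backend.core.config import settings

extractor_jid = (
    settings.agent_settings.text_belief_extractor_agent_name
    + "@"
    + settings.agent_settings.host
)
print(extractor_jid)                          # text_belief_extractor@localhost
print(settings.llm_settings.local_llm_url)    # http://localhost:1234/v1/chat/completions
print(settings.llm_settings.local_llm_model)  # openai/gpt-oss-20b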

View File

@@ -44,18 +44,29 @@ async def lifespan(app: FastAPI):
)
await ri_communication_agent.start()
llm_agent = LLMAgent(settings.agent_settings.llm_agent_name + '@' + settings.agent_settings.host,
settings.agent_settings.llm_agent_name)
llm_agent = LLMAgent(
settings.agent_settings.llm_agent_name + '@' + settings.agent_settings.host,
settings.agent_settings.llm_agent_name,
)
await llm_agent.start()
bdi_core = BDICoreAgent(settings.agent_settings.bdi_core_agent_name + '@' + settings.agent_settings.host,
settings.agent_settings.bdi_core_agent_name, "src/control_backend/agents/bdi/rules.asl")
bdi_core = BDICoreAgent(
settings.agent_settings.bdi_core_agent_name + '@' + settings.agent_settings.host,
settings.agent_settings.bdi_core_agent_name,
"src/control_backend/agents/bdi/rules.asl",
)
await bdi_core.start()
text_belief_extractor = TBeliefExtractor(settings.agent_settings.belief_collector_agent_name + '@' + settings.agent_settings.host, "placehodler")
text_belief_extractor = TBeliefExtractor(
settings.agent_settings.text_belief_extractor_agent_name + '@' + settings.agent_settings.host,
settings.agent_settings.text_belief_extractor_agent_name,
)
await text_belief_extractor.start()
test_agent = SenderAgent(settings.agent_settings.test_agent_name + '@' + settings.agent_settings.host, "placeholder")
test_agent = SenderAgent(
settings.agent_settings.test_agent_name + '@' + settings.agent_settings.host,
settings.agent_settings.test_agent_name
)
await test_agent.start()
_temp_vad_agent = VADAgent("tcp://localhost:5558", False)
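Every agent JID in the lifespan is built with the same name + '@' + host concatenation; a small helper like the sketch below (not part of this PR) would keep those JIDs consistent and shorten the constructor calls.

# Suggested helper, not in the diff: build a full XMPP JID from a configured agent name.
from control_backend.core.config import settings

def jid(agent_name: str) -> str:
    return f"{agent_name}@{settings.agent_settings.host}"

# e.g.
# text_belief_extractor = TBeliefExtractor(
#     jid(settings.agent_settings.text_belief_extractor_agent_name),
#     settings.agent_settings.text_belief_extractor_agent_name,
# )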