chore: fix merge conflicts and small items

ref: N25B-208
Twirre Meulenbelt
2025-10-29 15:28:15 +01:00
parent 7744852e88
commit 889ec1db51
5 changed files with 47 additions and 30 deletions

View File

@@ -1,8 +1,10 @@
 import asyncio
-from spade.behaviour import CyclicBehaviour
-import logging
-from spade.message import Message
 import json
+import logging
+from spade.behaviour import CyclicBehaviour
+from spade.message import Message
 from control_backend.core.config import settings
@@ -11,8 +13,7 @@ class BeliefFromText(CyclicBehaviour):
 # TODO: LLM prompt still hardcoded
 llm_instruction_prompt = """
-You are an information extraction assistant for a BDI agent. Your task is to extract values from
-a user's text to bind a list of ungrounded beliefs. Rules:
+You are an information extraction assistant for a BDI agent. Your task is to extract values from a user's text to bind a list of ungrounded beliefs. Rules:
 You will receive a JSON object with "beliefs" (a list of ungrounded AgentSpeak beliefs) and "text" (user's transcript).
 Analyze the text to find values that semantically match the variables (X,Y,Z) in the beliefs.
 A single piece of text might contain multiple instances that match a belief.
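For orientation only (not part of the commit): a minimal sketch of the exchange this prompt describes, using the hardcoded beliefs from further down in this file. The exact response schema is an assumption; the code in this behaviour only requires that the reply parses as JSON.

import json

# Input the behaviour composes for the LLM: ungrounded beliefs plus the user's transcript.
request = {
    "beliefs": {"mood": ["X"], "car": ["Y"]},
    "text": "I'm in a great mood, I just picked up my new car.",
}

# Hypothetical LLM reply with the variables bound to values found in the text.
response = '{"mood": [["great"]], "car": [["new car"]]}'
bound = json.loads(response)  # raises json.JSONDecodeError if the reply is malformed
print(bound["mood"])          # [['great']]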
@@ -29,7 +30,7 @@ class BeliefFromText(CyclicBehaviour):
 # self.beliefs = dict from message
 # send instruction prompt to LLM
-beliefs: dict[str,list[str]]
+beliefs: dict[str, list[str]]
 beliefs = {
 "mood": ["X"],
 "car": ["Y"]
@@ -49,7 +50,7 @@ class BeliefFromText(CyclicBehaviour):
 pass
 await asyncio.sleep(1)
-async def _process_transcription(self,text: str):
+async def _process_transcription(self, text: str):
 text_prompt = f"Text: {text}"
 beliefs_prompt = "These are the beliefs to be bound:\n"
@@ -66,26 +67,30 @@ class BeliefFromText(CyclicBehaviour):
 # Verify by trying to parse
 try:
 json.loads(response)
-belief_message = Message(to=settings.agent_settings.bdi_core_agent_name + '@' + settings.agent_settings.host, body=response)
+belief_message = Message(
+    to=settings.agent_settings.bdi_core_agent_name + '@' + settings.agent_settings.host,
+    body=response)
 belief_message.thread = "beliefs"
 await self.send(belief_message)
 self.logger.info("Sent beliefs to BDI.")
-except:
-#loading failed so the response is in wrong format, throw warning (let LLM respond to ask again?)
+except json.JSONDecodeError:
+# Parsing failed, so the response is in the wrong format, log warning
 self.logger.warning("Received LLM response in incorrect format.")
 async def _process_transcription_demo(self, txt: str):
 """
-Demo version to process the transcription input to beliefs. For the demo only the belief 'user_said' is relevant so
-this function simply makes a dict with key: "user_said", value: txt and passes this to the Belief Collector agent.
+Demo version to process the transcription input to beliefs. For the demo only the belief
+'user_said' is relevant, so this function simply makes a dict with key: "user_said",
+value: txt and passes this to the Belief Collector agent.
 """
 belief = {"user_said": [[txt]]}
 payload = json.dumps(belief)
 # TODO: Change to belief collector
-belief_msg = Message(to=settings.agent_settings.bdi_core_agent_name + '@' + settings.agent_settings.host, body=payload)
+belief_msg = Message(to=settings.agent_settings.bdi_core_agent_name
+    + '@' + settings.agent_settings.host,
+    body=payload)
 belief_msg.thread = "beliefs"
 await self.send(belief_msg)
 self.logger.info("Sent beliefs to Belief Collector.")

View File

@@ -1,9 +1,10 @@
+import spade
 from spade.agent import Agent
 from spade.behaviour import OneShotBehaviour
 from spade.message import Message
 from control_backend.core.config import settings
 class SenderAgent(Agent):
 class InformBehav(OneShotBehaviour):
 async def run(self):

View File

@@ -1,9 +1,8 @@
-import spade
 from spade.agent import Agent
-import logging
 from control_backend.agents.bdi.behaviours.text_belief_extractor import BeliefFromText
 class TBeliefExtractor(Agent):
 async def setup(self):
 self.b = BeliefFromText()

View File

@@ -10,6 +10,7 @@ class AgentSettings(BaseModel):
host: str = "localhost" host: str = "localhost"
bdi_core_agent_name: str = "bdi_core" bdi_core_agent_name: str = "bdi_core"
belief_collector_agent_name: str = "belief_collector" belief_collector_agent_name: str = "belief_collector"
text_belief_extractor_agent_name: str = "text_belief_extractor"
vad_agent_name: str = "vad_agent" vad_agent_name: str = "vad_agent"
llm_agent_name: str = "llm_agent" llm_agent_name: str = "llm_agent"
test_agent_name: str = "test_agent" test_agent_name: str = "test_agent"
@@ -19,8 +20,8 @@ class AgentSettings(BaseModel):
 class LLMSettings(BaseModel):
-local_llm_url: str = "http://145.107.82.68:1234/v1/chat/completions"
-local_llm_model: str = "openai/gpt-oss-120b"
+local_llm_url: str = "http://localhost:1234/v1/chat/completions"
+local_llm_model: str = "openai/gpt-oss-20b"
 class Settings(BaseSettings):
 app_title: str = "PepperPlus"

View File

@@ -44,18 +44,29 @@ async def lifespan(app: FastAPI):
 )
 await ri_communication_agent.start()
-llm_agent = LLMAgent(settings.agent_settings.llm_agent_name + '@' + settings.agent_settings.host,
-settings.agent_settings.llm_agent_name)
+llm_agent = LLMAgent(
+    settings.agent_settings.llm_agent_name + '@' + settings.agent_settings.host,
+    settings.agent_settings.llm_agent_name,
+)
 await llm_agent.start()
-bdi_core = BDICoreAgent(settings.agent_settings.bdi_core_agent_name + '@' + settings.agent_settings.host,
-settings.agent_settings.bdi_core_agent_name, "src/control_backend/agents/bdi/rules.asl")
+bdi_core = BDICoreAgent(
+    settings.agent_settings.bdi_core_agent_name + '@' + settings.agent_settings.host,
+    settings.agent_settings.bdi_core_agent_name,
+    "src/control_backend/agents/bdi/rules.asl",
+)
 await bdi_core.start()
-text_belief_extractor = TBeliefExtractor(settings.agent_settings.belief_collector_agent_name + '@' + settings.agent_settings.host, "placehodler")
+text_belief_extractor = TBeliefExtractor(
+    settings.agent_settings.text_belief_extractor_agent_name + '@' + settings.agent_settings.host,
+    settings.agent_settings.text_belief_extractor_agent_name,
+)
 await text_belief_extractor.start()
-test_agent = SenderAgent(settings.agent_settings.test_agent_name + '@' + settings.agent_settings.host, "placeholder")
+test_agent = SenderAgent(
+    settings.agent_settings.test_agent_name + '@' + settings.agent_settings.host,
+    settings.agent_settings.test_agent_name
+)
 await test_agent.start()
 _temp_vad_agent = VADAgent("tcp://localhost:5558", False)