diff --git a/src/control_backend/agents/bdi/behaviours/text_belief_extractor.py b/src/control_backend/agents/bdi/behaviours/text_belief_extractor.py new file mode 100644 index 0000000..ea1b04f --- /dev/null +++ b/src/control_backend/agents/bdi/behaviours/text_belief_extractor.py @@ -0,0 +1,96 @@ +import asyncio +import json +import logging + +from spade.behaviour import CyclicBehaviour +from spade.message import Message + +from control_backend.core.config import settings + + +class BeliefFromText(CyclicBehaviour): + logger = logging.getLogger("Belief From Text") + + # TODO: LLM prompt still hardcoded + llm_instruction_prompt = """ + You are an information extraction assistant for a BDI agent. Your task is to extract values from a user's text to bind a list of ungrounded beliefs. Rules: + You will receive a JSON object with "beliefs" (a list of ungrounded AgentSpeak beliefs) and "text" (user's transcript). + Analyze the text to find values that semantically match the variables (X,Y,Z) in the beliefs. + A single piece of text might contain multiple instances that match a belief. + Respond ONLY with a single JSON object. + The JSON object's keys should be the belief functors (e.g., "weather"). + The value for each key must be a list of lists. + Each inner list must contain the extracted arguments (as strings) for one instance of that belief. + CRITICAL: If no information in the text matches a belief, DO NOT include that key in your response.
+ """ + + # on_start agent receives message containing the beliefs to look out for and sets up the LLM with instruction prompt + #async def on_start(self): + # msg = await self.receive(timeout=0.1) + # self.beliefs = dict from message + # send instruction prompt to LLM + + beliefs: dict[str, list[str]] + beliefs = { + "mood": ["X"], + "car": ["Y"] + } + + async def run(self): + msg = await self.receive(timeout=0.1) + if msg: + sender = msg.sender.node + match sender: + # TODO: Change to Transcriber agent name once implemented + case settings.agent_settings.test_agent_name: + self.logger.info("Received text from transcriber.") + await self._process_transcription_demo(msg.body) + case _: + self.logger.info("Received message from other agent.") + pass + await asyncio.sleep(1) + + async def _process_transcription(self, text: str): + text_prompt = f"Text: {text}" + + beliefs_prompt = "These are the beliefs to be bound:\n" + for belief, values in self.beliefs.items(): + beliefs_prompt += f"{belief}({', '.join(values)})\n" + + prompt = text_prompt + beliefs_prompt + self.logger.info(prompt) + #prompt_msg = Message(to="LLMAgent@whatever") + #response = self.send(prompt_msg) + + # Mock response; response is beliefs in JSON format, it parses to dict[str,list[list[str]]] + response = '{"mood": [["happy"]]}' + # Verify by trying to parse + try: + json.loads(response) + belief_message = Message( + to=settings.agent_settings.bdi_core_agent_name + '@' + settings.agent_settings.host, + body=response) + belief_message.thread = "beliefs" + + await self.send(belief_message) + self.logger.info("Sent beliefs to BDI.") + except json.JSONDecodeError: + # Parsing failed, so the response is in the wrong format, log warning + self.logger.warning("Received LLM response in incorrect format.") + + async def _process_transcription_demo(self, txt: str): + """ + Demo version to process the transcription input to beliefs.
For the demo only the belief + 'user_said' is relevant, so this function simply makes a dict with key: "user_said", + value: txt and passes this to the Belief Collector agent. + """ + belief = {"user_said": [txt]} + payload = json.dumps(belief) + # TODO: Change to belief collector + belief_msg = Message(to=settings.agent_settings.bdi_core_agent_name + + '@' + settings.agent_settings.host, + body=payload) + belief_msg.thread = "beliefs" + + await self.send(belief_msg) + self.logger.info("Sent beliefs to Belief Collector.") diff --git a/src/control_backend/agents/bdi/text_extractor.py b/src/control_backend/agents/bdi/text_extractor.py new file mode 100644 index 0000000..596a3fe --- /dev/null +++ b/src/control_backend/agents/bdi/text_extractor.py @@ -0,0 +1,9 @@ +from spade.agent import Agent + +from control_backend.agents.bdi.behaviours.text_belief_extractor import BeliefFromText + + +class TBeliefExtractor(Agent): + async def setup(self): + self.b = BeliefFromText() + self.add_behaviour(self.b) \ No newline at end of file diff --git a/src/control_backend/core/config.py b/src/control_backend/core/config.py index 5298d9c..a9c7588 100644 --- a/src/control_backend/core/config.py +++ b/src/control_backend/core/config.py @@ -10,6 +10,7 @@ class AgentSettings(BaseModel): host: str = "localhost" bdi_core_agent_name: str = "bdi_core" belief_collector_agent_name: str = "belief_collector" + text_belief_extractor_agent_name: str = "text_belief_extractor" vad_agent_name: str = "vad_agent" llm_agent_name: str = "llm_agent" test_agent_name: str = "test_agent" @@ -22,8 +23,8 @@ class AgentSettings(BaseModel): class LLMSettings(BaseModel): - local_llm_url: str = "http://145.107.82.68:1234/v1/chat/completions" - local_llm_model: str = "openai/gpt-oss-120b" + local_llm_url: str = "http://localhost:1234/v1/chat/completions" + local_llm_model: str = "openai/gpt-oss-20b" class Settings(BaseSettings): app_title: str = "PepperPlus" diff --git a/src/control_backend/main.py 
b/src/control_backend/main.py index 0dfc309..d3588ea 100644 --- a/src/control_backend/main.py +++ b/src/control_backend/main.py @@ -13,17 +13,12 @@ from control_backend.agents.ri_communication_agent import RICommunicationAgent from control_backend.agents.bdi.bdi_core import BDICoreAgent from control_backend.agents.vad_agent import VADAgent from control_backend.agents.llm.llm import LLMAgent +from control_backend.agents.bdi.text_extractor import TBeliefExtractor +from control_backend.agents.belief_collector.belief_collector import BeliefCollectorAgent from control_backend.api.v1.router import api_router from control_backend.core.config import settings from control_backend.core.zmq_context import context - -# Agents -from control_backend.agents.bdi.bdi_core import BDICoreAgent -from control_backend.agents.belief_collector.belief_collector import BeliefCollectorAgent -from control_backend.agents.mock_agents.emo_text_mock import EmoTextAgent -from control_backend.agents.mock_agents.belief_text_mock import BeliefTextAgent - logger = logging.getLogger(__name__) logging.basicConfig(level=logging.DEBUG) @@ -41,41 +36,40 @@ async def lifespan(app: FastAPI): # Initiate agents + ri_communication_agent = RICommunicationAgent( + settings.agent_settings.ri_communication_agent_name + "@" + settings.agent_settings.host, + settings.agent_settings.ri_communication_agent_name, + address="tcp://*:5555", + bind=True, + ) + await ri_communication_agent.start() + + llm_agent = LLMAgent( + settings.agent_settings.llm_agent_name + '@' + settings.agent_settings.host, + settings.agent_settings.llm_agent_name, + ) + await llm_agent.start() + bdi_core = BDICoreAgent( settings.agent_settings.bdi_core_agent_name + '@' + settings.agent_settings.host, settings.agent_settings.bdi_core_agent_name, - "src/control_backend/agents/bdi/rules.asl" + "src/control_backend/agents/bdi/rules.asl", ) - + await bdi_core.start() + belief_collector = BeliefCollectorAgent( 
settings.agent_settings.belief_collector_agent_name + '@' + settings.agent_settings.host, - settings.agent_settings.belief_collector_agent_name + settings.agent_settings.belief_collector_agent_name, ) - belief_text_mock = BeliefTextAgent( - settings.agent_settings.belief_text_agent_mock_name + '@' + settings.agent_settings.host, - settings.agent_settings.belief_text_agent_mock_name - ) - - - ri_communication_agent = RICommunicationAgent( - settings.agent_settings.ri_communication_agent_name + "@" + settings.agent_settings.host, - settings.agent_settings.ri_communication_agent_name, - address="tcp://*:5555", - bind=True, - ) - - llm_agent = LLMAgent(settings.agent_settings.llm_agent_name + '@' + settings.agent_settings.host, - settings.agent_settings.llm_agent_name) - - - _temp_vad_agent = VADAgent("tcp://localhost:5558", False) - - await bdi_core.start() await belief_collector.start() - await belief_text_mock.start() - await ri_communication_agent.start() - await llm_agent.start() - await bdi_core.start() + + text_belief_extractor = TBeliefExtractor( + settings.agent_settings.text_belief_extractor_agent_name + '@' + settings.agent_settings.host, + settings.agent_settings.text_belief_extractor_agent_name, + ) + await text_belief_extractor.start() + + _temp_vad_agent = VADAgent("tcp://localhost:5558", False) await _temp_vad_agent.start() yield @@ -98,4 +92,4 @@ app.include_router(api_router, prefix="") # TODO: make prefix /api/v1 @app.get("/") async def root(): - return {"status": "ok"} \ No newline at end of file + return {"status": "ok"}