From c5b71450fc5cd26e38967133d2c1b0e448c6739b Mon Sep 17 00:00:00 2001 From: JobvAlewijk Date: Mon, 27 Oct 2025 14:21:18 +0100 Subject: [PATCH 01/10] feat: LLM agent body: added the llmAgent class and made it run at the start. modified the bdi_core to send a test message and recieve an awnser from LLM agent Added a connection to a local llm via lmstudio. Tests are Tba. ref: N25B-207 --- README.md | 11 ++ src/control_backend/agents/bdi/bdi_core.py | 89 +++++++++++-- .../agents/bdi/behaviours/belief_setter.py | 1 + src/control_backend/agents/llm/llm.py | 125 ++++++++++++++++++ src/control_backend/core/config.py | 3 +- src/control_backend/main.py | 9 +- 6 files changed, 222 insertions(+), 16 deletions(-) create mode 100644 src/control_backend/agents/llm/llm.py diff --git a/README.md b/README.md index c2a8702..6601529 100644 --- a/README.md +++ b/README.md @@ -16,6 +16,17 @@ Using UV, installing the packages and virtual environment is as simple as typing uv sync ``` +## Local LLM + +To run a LLM locally download https://lmstudio.ai +When installing select developer mode, download a model (it will already suggest one) and run it (see developer window, status: running) + +copy the url at the top right and replace LOCAL_LLM_URL with it + v1/chat/completions. +This + part might differ based on what model you choose. + +copy the model name in the module loaded and replace LOCAL_LLM_MODEL. 
+ + ## Running To run the project (development server), execute the following command (while inside the root repository): diff --git a/src/control_backend/agents/bdi/bdi_core.py b/src/control_backend/agents/bdi/bdi_core.py index 7311061..b960d3f 100644 --- a/src/control_backend/agents/bdi/bdi_core.py +++ b/src/control_backend/agents/bdi/bdi_core.py @@ -1,35 +1,96 @@ import logging import agentspeak +from spade.behaviour import CyclicBehaviour, OneShotBehaviour +from spade.message import Message +from spade.template import Template from spade_bdi.bdi import BDIAgent from control_backend.agents.bdi.behaviours.belief_setter import BeliefSetter +from control_backend.core.config import settings + class BDICoreAgent(BDIAgent): """ This is the Brain agent that does the belief inference with AgentSpeak. This is a continous process that happens automatically in the background. This class contains all the actions that can be called from AgentSpeak plans. - It has the BeliefSetter behaviour. + It has the BeliefSetter behaviour and can aks and recieve requests from the LLM agent. """ - logger = logging.getLogger("BDI Core") - async def setup(self): - belief_setter = BeliefSetter() - self.add_behaviour(belief_setter) + logger = logging.getLogger("bdi_core_agent") + + async def setup(self) -> None: + """ + Initializes belief behaviors and message routing. + """ + self.logger.info("BDICoreAgent setup started") + + self.add_behaviour(BeliefSetter()) + self._add_llm_response_receiver() + + await self._send_to_llm("Hello we are the Pepper plus team") + # This is the example message currently sent to the llm at the start of the Program + + self.logger.info("BDICoreAgent setup complete") + + def add_custom_actions(self, actions) -> None: + """ + Registers custom AgentSpeak actions callable from plans. 
+ """ - def add_custom_actions(self, actions): @actions.add(".reply", 1) - def _reply(agent, term, intention): - message = agentspeak.grounded(term.args[0], intention.scope) - self.logger.info(f"Replying to message: {message}") - reply = self._send_to_llm(message) - self.logger.info(f"Received reply: {reply}") + def _reply(agent: "BDICoreAgent", term, intention): + """ + Sends text to the LLM (AgentSpeak action). + Example: .reply("Hello LLM!") + """ + message_text = agentspeak.grounded(term.args[0], intention.scope) + self.logger.info("Reply action sending: %s", message_text) + self._send_to_llm(message_text) yield - def _send_to_llm(self, message) -> str: - """TODO: implement""" - return f"This is a reply to {message}" + async def _send_to_llm(self, text: str) -> str: + """ + Sends a text query to the LLM Agent asynchronously. + """ + class SendBehaviour(OneShotBehaviour): + async def run(self) -> None: + msg = Message( + to=f"{settings.agent_settings.test_agent_name}@" + f"{settings.agent_settings.host}", + body=text, + thread="llm_request", + ) + msg.set_metadata("performative", "inform") + await self.send(msg) + self.agent.logger.debug("Message sent to LLM: %s", text) + self.add_behaviour(SendBehaviour()) + return "LLM message dispatch scheduled" + + def _add_llm_response_receiver(self) -> None: + """ + Adds behavior to receive responses from the LLM Agent. 
+ """ + + class ReceiveLLMResponseBehaviour(CyclicBehaviour): + async def run(self) -> None: + msg = await self.receive(timeout=2) + if not msg: + return + + content = msg.body + self.agent.logger.info("Received LLM response: %s", content) + + # TODO: Convert response into a belief (optional future feature) + # Example: + # self.agent.add_belief("llm_response", content) + # self.agent.logger.debug("Added belief: llm_response(%s)", content) + + template = Template() + template.thread = "llm_response" + + self.add_behaviour(ReceiveLLMResponseBehaviour(), template) diff --git a/src/control_backend/agents/bdi/behaviours/belief_setter.py b/src/control_backend/agents/bdi/behaviours/belief_setter.py index 777dda3..e788e76 100644 --- a/src/control_backend/agents/bdi/behaviours/belief_setter.py +++ b/src/control_backend/agents/bdi/behaviours/belief_setter.py @@ -33,6 +33,7 @@ class BeliefSetter(CyclicBehaviour): self.logger.debug("Processing message from belief collector.") self._process_belief_message(message) case _: + self.logger.debug("Not the belief agent, discarding message") pass def _process_belief_message(self, message: Message): diff --git a/src/control_backend/agents/llm/llm.py b/src/control_backend/agents/llm/llm.py new file mode 100644 index 0000000..c3c4dfd --- /dev/null +++ b/src/control_backend/agents/llm/llm.py @@ -0,0 +1,125 @@ +""" +LLM Agent module for routing text queries from the BDI Core Agent to a local LLM +service and returning its responses back to the BDI Core Agent. +""" + +import json +import logging +from typing import Any + +import asyncio +import httpx +from spade.agent import Agent +from spade.behaviour import CyclicBehaviour +from spade.message import Message +from spade.template import Template + +from control_backend.core.config import settings + + +class LLMAgent(Agent): + """ + Agent responsible for processing user text input and querying a locally + hosted LLM for text generation. 
Receives messages from the BDI Core Agent + and responds with processed LLM output. + """ + + logger = logging.getLogger("llm_agent") + + class ReceiveMessageBehaviour(CyclicBehaviour): + """ + Cyclic behaviour to continuously listen for incoming messages from + the BDI Core Agent and handle them. + """ + + LOCAL_LLM_URL: str = "http://127.0.0.1:1234/v1/chat/completions" + LOCAL_LLM_MODEL: str = "openai/gpt-oss-20b" + + async def run(self) -> None: + """ + Receives SPADE messages and processes only those originating from the + configured BDI agent. + """ + msg = await self.receive(timeout=1) + if not msg: + return + + sender = msg.sender.node + self.agent.logger.info( + "Received message: %s from %s", + msg.body, + sender, + ) + + if sender == settings.agent_settings.bdi_core_agent_name: + self.agent.logger.debug("Processing message from BDI Core Agent") + await self._process_bdi_message(msg) + else: + self.agent.logger.debug("Message ignored (not from BDI Core Agent)") + + async def _process_bdi_message(self, message: Message) -> None: + """ + Forwards user text to the LLM and replies with the generated text. + """ + user_text = message.body + llm_response = await self._query_llm(user_text) + await self._reply(llm_response) + + async def _reply(self, msg: str) -> None: + """ + Sends a response message back to the BDI Core Agent. + """ + reply = Message( + to=f"{settings.agent_settings.bdi_core_agent_name}@" + f"{settings.agent_settings.host}", + body=msg, + thread="llm_response", + ) + await self.send(reply) + self.agent.logger.info("Reply sent to BDI Core Agent") + + async def _query_llm(self, prompt: str) -> str: + """ + Sends a chat completion request to the local LLM service. + + :param prompt: Input text prompt to pass to the LLM. + :return: LLM-generated content or fallback message. 
+ """ + async with httpx.AsyncClient(timeout=120.0) as client: + response = await client.post( + self.LOCAL_LLM_URL, + headers={"Content-Type": "application/json"}, + json={ + "model": self.LOCAL_LLM_MODEL, + "messages": [{"role": "user", "content": prompt}], + "temperature": 0.3, + }, + ) + try: + response.raise_for_status() + data: dict[str, Any] = response.json() + return data.get("choices", [{}])[0].get( + "message", {} + ).get("content", "No response") + except httpx.HTTPError as err: + self.agent.logger.error("HTTP error: %s", err) + return "LLM service unavailable." + except Exception as err: + self.agent.logger.error("Unexpected error: %s", err) + return "Error processing the request." + + async def setup(self) -> None: + """ + Sets up the SPADE behaviour to filter and process messages from the + BDI Core Agent. + """ + self.logger.info("LLMAgent setup complete") + + template = Template() + template.sender = ( + f"{settings.agent_settings.bdi_core_agent_name}@" + f"{settings.agent_settings.host}" + ) + + behaviour = self.ReceiveMessageBehaviour() + self.add_behaviour(behaviour, template) diff --git a/src/control_backend/core/config.py b/src/control_backend/core/config.py index 07a828d..e1fda30 100644 --- a/src/control_backend/core/config.py +++ b/src/control_backend/core/config.py @@ -6,9 +6,10 @@ class ZMQSettings(BaseModel): internal_comm_address: str = "tcp://localhost:5560" class AgentSettings(BaseModel): - host: str = "localhost" + host: str = "xmpp.twirre.dev" bdi_core_agent_name: str = "bdi_core" belief_collector_agent_name: str = "belief_collector" + llm_agent_name: str = "llm_agent" test_agent_name: str = "test_agent" class Settings(BaseSettings): diff --git a/src/control_backend/main.py b/src/control_backend/main.py index 1f377c4..200b52d 100644 --- a/src/control_backend/main.py +++ b/src/control_backend/main.py @@ -13,6 +13,7 @@ import zmq # Internal imports from control_backend.agents.bdi.bdi_core import BDICoreAgent +from 
control_backend.agents.llm.llm import LLMAgent from control_backend.api.v1.router import api_router from control_backend.core.config import AgentSettings, settings from control_backend.core.zmq_context import context @@ -31,9 +32,15 @@ async def lifespan(app: FastAPI): app.state.internal_comm_socket = internal_comm_socket logger.info("Internal publishing socket bound to %s", internal_comm_socket) + # Initiate agents - bdi_core = BDICoreAgent(settings.agent_settings.bdi_core_agent_name + '@' + settings.agent_settings.host, settings.agent_settings.bdi_core_agent_name, "src/control_backend/agents/bdi/rules.asl") + + + llm_agent = LLMAgent(settings.agent_settings.test_agent_name + '@' + settings.agent_settings.host, "secret, ask twirre") + await llm_agent.start() + bdi_core = BDICoreAgent(settings.agent_settings.bdi_core_agent_name + '@' + settings.agent_settings.host, "secret, ask twirre", "src/control_backend/agents/bdi/rules.asl") await bdi_core.start() + yield From 17056da8324a36c547ead049236ab9e45cb06923 Mon Sep 17 00:00:00 2001 From: JobvAlewijk Date: Tue, 28 Oct 2025 11:07:28 +0100 Subject: [PATCH 02/10] chore: cleanup made llm get url from settings cleanup uneceserry fstring ref: N25B-207 --- src/control_backend/agents/bdi/bdi_core.py | 7 +++---- src/control_backend/agents/llm/llm.py | 19 ++++++------------- src/control_backend/core/config.py | 8 ++++++++ 3 files changed, 17 insertions(+), 17 deletions(-) diff --git a/src/control_backend/agents/bdi/bdi_core.py b/src/control_backend/agents/bdi/bdi_core.py index b960d3f..209c83f 100644 --- a/src/control_backend/agents/bdi/bdi_core.py +++ b/src/control_backend/agents/bdi/bdi_core.py @@ -59,10 +59,9 @@ class BDICoreAgent(BDIAgent): class SendBehaviour(OneShotBehaviour): async def run(self) -> None: msg = Message( - to=f"{settings.agent_settings.test_agent_name}@" - f"{settings.agent_settings.host}", - body=text, - thread="llm_request", + to= settings.agent_settings.test_agent_name + '@' + 
settings.agent_settings.host, + body= text, + thread= "llm_request", ) msg.set_metadata("performative", "inform") await self.send(msg) diff --git a/src/control_backend/agents/llm/llm.py b/src/control_backend/agents/llm/llm.py index c3c4dfd..38914a1 100644 --- a/src/control_backend/agents/llm/llm.py +++ b/src/control_backend/agents/llm/llm.py @@ -32,9 +32,6 @@ class LLMAgent(Agent): the BDI Core Agent and handle them. """ - LOCAL_LLM_URL: str = "http://127.0.0.1:1234/v1/chat/completions" - LOCAL_LLM_MODEL: str = "openai/gpt-oss-20b" - async def run(self) -> None: """ Receives SPADE messages and processes only those originating from the @@ -70,10 +67,9 @@ class LLMAgent(Agent): Sends a response message back to the BDI Core Agent. """ reply = Message( - to=f"{settings.agent_settings.bdi_core_agent_name}@" - f"{settings.agent_settings.host}", - body=msg, - thread="llm_response", + to= settings.agent_settings.bdi_core_agent_name + '@' + settings.agent_settings.host, + body= msg, + thread= "llm_response", ) await self.send(reply) self.agent.logger.info("Reply sent to BDI Core Agent") @@ -87,10 +83,10 @@ class LLMAgent(Agent): """ async with httpx.AsyncClient(timeout=120.0) as client: response = await client.post( - self.LOCAL_LLM_URL, + settings.llm_settings.local_llm_url, headers={"Content-Type": "application/json"}, json={ - "model": self.LOCAL_LLM_MODEL, + "model": settings.llm_settings.local_llm_model, "messages": [{"role": "user", "content": prompt}], "temperature": 0.3, }, @@ -116,10 +112,7 @@ class LLMAgent(Agent): self.logger.info("LLMAgent setup complete") template = Template() - template.sender = ( - f"{settings.agent_settings.bdi_core_agent_name}@" - f"{settings.agent_settings.host}" - ) + template.sender = settings.agent_settings.bdi_core_agent_name + '@' + settings.agent_settings.host behaviour = self.ReceiveMessageBehaviour() self.add_behaviour(behaviour, template) diff --git a/src/control_backend/core/config.py b/src/control_backend/core/config.py index 
e1fda30..4b11291 100644 --- a/src/control_backend/core/config.py +++ b/src/control_backend/core/config.py @@ -12,6 +12,10 @@ class AgentSettings(BaseModel): llm_agent_name: str = "llm_agent" test_agent_name: str = "test_agent" +class LLMSettings(BaseModel): + local_llm_url: str = "http://127.0.0.1:1234/v1/chat/completions" + local_llm_model: str = "openai/gpt-oss-20b" + class Settings(BaseSettings): app_title: str = "PepperPlus" @@ -20,7 +24,11 @@ class Settings(BaseSettings): zmq_settings: ZMQSettings = ZMQSettings() agent_settings: AgentSettings = AgentSettings() + + llm_settings: LLMSettings = LLMSettings() model_config = SettingsConfigDict(env_file=".env") + + settings = Settings() From 281bc57b6ee4ce319eab02101bc558baa586b3d4 Mon Sep 17 00:00:00 2001 From: JobvAlewijk Date: Tue, 28 Oct 2025 12:03:30 +0100 Subject: [PATCH 03/10] chore: cleanup made bdi match incoming messages changed llm from test agent to llm agent in config. ref: N25B-207 --- README.md | 4 ++-- src/control_backend/agents/bdi/bdi_core.py | 17 ++++++++++------- 2 files changed, 12 insertions(+), 9 deletions(-) diff --git a/README.md b/README.md index 6601529..6c28fcf 100644 --- a/README.md +++ b/README.md @@ -21,10 +21,10 @@ uv sync To run a LLM locally download https://lmstudio.ai When installing select developer mode, download a model (it will already suggest one) and run it (see developer window, status: running) -copy the url at the top right and replace LOCAL_LLM_URL with it + v1/chat/completions. +copy the url at the top right and replace local_llm_url with it + v1/chat/completions. This + part might differ based on what model you choose. -copy the model name in the module loaded and replace LOCAL_LLM_MODEL. +copy the model name in the module loaded and replace local_llm_modelL. In settings. 
## Running diff --git a/src/control_backend/agents/bdi/bdi_core.py b/src/control_backend/agents/bdi/bdi_core.py index 209c83f..1026df5 100644 --- a/src/control_backend/agents/bdi/bdi_core.py +++ b/src/control_backend/agents/bdi/bdi_core.py @@ -59,7 +59,7 @@ class BDICoreAgent(BDIAgent): class SendBehaviour(OneShotBehaviour): async def run(self) -> None: msg = Message( - to= settings.agent_settings.test_agent_name + '@' + settings.agent_settings.host, + to= settings.agent_settings.llm_agent_name + '@' + settings.agent_settings.host, body= text, thread= "llm_request", ) @@ -81,13 +81,16 @@ class BDICoreAgent(BDIAgent): if not msg: return - content = msg.body - self.agent.logger.info("Received LLM response: %s", content) + sender = msg.sender.node + match sender: + case settings.agent_settings.llm_agent_name: + content = msg.body + self.agent.logger.info("Received LLM response: %s", content) + #Here the BDI can pass the message back as a response + case _: + self.logger.debug("Not from the llm, discarding message") + pass - # TODO: Convert response into a belief (optional future feature) - # Example: - # self.agent.add_belief("llm_response", content) - # self.agent.logger.debug("Added belief: llm_response(%s)", content) template = Template() template.thread = "llm_response" From f8d08ac7ca534ab3eb805a89f2dce5be8658e69a Mon Sep 17 00:00:00 2001 From: JobvAlewijk Date: Tue, 28 Oct 2025 13:44:28 +0100 Subject: [PATCH 04/10] chore: moved behavoir moved recieve llm behavoir into a the behavoir folder ref: N25B-207 --- src/control_backend/agents/bdi/bdi_core.py | 34 +++---------------- .../behaviours/recieve_llm_resp_behavoir.py | 29 ++++++++++++++++ src/control_backend/main.py | 6 ++-- 3 files changed, 37 insertions(+), 32 deletions(-) create mode 100644 src/control_backend/agents/bdi/behaviours/recieve_llm_resp_behavoir.py diff --git a/src/control_backend/agents/bdi/bdi_core.py b/src/control_backend/agents/bdi/bdi_core.py index 1026df5..910beae 100644 --- 
a/src/control_backend/agents/bdi/bdi_core.py +++ b/src/control_backend/agents/bdi/bdi_core.py @@ -7,6 +7,7 @@ from spade.template import Template from spade_bdi.bdi import BDIAgent from control_backend.agents.bdi.behaviours.belief_setter import BeliefSetter +from control_backend.agents.bdi.behaviours.recieve_llm_resp_behavoir import ReceiveLLMResponseBehaviour from control_backend.core.config import settings @@ -27,7 +28,7 @@ class BDICoreAgent(BDIAgent): self.logger.info("BDICoreAgent setup started") self.add_behaviour(BeliefSetter()) - self._add_llm_response_receiver() + self.add_behaviour(ReceiveLLMResponseBehaviour()) await self._send_to_llm("Hello we are the Pepper plus team") # This is the example message currently sent to the llm at the start of the Program @@ -63,36 +64,9 @@ class BDICoreAgent(BDIAgent): body= text, thread= "llm_request", ) - msg.set_metadata("performative", "inform") + await self.send(msg) self.agent.logger.debug("Message sent to LLM: %s", text) self.add_behaviour(SendBehaviour()) - return "LLM message dispatch scheduled" - - def _add_llm_response_receiver(self) -> None: - """ - Adds behavior to receive responses from the LLM Agent. 
- """ - - class ReceiveLLMResponseBehaviour(CyclicBehaviour): - async def run(self) -> None: - msg = await self.receive(timeout=2) - if not msg: - return - - sender = msg.sender.node - match sender: - case settings.agent_settings.llm_agent_name: - content = msg.body - self.agent.logger.info("Received LLM response: %s", content) - #Here the BDI can pass the message back as a response - case _: - self.logger.debug("Not from the llm, discarding message") - pass - - - template = Template() - template.thread = "llm_response" - - self.add_behaviour(ReceiveLLMResponseBehaviour(), template) + return "LLM message dispatch scheduled" \ No newline at end of file diff --git a/src/control_backend/agents/bdi/behaviours/recieve_llm_resp_behavoir.py b/src/control_backend/agents/bdi/behaviours/recieve_llm_resp_behavoir.py new file mode 100644 index 0000000..2b788ae --- /dev/null +++ b/src/control_backend/agents/bdi/behaviours/recieve_llm_resp_behavoir.py @@ -0,0 +1,29 @@ +import asyncio +import json +import logging + +from spade.agent import Message +from spade.behaviour import CyclicBehaviour +from spade_bdi.bdi import BDIAgent + +from control_backend.core.config import settings + +class ReceiveLLMResponseBehaviour(CyclicBehaviour): + """ + Adds behavior to receive responses from the LLM Agent. 
+ """ + logger = logging.getLogger("BDI/LLM Reciever") + async def run(self): + msg = await self.receive(timeout=2) + if not msg: + return + + sender = msg.sender.node + match sender: + case settings.agent_settings.llm_agent_name: + content = msg.body + self.logger.info("Received LLM response: %s", content) + #Here the BDI can pass the message back as a response + case _: + self.logger.debug("Not from the llm, discarding message") + pass \ No newline at end of file diff --git a/src/control_backend/main.py b/src/control_backend/main.py index 200b52d..97b8218 100644 --- a/src/control_backend/main.py +++ b/src/control_backend/main.py @@ -36,9 +36,11 @@ async def lifespan(app: FastAPI): # Initiate agents - llm_agent = LLMAgent(settings.agent_settings.test_agent_name + '@' + settings.agent_settings.host, "secret, ask twirre") + llm_agent = LLMAgent(settings.agent_settings.llm_agent_name + '@' + settings.agent_settings.host, + "secret, ask twirre") await llm_agent.start() - bdi_core = BDICoreAgent(settings.agent_settings.bdi_core_agent_name + '@' + settings.agent_settings.host, "secret, ask twirre", "src/control_backend/agents/bdi/rules.asl") + bdi_core = BDICoreAgent(settings.agent_settings.bdi_core_agent_name + '@' + settings.agent_settings.host, + "secret, ask twirre", "src/control_backend/agents/bdi/rules.asl") await bdi_core.start() From f44413ca1e1fdddcff6097485af645b508e80f46 Mon Sep 17 00:00:00 2001 From: JobvAlewijk Date: Tue, 28 Oct 2025 13:47:48 +0100 Subject: [PATCH 05/10] style: typo ref: N25B-207 --- src/control_backend/agents/bdi/bdi_core.py | 2 +- ...cieve_llm_resp_behavoir.py => receive_llm_resp_behaviour.py} | 0 2 files changed, 1 insertion(+), 1 deletion(-) rename src/control_backend/agents/bdi/behaviours/{recieve_llm_resp_behavoir.py => receive_llm_resp_behaviour.py} (100%) diff --git a/src/control_backend/agents/bdi/bdi_core.py b/src/control_backend/agents/bdi/bdi_core.py index 910beae..628ce09 100644 --- a/src/control_backend/agents/bdi/bdi_core.py 
+++ b/src/control_backend/agents/bdi/bdi_core.py @@ -7,7 +7,7 @@ from spade.template import Template from spade_bdi.bdi import BDIAgent from control_backend.agents.bdi.behaviours.belief_setter import BeliefSetter -from control_backend.agents.bdi.behaviours.recieve_llm_resp_behavoir import ReceiveLLMResponseBehaviour +from control_backend.agents.bdi.behaviours.receive_llm_resp_behaviour import ReceiveLLMResponseBehaviour from control_backend.core.config import settings diff --git a/src/control_backend/agents/bdi/behaviours/recieve_llm_resp_behavoir.py b/src/control_backend/agents/bdi/behaviours/receive_llm_resp_behaviour.py similarity index 100% rename from src/control_backend/agents/bdi/behaviours/recieve_llm_resp_behavoir.py rename to src/control_backend/agents/bdi/behaviours/receive_llm_resp_behaviour.py From af789bd459ec586379936d7e966c5a844ff97fb3 Mon Sep 17 00:00:00 2001 From: JobvAlewijk Date: Wed, 29 Oct 2025 12:45:13 +0100 Subject: [PATCH 06/10] feat: norms and goals to llm base goals and norms can be defined in llm_instructions.py cleaned the code ref: N25B-215 --- src/control_backend/agents/bdi/bdi_core.py | 10 ++--- src/control_backend/agents/llm/llm.py | 36 ++++++++++----- .../agents/llm/llm_instructions.py | 44 +++++++++++++++++++ src/control_backend/main.py | 4 +- 4 files changed, 74 insertions(+), 20 deletions(-) create mode 100644 src/control_backend/agents/llm/llm_instructions.py diff --git a/src/control_backend/agents/bdi/bdi_core.py b/src/control_backend/agents/bdi/bdi_core.py index 0be42c9..ecc0b0c 100644 --- a/src/control_backend/agents/bdi/bdi_core.py +++ b/src/control_backend/agents/bdi/bdi_core.py @@ -30,7 +30,7 @@ class BDICoreAgent(BDIAgent): self.add_behaviour(BeliefSetter()) self.add_behaviour(ReceiveLLMResponseBehaviour()) - await self._send_to_llm("Hello we are the Pepper plus team") + await self._send_to_llm("Hi pepper, how are you?") # This is the example message currently sent to the llm at the start of the Program 
self.logger.info("BDICoreAgent setup complete") @@ -52,7 +52,7 @@ class BDICoreAgent(BDIAgent): self._send_to_llm(message_text) yield - async def _send_to_llm(self, text: str) -> str: + async def _send_to_llm(self, text: str): """ Sends a text query to the LLM Agent asynchronously. """ @@ -61,12 +61,10 @@ class BDICoreAgent(BDIAgent): async def run(self) -> None: msg = Message( to= settings.agent_settings.llm_agent_name + '@' + settings.agent_settings.host, - body= text, - thread= "llm_request", + body= text ) await self.send(msg) self.agent.logger.debug("Message sent to LLM: %s", text) - self.add_behaviour(SendBehaviour()) - return "LLM message dispatch scheduled" \ No newline at end of file + self.add_behaviour(SendBehaviour()) \ No newline at end of file diff --git a/src/control_backend/agents/llm/llm.py b/src/control_backend/agents/llm/llm.py index 38914a1..8dabb0f 100644 --- a/src/control_backend/agents/llm/llm.py +++ b/src/control_backend/agents/llm/llm.py @@ -15,6 +15,7 @@ from spade.message import Message from spade.template import Template from control_backend.core.config import settings +from control_backend.agents.llm.llm_instructions import LLMInstructions class LLMAgent(Agent): @@ -32,7 +33,7 @@ class LLMAgent(Agent): the BDI Core Agent and handle them. """ - async def run(self) -> None: + async def run(self): """ Receives SPADE messages and processes only those originating from the configured BDI agent. @@ -54,7 +55,7 @@ class LLMAgent(Agent): else: self.agent.logger.debug("Message ignored (not from BDI Core Agent)") - async def _process_bdi_message(self, message: Message) -> None: + async def _process_bdi_message(self, message: Message): """ Forwards user text to the LLM and replies with the generated text. 
""" @@ -62,14 +63,13 @@ class LLMAgent(Agent): llm_response = await self._query_llm(user_text) await self._reply(llm_response) - async def _reply(self, msg: str) -> None: + async def _reply(self, msg: str): """ Sends a response message back to the BDI Core Agent. """ reply = Message( to= settings.agent_settings.bdi_core_agent_name + '@' + settings.agent_settings.host, - body= msg, - thread= "llm_response", + body= msg ) await self.send(reply) self.agent.logger.info("Reply sent to BDI Core Agent") @@ -82,15 +82,30 @@ class LLMAgent(Agent): :return: LLM-generated content or fallback message. """ async with httpx.AsyncClient(timeout=120.0) as client: + # Example dynamic content for future (optional) + + instructions = LLMInstructions() + developer_instruction = instructions.build_developer_instruction() + response = await client.post( settings.llm_settings.local_llm_url, headers={"Content-Type": "application/json"}, json={ "model": settings.llm_settings.local_llm_model, - "messages": [{"role": "user", "content": prompt}], - "temperature": 0.3, + "messages": [ + { + "role": "developer", + "content": developer_instruction + }, + { + "role": "user", + "content": prompt + } + ], + "temperature": 0.3 }, ) + try: response.raise_for_status() data: dict[str, Any] = response.json() @@ -104,15 +119,12 @@ class LLMAgent(Agent): self.agent.logger.error("Unexpected error: %s", err) return "Error processing the request." - async def setup(self) -> None: + async def setup(self): """ Sets up the SPADE behaviour to filter and process messages from the BDI Core Agent. 
""" self.logger.info("LLMAgent setup complete") - template = Template() - template.sender = settings.agent_settings.bdi_core_agent_name + '@' + settings.agent_settings.host - behaviour = self.ReceiveMessageBehaviour() - self.add_behaviour(behaviour, template) + self.add_behaviour(behaviour) diff --git a/src/control_backend/agents/llm/llm_instructions.py b/src/control_backend/agents/llm/llm_instructions.py new file mode 100644 index 0000000..a35101d --- /dev/null +++ b/src/control_backend/agents/llm/llm_instructions.py @@ -0,0 +1,44 @@ +class LLMInstructions: + """ + Defines structured instructions that are sent along with each request + to the LLM to guide its behavior (norms, goals, etc.). + """ + + @staticmethod + def default_norms() -> str: + return f""" + Be friendly and respectful. + Make the conversation feel natural and engaging. + """.strip() + + @staticmethod + def default_goals() -> str: + return f""" + Try to learn the user's name during conversation. + """.strip() + + def __init__(self, norms: str | None = None, goals: str | None = None): + self.norms = norms if norms is not None else self.default_norms() + self.goals = goals if goals is not None else self.default_goals() + + def build_developer_instruction(self) -> str: + """ + Builds a multi-line formatted instruction string for the LLM. + Includes only non-empty structured fields. 
+ """ + sections = [ + "You are a Pepper robot engaging in natural human conversation.", + "Keep responses between 1–5 sentences, unless instructed otherwise.\n", + ] + + if self.norms: + sections.append("Norms to follow:") + sections.append(self.norms) + sections.append("") + + if self.goals: + sections.append("Goals to reach:") + sections.append(self.goals) + sections.append("") + + return "\n".join(sections).strip() diff --git a/src/control_backend/main.py b/src/control_backend/main.py index de357d8..0050efa 100644 --- a/src/control_backend/main.py +++ b/src/control_backend/main.py @@ -44,10 +44,10 @@ async def lifespan(app: FastAPI): llm_agent = LLMAgent(settings.agent_settings.llm_agent_name + '@' + settings.agent_settings.host, - "secret, ask twirre") + settings.agent_settings.llm_agent_name) await llm_agent.start() bdi_core = BDICoreAgent(settings.agent_settings.bdi_core_agent_name + '@' + settings.agent_settings.host, - "secret, ask twirre", "src/control_backend/agents/bdi/rules.asl") + settings.agent_settings.bdi_core_agent_name, "src/control_backend/agents/bdi/rules.asl") await bdi_core.start() _temp_vad_agent = VADAgent("tcp://localhost:5558", False) From c7a2effa7853aef5b058a18486575512c2bce19f Mon Sep 17 00:00:00 2001 From: JobvAlewijk Date: Wed, 29 Oct 2025 13:01:00 +0100 Subject: [PATCH 07/10] style: linted everything ref: N25B-207 --- src/control_backend/agents/bdi/bdi_core.py | 7 ++++--- .../agents/bdi/behaviours/receive_llm_resp_behaviour.py | 5 +---- src/control_backend/agents/llm/llm.py | 9 +++------ src/control_backend/agents/llm/llm_instructions.py | 4 ++-- 4 files changed, 10 insertions(+), 15 deletions(-) diff --git a/src/control_backend/agents/bdi/bdi_core.py b/src/control_backend/agents/bdi/bdi_core.py index ecc0b0c..859e25a 100644 --- a/src/control_backend/agents/bdi/bdi_core.py +++ b/src/control_backend/agents/bdi/bdi_core.py @@ -1,13 +1,14 @@ import logging import agentspeak -from spade.behaviour import CyclicBehaviour, OneShotBehaviour 
+from spade.behaviour import OneShotBehaviour from spade.message import Message -from spade.template import Template from spade_bdi.bdi import BDIAgent from control_backend.agents.bdi.behaviours.belief_setter import BeliefSetter -from control_backend.agents.bdi.behaviours.receive_llm_resp_behaviour import ReceiveLLMResponseBehaviour +from control_backend.agents.bdi.behaviours.receive_llm_resp_behaviour import ( + ReceiveLLMResponseBehaviour, +) from control_backend.core.config import settings diff --git a/src/control_backend/agents/bdi/behaviours/receive_llm_resp_behaviour.py b/src/control_backend/agents/bdi/behaviours/receive_llm_resp_behaviour.py index 2b788ae..747ab4c 100644 --- a/src/control_backend/agents/bdi/behaviours/receive_llm_resp_behaviour.py +++ b/src/control_backend/agents/bdi/behaviours/receive_llm_resp_behaviour.py @@ -1,13 +1,10 @@ -import asyncio -import json import logging -from spade.agent import Message from spade.behaviour import CyclicBehaviour -from spade_bdi.bdi import BDIAgent from control_backend.core.config import settings + class ReceiveLLMResponseBehaviour(CyclicBehaviour): """ Adds behavior to receive responses from the LLM Agent. diff --git a/src/control_backend/agents/llm/llm.py b/src/control_backend/agents/llm/llm.py index 8dabb0f..0f78095 100644 --- a/src/control_backend/agents/llm/llm.py +++ b/src/control_backend/agents/llm/llm.py @@ -3,19 +3,16 @@ LLM Agent module for routing text queries from the BDI Core Agent to a local LLM service and returning its responses back to the BDI Core Agent. 
""" -import json import logging from typing import Any -import asyncio import httpx from spade.agent import Agent from spade.behaviour import CyclicBehaviour from spade.message import Message -from spade.template import Template -from control_backend.core.config import settings from control_backend.agents.llm.llm_instructions import LLMInstructions +from control_backend.core.config import settings class LLMAgent(Agent): @@ -68,8 +65,8 @@ class LLMAgent(Agent): Sends a response message back to the BDI Core Agent. """ reply = Message( - to= settings.agent_settings.bdi_core_agent_name + '@' + settings.agent_settings.host, - body= msg + to=settings.agent_settings.bdi_core_agent_name + '@' + settings.agent_settings.host, + body=msg ) await self.send(reply) self.agent.logger.info("Reply sent to BDI Core Agent") diff --git a/src/control_backend/agents/llm/llm_instructions.py b/src/control_backend/agents/llm/llm_instructions.py index a35101d..9636d88 100644 --- a/src/control_backend/agents/llm/llm_instructions.py +++ b/src/control_backend/agents/llm/llm_instructions.py @@ -6,14 +6,14 @@ class LLMInstructions: @staticmethod def default_norms() -> str: - return f""" + return """ Be friendly and respectful. Make the conversation feel natural and engaging. """.strip() @staticmethod def default_goals() -> str: - return f""" + return """ Try to learn the user's name during conversation. 
""".strip() From 3661b2a1e68b96d130435ea514aeaacd3cfdb3cc Mon Sep 17 00:00:00 2001 From: JobvAlewijk Date: Wed, 29 Oct 2025 13:03:57 +0100 Subject: [PATCH 08/10] fix: local host ref: N25B-208 --- src/control_backend/core/config.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/control_backend/core/config.py b/src/control_backend/core/config.py index 34032ba..736e939 100644 --- a/src/control_backend/core/config.py +++ b/src/control_backend/core/config.py @@ -7,7 +7,7 @@ class ZMQSettings(BaseModel): class AgentSettings(BaseModel): - host: str = "xmpp.twirre.dev" + host: str = "localhost" bdi_core_agent_name: str = "bdi_core" belief_collector_agent_name: str = "belief_collector" vad_agent_name: str = "vad_agent" From 5f2fd11a3360a15180f6c8dca8e1d261c0ad1ce0 Mon Sep 17 00:00:00 2001 From: Twirre Meulenbelt <43213592+TwirreM@users.noreply.github.com> Date: Wed, 29 Oct 2025 13:55:18 +0100 Subject: [PATCH 09/10] style: tiny style fixes --- src/control_backend/agents/bdi/bdi_core.py | 2 -- src/control_backend/agents/bdi/behaviours/belief_setter.py | 2 +- src/control_backend/core/config.py | 4 ++-- 3 files changed, 3 insertions(+), 5 deletions(-) diff --git a/src/control_backend/agents/bdi/bdi_core.py b/src/control_backend/agents/bdi/bdi_core.py index 7f424cc..a0a5570 100644 --- a/src/control_backend/agents/bdi/bdi_core.py +++ b/src/control_backend/agents/bdi/bdi_core.py @@ -1,8 +1,6 @@ -import asyncio import logging import agentspeak -import spade_bdi.bdi from spade.behaviour import OneShotBehaviour from spade.message import Message from spade_bdi.bdi import BDIAgent diff --git a/src/control_backend/agents/bdi/behaviours/belief_setter.py b/src/control_backend/agents/bdi/behaviours/belief_setter.py index 6179052..3155a38 100644 --- a/src/control_backend/agents/bdi/behaviours/belief_setter.py +++ b/src/control_backend/agents/bdi/behaviours/belief_setter.py @@ -60,7 +60,7 @@ class BeliefSetterBehaviour(CyclicBehaviour): for belief, arguments in 
beliefs.items():
                 self.agent.bdi.set_belief(belief, *arguments)
 
-                # Special case: if there's a new user message, we need to flag that we haven't responded yet
+                # Special case: if there's a new user message, flag that we haven't responded yet
                 if belief == "user_said":
                     try:
                         self.agent.bdi.remove_belief("responded")
diff --git a/src/control_backend/core/config.py b/src/control_backend/core/config.py
index 736e939..5d539d0 100644
--- a/src/control_backend/core/config.py
+++ b/src/control_backend/core/config.py
@@ -19,8 +19,8 @@ class AgentSettings(BaseModel):
 
 
 class LLMSettings(BaseModel):
-    local_llm_url: str = "http://127.0.0.1:1234/v1/chat/completions"
-    local_llm_model: str = "openai/gpt-oss-20b"
+    local_llm_url: str = "http://145.107.82.68:1234/v1/chat/completions"
+    local_llm_model: str = "openai/gpt-oss-120b"
 
 class Settings(BaseSettings):
     app_title: str = "PepperPlus"

From 7779d3a41c2f82039d9e9084190742f9520f814e Mon Sep 17 00:00:00 2001
From: Twirre Meulenbelt <43213592+TwirreM@users.noreply.github.com>
Date: Wed, 29 Oct 2025 13:58:10 +0100
Subject: [PATCH 10/10] style: another tiny style fix

---
 src/control_backend/agents/bdi/bdi_core.py | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/src/control_backend/agents/bdi/bdi_core.py b/src/control_backend/agents/bdi/bdi_core.py
index a0a5570..a9b10d2 100644
--- a/src/control_backend/agents/bdi/bdi_core.py
+++ b/src/control_backend/agents/bdi/bdi_core.py
@@ -6,8 +6,9 @@ from spade.message import Message
 from spade_bdi.bdi import BDIAgent
 
 from control_backend.agents.bdi.behaviours.belief_setter import BeliefSetterBehaviour
-from control_backend.agents.bdi.behaviours.receive_llm_resp_behaviour import ReceiveLLMResponseBehaviour
-
+from control_backend.agents.bdi.behaviours.receive_llm_resp_behaviour import (
+    ReceiveLLMResponseBehaviour,
+)
 from control_backend.core.config import settings