From 281bc57b6ee4ce319eab02101bc558baa586b3d4 Mon Sep 17 00:00:00 2001
From: JobvAlewijk
Date: Tue, 28 Oct 2025 12:03:30 +0100
Subject: [PATCH] chore: clean up BDI message handling

Made the BDI agent match incoming messages against the sender, and
changed the LLM reference in the config from the test agent to the
LLM agent.

ref: N25B-207
---
 README.md                                  |  4 ++--
 src/control_backend/agents/bdi/bdi_core.py | 16 +++++++++-------
 2 files changed, 11 insertions(+), 9 deletions(-)

diff --git a/README.md b/README.md
index 6601529..6c28fcf 100644
--- a/README.md
+++ b/README.md
@@ -21,10 +21,10 @@ uv sync
 
 To run a LLM locally download https://lmstudio.ai
 When installing select developer mode, download a model (it will already suggest one)
 and run it (see developer window, status: running)
-copy the url at the top right and replace LOCAL_LLM_URL with it + v1/chat/completions.
+Copy the URL at the top right and replace local_llm_url with it + v1/chat/completions.
 This + part might differ based on what model you choose.
-copy the model name in the module loaded and replace LOCAL_LLM_MODEL.
+Copy the name of the loaded model and replace local_llm_model.
 In settings.
 
 ## Running

diff --git a/src/control_backend/agents/bdi/bdi_core.py b/src/control_backend/agents/bdi/bdi_core.py
index 209c83f..1026df5 100644
--- a/src/control_backend/agents/bdi/bdi_core.py
+++ b/src/control_backend/agents/bdi/bdi_core.py
@@ -59,7 +59,7 @@ class BDICoreAgent(BDIAgent):
         class SendBehaviour(OneShotBehaviour):
             async def run(self) -> None:
                 msg = Message(
-                    to= settings.agent_settings.test_agent_name + '@' + settings.agent_settings.host,
+                    to=settings.agent_settings.llm_agent_name + '@' + settings.agent_settings.host,
                     body= text,
                     thread= "llm_request",
                 )
@@ -81,13 +81,15 @@ class BDICoreAgent(BDIAgent):
         if not msg:
             return
 
-        content = msg.body
-        self.agent.logger.info("Received LLM response: %s", content)
+        sender = msg.sender.node
+        match sender:
+            case settings.agent_settings.llm_agent_name:
+                content = msg.body
+                self.agent.logger.info("Received LLM response: %s", content)
+                # Here the BDI can pass the message back as a response
+            case _:
+                self.agent.logger.debug("Not from the LLM agent, discarding message")
 
-        # TODO: Convert response into a belief (optional future feature)
-        # Example:
-        # self.agent.add_belief("llm_response", content)
-        # self.agent.logger.debug("Added belief: llm_response(%s)", content)
         template = Template()
         template.thread = "llm_response"
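
Note (not part of the patch): the README steps above set two values,
local_llm_url and local_llm_model. A minimal sketch of what that
settings block could look like, assuming the project uses
pydantic-settings (the class name and defaults here are illustrative,
not the project's actual code; LM Studio serves on localhost:1234 by
default):

    from pydantic_settings import BaseSettings

    class LLMSettings(BaseSettings):
        # Base URL copied from LM Studio plus the completions path
        local_llm_url: str = "http://localhost:1234/v1/chat/completions"
        # Name of the model loaded in LM Studio
        local_llm_model: str = "your-model-name"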
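
The TODO removed by this patch described turning the LLM response into
a belief. A minimal sketch of how the new receive behaviour could do
that, assuming spade_bdi's bdi.set_belief API is available on the agent
(the belief name llm_response comes from the removed TODO; everything
else is illustrative):

    # Sketch only: same sender matching as the patch, plus a belief update.
    async def run(self) -> None:
        msg = await self.receive(timeout=10)
        if not msg:
            return
        match msg.sender.node:
            case settings.agent_settings.llm_agent_name:
                content = msg.body
                self.agent.logger.info("Received LLM response: %s", content)
                # Store the response as a belief so BDI plans can react to it
                self.agent.bdi.set_belief("llm_response", content)
            case _:
                self.agent.logger.debug("Not from the LLM agent, discarding message")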