From 17056da8324a36c547ead049236ab9e45cb06923 Mon Sep 17 00:00:00 2001 From: JobvAlewijk Date: Tue, 28 Oct 2025 11:07:28 +0100 Subject: [PATCH] chore: cleanup made LLM get URL from settings cleanup unnecessary f-string ref: N25B-207 --- src/control_backend/agents/bdi/bdi_core.py | 7 +++---- src/control_backend/agents/llm/llm.py | 19 ++++++------------- src/control_backend/core/config.py | 8 ++++++++ 3 files changed, 17 insertions(+), 17 deletions(-) diff --git a/src/control_backend/agents/bdi/bdi_core.py b/src/control_backend/agents/bdi/bdi_core.py index b960d3f..209c83f 100644 --- a/src/control_backend/agents/bdi/bdi_core.py +++ b/src/control_backend/agents/bdi/bdi_core.py @@ -59,10 +59,9 @@ class BDICoreAgent(BDIAgent): class SendBehaviour(OneShotBehaviour): async def run(self) -> None: msg = Message( - to=f"{settings.agent_settings.test_agent_name}@" - f"{settings.agent_settings.host}", - body=text, - thread="llm_request", + to= settings.agent_settings.test_agent_name + '@' + settings.agent_settings.host, + body= text, + thread= "llm_request", ) msg.set_metadata("performative", "inform") await self.send(msg) diff --git a/src/control_backend/agents/llm/llm.py b/src/control_backend/agents/llm/llm.py index c3c4dfd..38914a1 100644 --- a/src/control_backend/agents/llm/llm.py +++ b/src/control_backend/agents/llm/llm.py @@ -32,9 +32,6 @@ class LLMAgent(Agent): the BDI Core Agent and handle them. """ - LOCAL_LLM_URL: str = "http://127.0.0.1:1234/v1/chat/completions" - LOCAL_LLM_MODEL: str = "openai/gpt-oss-20b" - async def run(self) -> None: """ Receives SPADE messages and processes only those originating from the @@ -70,10 +67,9 @@ class LLMAgent(Agent): Sends a response message back to the BDI Core Agent. 
""" reply = Message( - to=f"{settings.agent_settings.bdi_core_agent_name}@" - f"{settings.agent_settings.host}", - body=msg, - thread="llm_response", + to= settings.agent_settings.bdi_core_agent_name + '@' + settings.agent_settings.host, + body= msg, + thread= "llm_response", ) await self.send(reply) self.agent.logger.info("Reply sent to BDI Core Agent") @@ -87,10 +83,10 @@ class LLMAgent(Agent): """ async with httpx.AsyncClient(timeout=120.0) as client: response = await client.post( - self.LOCAL_LLM_URL, + settings.llm_settings.local_llm_url, headers={"Content-Type": "application/json"}, json={ - "model": self.LOCAL_LLM_MODEL, + "model": settings.llm_settings.local_llm_model, "messages": [{"role": "user", "content": prompt}], "temperature": 0.3, }, @@ -116,10 +112,7 @@ class LLMAgent(Agent): self.logger.info("LLMAgent setup complete") template = Template() - template.sender = ( - f"{settings.agent_settings.bdi_core_agent_name}@" - f"{settings.agent_settings.host}" - ) + template.sender = settings.agent_settings.bdi_core_agent_name + '@' + settings.agent_settings.host behaviour = self.ReceiveMessageBehaviour() self.add_behaviour(behaviour, template) diff --git a/src/control_backend/core/config.py b/src/control_backend/core/config.py index e1fda30..4b11291 100644 --- a/src/control_backend/core/config.py +++ b/src/control_backend/core/config.py @@ -12,6 +12,10 @@ class AgentSettings(BaseModel): llm_agent_name: str = "llm_agent" test_agent_name: str = "test_agent" +class LLMSettings(BaseModel): + local_llm_url: str = "http://127.0.0.1:1234/v1/chat/completions" + local_llm_model: str = "openai/gpt-oss-20b" + class Settings(BaseSettings): app_title: str = "PepperPlus" @@ -20,7 +24,11 @@ class Settings(BaseSettings): zmq_settings: ZMQSettings = ZMQSettings() agent_settings: AgentSettings = AgentSettings() + + llm_settings: LLMSettings = LLMSettings() model_config = SettingsConfigDict(env_file=".env") + + settings = Settings()