refactor: agents inherit logger from BaseAgent

Created a `BaseAgent` class from which all agents inherit. Each agent is
assigned a logger with a hierarchical name (something like
`control_backend.agents.AgentName`).
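For illustration, a minimal sketch of what `BaseAgent` could look like (the
actual implementation lives in one of the changed files not shown below; the
constructor signature and exact name construction are assumptions):

```python
import logging

from spade.agent import Agent


class BaseAgent(Agent):
    """Base class for all agents; gives each subclass a namespaced logger."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Produces names like "control_backend.agents.LLMAgent", so output
        # can be filtered per agent through the standard logging config.
        self.logger = logging.getLogger(
            f"control_backend.agents.{type(self).__name__}"
        )
```

Subclasses then log via `self.logger`, or `self.agent.logger` from inside a
behaviour, as in the diff below.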

The BDI core takes care of its own logger, since bdi is still a module rather
than an agent class.
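For the BDI core, the module-level equivalent is presumably the standard
idiom (again a sketch; the bdi module itself is not shown in this commit
view):

```python
import logging

# At the top of the bdi module; __name__ yields the dotted module path,
# e.g. "control_backend.bdi".
logger = logging.getLogger(__name__)
```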

ref: N25B-241
commit a98018ddda
parent d43cb9394a
2025-11-04 20:48:55 +01:00
15 changed files with 174 additions and 159 deletions


@@ -1,29 +1,22 @@
"""
LLM Agent module for routing text queries from the BDI Core Agent to a local LLM
service and returning its responses back to the BDI Core Agent.
"""
import logging
from typing import Any
import httpx
from spade.agent import Agent
from spade.behaviour import CyclicBehaviour
from spade.message import Message
from .llm_instructions import LLMInstructions
from control_backend.agents import BaseAgent
from control_backend.core.config import settings
from .llm_instructions import LLMInstructions
class LLMAgent(Agent):
class LLMAgent(BaseAgent):
"""
Agent responsible for processing user text input and querying a locally
hosted LLM for text generation. Receives messages from the BDI Core Agent
and responds with processed LLM output.
"""
logger = logging.getLogger("llm_agent")
class ReceiveMessageBehaviour(CyclicBehaviour):
"""
Cyclic behaviour to continuously listen for incoming messages from
@@ -63,8 +56,8 @@ class LLMAgent(Agent):
             Sends a response message back to the BDI Core Agent.
             """
             reply = Message(
-                to=settings.agent_settings.bdi_core_agent_name + '@' + settings.agent_settings.host,
-                body=msg
+                to=settings.agent_settings.bdi_core_agent_name + "@" + settings.agent_settings.host,
+                body=msg,
             )
             await self.send(reply)
             self.agent.logger.info("Reply sent to BDI Core Agent")
@@ -78,35 +71,31 @@ class LLMAgent(Agent):
"""
async with httpx.AsyncClient(timeout=120.0) as client:
# Example dynamic content for future (optional)
instructions = LLMInstructions()
developer_instruction = instructions.build_developer_instruction()
response = await client.post(
settings.llm_settings.local_llm_url,
headers={"Content-Type": "application/json"},
json={
"model": settings.llm_settings.local_llm_model,
"messages": [
{
"role": "developer",
"content": developer_instruction
},
{
"role": "user",
"content": prompt
}
{"role": "developer", "content": developer_instruction},
{"role": "user", "content": prompt},
],
"temperature": 0.3
"temperature": 0.3,
},
)
try:
response.raise_for_status()
data: dict[str, Any] = response.json()
return data.get("choices", [{}])[0].get(
"message", {}
).get("content", "No response")
return (
data.get("choices", [{}])[0]
.get("message", {})
.get("content", "No response")
)
except httpx.HTTPError as err:
self.agent.logger.error("HTTP error: %s", err)
return "LLM service unavailable."