feat: LLM agent
body: Added the LLMAgent class and made it run at startup.
Modified bdi_core to send a test message and receive an answer from the LLM agent.
Added a connection to a local LLM via LM Studio.
Tests are TBA.
ref: N25B-207
src/control_backend/agents/llm/llm.py (new file, 125 additions)
@@ -0,0 +1,125 @@
"""
LLM Agent module for routing text queries from the BDI Core Agent to a local LLM
service and returning its responses back to the BDI Core Agent.
"""

import logging
from typing import Any

import httpx
from spade.agent import Agent
from spade.behaviour import CyclicBehaviour
from spade.message import Message
from spade.template import Template

from control_backend.core.config import settings


class LLMAgent(Agent):
    """
    Agent responsible for processing user text input and querying a locally
    hosted LLM for text generation. Receives messages from the BDI Core Agent
    and responds with processed LLM output.
    """

    logger = logging.getLogger("llm_agent")

    class ReceiveMessageBehaviour(CyclicBehaviour):
        """
        Cyclic behaviour to continuously listen for incoming messages from
        the BDI Core Agent and handle them.
        """

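        # LM Studio's local server listens on port 1234 by default and exposes
        # an OpenAI-compatible chat completions endpoint.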
        LOCAL_LLM_URL: str = "http://127.0.0.1:1234/v1/chat/completions"
        LOCAL_LLM_MODEL: str = "openai/gpt-oss-20b"

        async def run(self) -> None:
            """
            Receives SPADE messages and processes only those originating from the
            configured BDI agent.
            """
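            # A short receive timeout keeps this cyclic behaviour responsive:
            # each pass returns quickly instead of blocking on an empty mailbox.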
            msg = await self.receive(timeout=1)
            if not msg:
                return

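            # msg.sender is a JID; .node is its local part (the name before
            # the "@"), which is what the settings store for comparison.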
            sender = msg.sender.node
            self.agent.logger.info(
                "Received message: %s from %s",
                msg.body,
                sender,
            )

            if sender == settings.agent_settings.bdi_core_agent_name:
                self.agent.logger.debug("Processing message from BDI Core Agent")
                await self._process_bdi_message(msg)
            else:
                self.agent.logger.debug("Message ignored (not from BDI Core Agent)")

        async def _process_bdi_message(self, message: Message) -> None:
            """
            Forwards user text to the LLM and replies with the generated text.
            """
            user_text = message.body
            llm_response = await self._query_llm(user_text)
            await self._reply(llm_response)

        async def _reply(self, msg: str) -> None:
            """
            Sends a response message back to the BDI Core Agent.
            """
            reply = Message(
                to=f"{settings.agent_settings.bdi_core_agent_name}@"
                f"{settings.agent_settings.host}",
                body=msg,
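                # The thread id lets the BDI core correlate this reply with
                # its original request.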
                thread="llm_response",
            )
            await self.send(reply)
            self.agent.logger.info("Reply sent to BDI Core Agent")

        async def _query_llm(self, prompt: str) -> str:
            """
            Sends a chat completion request to the local LLM service.

            :param prompt: Input text prompt to pass to the LLM.
            :return: LLM-generated content or fallback message.
            """
            try:
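                # The request body follows the OpenAI chat completions schema,
                # which LM Studio's server emulates.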
                async with httpx.AsyncClient(timeout=120.0) as client:
                    response = await client.post(
                        self.LOCAL_LLM_URL,
                        headers={"Content-Type": "application/json"},
                        json={
                            "model": self.LOCAL_LLM_MODEL,
                            "messages": [{"role": "user", "content": prompt}],
                            "temperature": 0.3,
                        },
                    )
                response.raise_for_status()
                data: dict[str, Any] = response.json()
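                # Missing keys fall back to defaults so a malformed response
                # yields "No response" rather than a KeyError.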
                return (
                    data.get("choices", [{}])[0]
                    .get("message", {})
                    .get("content", "No response")
                )
            except httpx.HTTPError as err:
                self.agent.logger.error("HTTP error: %s", err)
                return "LLM service unavailable."
            except Exception as err:
                self.agent.logger.error("Unexpected error: %s", err)
                return "Error processing the request."

    async def setup(self) -> None:
        """
        Sets up the SPADE behaviour to filter and process messages from the
        BDI Core Agent.
        """
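        # The template restricts delivery to messages whose sender matches the
        # BDI core's full JID; run() re-checks the sender as a second guard.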
        template = Template()
        template.sender = (
            f"{settings.agent_settings.bdi_core_agent_name}@"
            f"{settings.agent_settings.host}"
        )

        behaviour = self.ReceiveMessageBehaviour()
        self.add_behaviour(behaviour, template)

        self.logger.info("LLMAgent setup complete")
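For context, a minimal sketch of the sending side the commit message refers to ("modified bdi_core to send a test message"). It is illustrative only: the behaviour name and the "llm_agent" JID local part are assumptions, not taken from this diff; only the "llm_response" thread id comes from the code above.

# Hypothetical BDI-core-side behaviour (not part of this diff).
from spade.behaviour import OneShotBehaviour
from spade.message import Message

from control_backend.core.config import settings


class AskLLMBehaviour(OneShotBehaviour):
    async def run(self):
        # Address the LLM agent by JID; "llm_agent" is an assumed local part.
        msg = Message(
            to=f"llm_agent@{settings.agent_settings.host}",
            body="Hello from the BDI core",
        )
        await self.send(msg)
        # Wait slightly longer than the LLM agent's 120 s HTTP timeout.
        reply = await self.receive(timeout=130)
        if reply and reply.thread == "llm_response":
            print(reply.body)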