feat: LLM agent #12

README.md (+11 lines)
@@ -16,6 +16,17 @@ Using UV, installing the packages and virtual environment is as simple as typing
 uv sync
 ```
 
+## Local LLM
+
+To run an LLM locally, download https://lmstudio.ai
+
+When installing, select developer mode, download a model (it will already suggest one) and run it (see the developer window, status: running).
+
+Copy the URL at the top right and replace LOCAL_LLM_URL with it plus v1/chat/completions.
+This v1/chat/completions part might differ based on what model you choose.
+
+Copy the name of the loaded model and replace LOCAL_LLM_MODEL.
+
 ## Running
 To run the project (development server), execute the following command (while inside the root repository):
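For a quick sanity check of the endpoint before wiring it into the agents, something like the following should work (the URL and model name are the defaults from llm.py in this PR; substitute the values from your own LM Studio window):

```python
# Smoke-test the local LM Studio chat-completions endpoint.
import httpx

LOCAL_LLM_URL = "http://127.0.0.1:1234/v1/chat/completions"  # default LM Studio address
LOCAL_LLM_MODEL = "openai/gpt-oss-20b"  # whatever model you loaded

response = httpx.post(
    LOCAL_LLM_URL,
    json={
        "model": LOCAL_LLM_MODEL,
        "messages": [{"role": "user", "content": "Say hello"}],
        "temperature": 0.3,
    },
    timeout=120.0,
)
response.raise_for_status()
print(response.json()["choices"][0]["message"]["content"])
```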
src/control_backend/agents/bdi/bdi_core.py

@@ -1,35 +1,96 @@
 import logging
 
 import agentspeak
+from spade.behaviour import CyclicBehaviour, OneShotBehaviour
+from spade.message import Message
+from spade.template import Template
 from spade_bdi.bdi import BDIAgent
 
 from control_backend.agents.bdi.behaviours.belief_setter import BeliefSetter
+from control_backend.core.config import settings
 
 
 class BDICoreAgent(BDIAgent):
     """
     This is the Brain agent that does the belief inference with AgentSpeak.
     This is a continuous process that happens automatically in the background.
     This class contains all the actions that can be called from AgentSpeak plans.
-    It has the BeliefSetter behaviour.
+    It has the BeliefSetter behaviour and can send requests to and receive
+    responses from the LLM agent.
     """
-    logger = logging.getLogger("BDI Core")
 
-    async def setup(self):
-        belief_setter = BeliefSetter()
-        self.add_behaviour(belief_setter)
+    logger = logging.getLogger("bdi_core_agent")
 
-    def add_custom_actions(self, actions):
+    async def setup(self) -> None:
+        """
+        Initializes belief behaviours and message routing.
+        """
+        self.logger.info("BDICoreAgent setup started")
+
+        self.add_behaviour(BeliefSetter())
+        self._add_llm_response_receiver()
+
+        # Example message currently sent to the LLM at the start of the program.
+        self._send_to_llm("Hello we are the Pepper plus team")
+
+        self.logger.info("BDICoreAgent setup complete")
+
+    def add_custom_actions(self, actions) -> None:
+        """
+        Registers custom AgentSpeak actions callable from plans.
+        """
+
         @actions.add(".reply", 1)
-        def _reply(agent, term, intention):
-            message = agentspeak.grounded(term.args[0], intention.scope)
-            self.logger.info(f"Replying to message: {message}")
-            reply = self._send_to_llm(message)
-            self.logger.info(f"Received reply: {reply}")
+        def _reply(agent: "BDICoreAgent", term, intention):
+            """
+            Sends text to the LLM (AgentSpeak action).
+            Example: .reply("Hello LLM!")
+            """
+            message_text = agentspeak.grounded(term.args[0], intention.scope)
+            self.logger.info("Reply action sending: %s", message_text)
+
+            self._send_to_llm(message_text)
             yield
 
-    def _send_to_llm(self, message) -> str:
-        """TODO: implement"""
-        return f"This is a reply to {message}"
+    def _send_to_llm(self, text: str) -> str:
+        """
+        Sends a text query to the LLM Agent.
+
+        Scheduling is synchronous (it only attaches a OneShotBehaviour), so it
+        is safe to call from synchronous AgentSpeak actions; the actual XMPP
+        send happens asynchronously inside the behaviour.
+        """
+
+        class SendBehaviour(OneShotBehaviour):
+            async def run(self) -> None:
+                msg = Message(
+                    to=f"{settings.agent_settings.test_agent_name}@"
+                    f"{settings.agent_settings.host}",
+                    body=text,
+                    thread="llm_request",
+                )
+                msg.set_metadata("performative", "inform")
+                await self.send(msg)
+                self.agent.logger.debug("Message sent to LLM: %s", text)
+
+        self.add_behaviour(SendBehaviour())
+        return "LLM message dispatch scheduled"
+
+    def _add_llm_response_receiver(self) -> None:
+        """
+        Adds a behaviour that receives responses from the LLM Agent.
+        """
+
+        class ReceiveLLMResponseBehaviour(CyclicBehaviour):
+            async def run(self) -> None:
+                msg = await self.receive(timeout=2)
+                if not msg:
+                    return
+
+                content = msg.body
+                self.agent.logger.info("Received LLM response: %s", content)
+
+                # TODO: Convert response into a belief (optional future feature)
+                # Example:
+                # self.agent.add_belief("llm_response", content)
+                # self.agent.logger.debug("Added belief: llm_response(%s)", content)
+
+        template = Template()
+        template.thread = "llm_response"
+
+        self.add_behaviour(ReceiveLLMResponseBehaviour(), template)
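A note on the routing above: SPADE only hands a message to a behaviour whose attached template matches it, which is why the two thread names ("llm_request" and "llm_response") pair the request and response paths. A minimal sketch of the thread-based matching, using only the SPADE classes already imported in this PR:

```python
from spade.message import Message
from spade.template import Template

# The same template _add_llm_response_receiver attaches to its behaviour.
template = Template()
template.thread = "llm_response"

reply = Message(to="bdi_core@xmpp.twirre.dev", body="hi", thread="llm_response")
other = Message(to="bdi_core@xmpp.twirre.dev", body="hi", thread="llm_request")

assert template.match(reply)       # delivered to ReceiveLLMResponseBehaviour
assert not template.match(other)   # ignored by that behaviour
```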
src/control_backend/agents/bdi/behaviours/belief_setter.py

@@ -33,6 +33,7 @@ class BeliefSetter(CyclicBehaviour):
                 self.logger.debug("Processing message from belief collector.")
                 self._process_belief_message(message)
             case _:
+                self.logger.debug("Not the belief agent, discarding message")
                 pass
 
     def _process_belief_message(self, message: Message):
src/control_backend/agents/llm/llm.py (new file, +125 lines)

@@ -0,0 +1,125 @@
+"""
+LLM Agent module for routing text queries from the BDI Core Agent to a local LLM
+service and returning its responses back to the BDI Core Agent.
+"""
+
+import logging
+from typing import Any
+
+import httpx
+from spade.agent import Agent
+from spade.behaviour import CyclicBehaviour
+from spade.message import Message
+from spade.template import Template
+
+from control_backend.core.config import settings
+
+
+class LLMAgent(Agent):
+    """
+    Agent responsible for processing user text input and querying a locally
+    hosted LLM for text generation. Receives messages from the BDI Core Agent
+    and responds with processed LLM output.
+    """
+
+    logger = logging.getLogger("llm_agent")
+
+    class ReceiveMessageBehaviour(CyclicBehaviour):
+        """
+        Cyclic behaviour to continuously listen for incoming messages from
+        the BDI Core Agent and handle them.
+        """
+
+        LOCAL_LLM_URL: str = "http://127.0.0.1:1234/v1/chat/completions"
+        LOCAL_LLM_MODEL: str = "openai/gpt-oss-20b"
+
+        async def run(self) -> None:
+            """
+            Receives SPADE messages and processes only those originating from
+            the configured BDI agent.
+            """
+            msg = await self.receive(timeout=1)
+            if not msg:
+                return
+
+            sender = msg.sender.node
+            self.agent.logger.info("Received message: %s from %s", msg.body, sender)
+
+            if sender == settings.agent_settings.bdi_core_agent_name:
+                self.agent.logger.debug("Processing message from BDI Core Agent")
+                await self._process_bdi_message(msg)
+            else:
+                self.agent.logger.debug("Message ignored (not from BDI Core Agent)")
+
+        async def _process_bdi_message(self, message: Message) -> None:
+            """
+            Forwards user text to the LLM and replies with the generated text.
+            """
+            user_text = message.body
+            llm_response = await self._query_llm(user_text)
+            await self._reply(llm_response)
+
+        async def _reply(self, msg: str) -> None:
+            """
+            Sends a response message back to the BDI Core Agent.
+            """
+            reply = Message(
+                to=f"{settings.agent_settings.bdi_core_agent_name}@"
+                f"{settings.agent_settings.host}",
+                body=msg,
+                thread="llm_response",
+            )
+            await self.send(reply)
+            self.agent.logger.info("Reply sent to BDI Core Agent")
+
+        async def _query_llm(self, prompt: str) -> str:
+            """
+            Sends a chat completion request to the local LLM service.
+
+            :param prompt: Input text prompt to pass to the LLM.
+            :return: LLM-generated content or fallback message.
+            """
+            try:
+                # The POST sits inside the try so connection errors are
+                # caught along with HTTP status errors.
+                async with httpx.AsyncClient(timeout=120.0) as client:
+                    response = await client.post(
+                        self.LOCAL_LLM_URL,
+                        headers={"Content-Type": "application/json"},
+                        json={
+                            "model": self.LOCAL_LLM_MODEL,
+                            "messages": [{"role": "user", "content": prompt}],
+                            "temperature": 0.3,
+                        },
+                    )
+                response.raise_for_status()
+                data: dict[str, Any] = response.json()
+                return (
+                    data.get("choices", [{}])[0]
+                    .get("message", {})
+                    .get("content", "No response")
+                )
+            except httpx.HTTPError as err:
+                self.agent.logger.error("HTTP error: %s", err)
+                return "LLM service unavailable."
+            except Exception as err:
+                self.agent.logger.error("Unexpected error: %s", err)
+                return "Error processing the request."
+
+    async def setup(self) -> None:
+        """
+        Sets up the SPADE behaviour to filter and process messages from the
+        BDI Core Agent.
+        """
+        template = Template()
+        template.sender = (
+            f"{settings.agent_settings.bdi_core_agent_name}@"
+            f"{settings.agent_settings.host}"
+        )
+
+        self.add_behaviour(self.ReceiveMessageBehaviour(), template)
+        self.logger.info("LLMAgent setup complete")
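For reference, _query_llm assumes the OpenAI-compatible response shape that LM Studio serves. A self-contained check of the parsing against an illustrative payload (only the fields the code reads are shown):

```python
# Illustrative chat-completions payload; the real response has more fields.
data = {
    "choices": [
        {"message": {"role": "assistant", "content": "Hello from the model"}},
    ],
}

content = data.get("choices", [{}])[0].get("message", {}).get("content", "No response")
assert content == "Hello from the model"
```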
src/control_backend/core/config.py

@@ -6,9 +6,10 @@ class ZMQSettings(BaseModel):
     internal_comm_address: str = "tcp://localhost:5560"
 
 class AgentSettings(BaseModel):
-    host: str = "localhost"
+    host: str = "xmpp.twirre.dev"
     bdi_core_agent_name: str = "bdi_core"
     belief_collector_agent_name: str = "belief_collector"
+    llm_agent_name: str = "llm_agent"
     test_agent_name: str = "test_agent"
 
 class Settings(BaseSettings):
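These settings are what the rest of the PR composes agent JIDs from, e.g. in the lifespan hook of the entrypoint diff below:

```python
from control_backend.core.config import settings

# name + "@" + host, as done when constructing BDICoreAgent and LLMAgent
bdi_jid = f"{settings.agent_settings.bdi_core_agent_name}@{settings.agent_settings.host}"
# -> "bdi_core@xmpp.twirre.dev" with the defaults above
```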
src/control_backend/main.py (path assumed)

@@ -13,6 +13,7 @@ import zmq
 
 # Internal imports
 from control_backend.agents.bdi.bdi_core import BDICoreAgent
+from control_backend.agents.llm.llm import LLMAgent
 from control_backend.api.v1.router import api_router
 from control_backend.core.config import AgentSettings, settings
 from control_backend.core.zmq_context import context

@@ -31,9 +32,15 @@ async def lifespan(app: FastAPI):
     app.state.internal_comm_socket = internal_comm_socket
     logger.info("Internal publishing socket bound to %s", internal_comm_socket)
 
     # Initiate agents
-    bdi_core = BDICoreAgent(settings.agent_settings.bdi_core_agent_name + '@' + settings.agent_settings.host, settings.agent_settings.bdi_core_agent_name, "src/control_backend/agents/bdi/rules.asl")
+    llm_agent = LLMAgent(settings.agent_settings.test_agent_name + '@' + settings.agent_settings.host, "secret, ask twirre")
+    await llm_agent.start()
+
+    bdi_core = BDICoreAgent(settings.agent_settings.bdi_core_agent_name + '@' + settings.agent_settings.host, "secret, ask twirre", "src/control_backend/agents/bdi/rules.asl")
     await bdi_core.start()
 
     yield