Merge branch 'feat/llm-agent' into 'dev'

feat: LLM agent

See merge request ics/sp/2025/n25b/pepperplus-cb!12
Twirre
2025-10-29 12:58:41 +00:00
8 changed files with 279 additions and 25 deletions

View File

@@ -16,6 +16,17 @@ Using UV, installing the packages and virtual environment is as simple as typing
uv sync
```
## Local LLM
To run an LLM locally, download LM Studio from https://lmstudio.ai.
When installing, select developer mode, then download a model (LM Studio will suggest one) and run it; the developer window should show the status "running".
Copy the URL shown at the top right and set `local_llm_url` to that URL plus `/v1/chat/completions`. The exact path suffix might differ depending on the model you choose.
Copy the name of the loaded model and set `local_llm_model` in the settings.
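For reference, both values are defined in `LLMSettings` (see `control_backend/core/config.py`). A minimal sketch with hypothetical local values; substitute the URL and model name that LM Studio shows for your setup:
```python
from pydantic import BaseModel


class LLMSettings(BaseModel):
    # Hypothetical example values; replace with your LM Studio URL and model.
    local_llm_url: str = "http://localhost:1234/v1/chat/completions"
    local_llm_model: str = "qwen2.5-7b-instruct"
```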
## Running
To run the project (development server), execute the following command (while inside the root repository):

View File

@@ -1,11 +1,15 @@
import asyncio
import logging

import agentspeak
from spade.behaviour import OneShotBehaviour
from spade.message import Message
from spade_bdi.bdi import BDIAgent

from control_backend.agents.bdi.behaviours.belief_setter import BeliefSetterBehaviour
from control_backend.agents.bdi.behaviours.receive_llm_resp_behaviour import (
    ReceiveLLMResponseBehaviour,
)
from control_backend.core.config import settings


class BDICoreAgent(BDIAgent):
@@ -13,24 +17,55 @@ class BDICoreAgent(BDIAgent):
    """
    This is the Brain agent that does the belief inference with AgentSpeak.
    This is a continuous process that happens automatically in the background.
    This class contains all the actions that can be called from AgentSpeak plans.
    It has the BeliefSetter behaviour and can ask and receive requests from the LLM agent.
    """
logger = logging.getLogger("BDI Core")
logger = logging.getLogger("bdi_core_agent")
async def setup(self):
belief_setter = BeliefSetterBehaviour()
self.add_behaviour(belief_setter)
async def setup(self) -> None:
"""
Initializes belief behaviors and message routing.
"""
self.logger.info("BDICoreAgent setup started")
self.add_behaviour(BeliefSetterBehaviour())
self.add_behaviour(ReceiveLLMResponseBehaviour())
await self._send_to_llm("Hi pepper, how are you?")
# This is the example message currently sent to the llm at the start of the Program
self.logger.info("BDICoreAgent setup complete")
    def add_custom_actions(self, actions) -> None:
        """
        Registers custom AgentSpeak actions callable from plans.
        """

        @actions.add(".reply", 1)
        def _reply(agent: "BDICoreAgent", term, intention):
            """
            Sends text to the LLM (AgentSpeak action).
            Example: .reply("Hello LLM!")
            """
            message_text = agentspeak.grounded(term.args[0], intention.scope)
            self.logger.info("Reply action sending: %s", message_text)
            # AgentSpeak actions are synchronous generators, so schedule the
            # coroutine on the event loop instead of awaiting it directly.
            asyncio.ensure_future(self._send_to_llm(message_text))
            yield
    async def _send_to_llm(self, text: str) -> None:
        """
        Sends a text query to the LLM Agent asynchronously.
        """

        class SendBehaviour(OneShotBehaviour):
            async def run(self) -> None:
                msg = Message(
                    to=settings.agent_settings.llm_agent_name + "@" + settings.agent_settings.host,
                    body=text,
                )
                await self.send(msg)
                self.agent.logger.debug("Message sent to LLM: %s", text)

        self.add_behaviour(SendBehaviour())

View File

@@ -33,6 +33,7 @@ class BeliefSetterBehaviour(CyclicBehaviour):
self.logger.debug("Processing message from belief collector.")
self._process_belief_message(message)
case _:
self.logger.debug("Not the belief agent, discarding message")
pass
def _process_belief_message(self, message: Message):
@@ -59,7 +60,7 @@ class BeliefSetterBehaviour(CyclicBehaviour):
        for belief, arguments in beliefs.items():
            self.agent.bdi.set_belief(belief, *arguments)
            # Special case: if there's a new user message, flag that we haven't responded yet
            if belief == "user_said":
                try:
                    self.agent.bdi.remove_belief("responded")

View File

@@ -0,0 +1,26 @@
import logging

from spade.behaviour import CyclicBehaviour

from control_backend.core.config import settings


class ReceiveLLMResponseBehaviour(CyclicBehaviour):
    """
    Behaviour that receives responses from the LLM Agent.
    """

    logger = logging.getLogger("BDI/LLM Receiver")

    async def run(self):
        msg = await self.receive(timeout=2)
        if not msg:
            return
        sender = msg.sender.node
        match sender:
            case settings.agent_settings.llm_agent_name:
                content = msg.body
                self.logger.info("Received LLM response: %s", content)
                # Here the BDI can pass the message back as a response.
            case _:
                self.logger.debug("Not from the LLM, discarding message")
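The comment above leaves the hand-off to the BDI layer open. A minimal sketch of one possible continuation, assuming the behaviour is attached to the `BDICoreAgent` (so spade_bdi's `self.agent.bdi.set_belief` is available) and using a hypothetical `llm_response` belief name:
```python
import logging

from spade.behaviour import CyclicBehaviour

from control_backend.core.config import settings


class ReceiveLLMResponseBehaviour(CyclicBehaviour):
    """Sketch: hand LLM replies back to the BDI layer as beliefs."""

    logger = logging.getLogger("BDI/LLM Receiver")

    async def run(self):
        msg = await self.receive(timeout=2)
        if not msg:
            return
        if msg.sender.node == settings.agent_settings.llm_agent_name:
            self.logger.info("Received LLM response: %s", msg.body)
            # `llm_response` is an illustrative belief name, not part of the commit.
            self.agent.bdi.set_belief("llm_response", msg.body)
```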

View File

@@ -0,0 +1,127 @@
"""
LLM Agent module for routing text queries from the BDI Core Agent to a local LLM
service and returning its responses back to the BDI Core Agent.
"""
import logging
from typing import Any

import httpx
from spade.agent import Agent
from spade.behaviour import CyclicBehaviour
from spade.message import Message

from control_backend.agents.llm.llm_instructions import LLMInstructions
from control_backend.core.config import settings


class LLMAgent(Agent):
    """
    Agent responsible for processing user text input and querying a locally
    hosted LLM for text generation. Receives messages from the BDI Core Agent
    and responds with processed LLM output.
    """

    logger = logging.getLogger("llm_agent")
    class ReceiveMessageBehaviour(CyclicBehaviour):
        """
        Cyclic behaviour to continuously listen for incoming messages from
        the BDI Core Agent and handle them.
        """

        async def run(self):
            """
            Receives SPADE messages and processes only those originating from
            the configured BDI agent.
            """
            msg = await self.receive(timeout=1)
            if not msg:
                return
            sender = msg.sender.node
            self.agent.logger.info("Received message: %s from %s", msg.body, sender)
            if sender == settings.agent_settings.bdi_core_agent_name:
                self.agent.logger.debug("Processing message from BDI Core Agent")
                await self._process_bdi_message(msg)
            else:
                self.agent.logger.debug("Message ignored (not from BDI Core Agent)")
        async def _process_bdi_message(self, message: Message):
            """
            Forwards user text to the LLM and replies with the generated text.
            """
            user_text = message.body
            llm_response = await self._query_llm(user_text)
            await self._reply(llm_response)

        async def _reply(self, msg: str):
            """
            Sends a response message back to the BDI Core Agent.
            """
            reply = Message(
                to=settings.agent_settings.bdi_core_agent_name + "@" + settings.agent_settings.host,
                body=msg,
            )
            await self.send(reply)
            self.agent.logger.info("Reply sent to BDI Core Agent")
        async def _query_llm(self, prompt: str) -> str:
            """
            Sends a chat completion request to the local LLM service.

            :param prompt: Input text prompt to pass to the LLM.
            :return: LLM-generated content or fallback message.
            """
            # Instructions sent with every request; may become dynamic later.
            instructions = LLMInstructions()
            developer_instruction = instructions.build_developer_instruction()
            try:
                async with httpx.AsyncClient(timeout=120.0) as client:
                    response = await client.post(
                        settings.llm_settings.local_llm_url,
                        headers={"Content-Type": "application/json"},
                        json={
                            "model": settings.llm_settings.local_llm_model,
                            "messages": [
                                {"role": "developer", "content": developer_instruction},
                                {"role": "user", "content": prompt},
                            ],
                            "temperature": 0.3,
                        },
                    )
                    # Request, status check, and parsing all live inside the try
                    # block so connection errors are caught as well.
                    response.raise_for_status()
                    data: dict[str, Any] = response.json()
                return data.get("choices", [{}])[0].get("message", {}).get("content", "No response")
            except httpx.HTTPError as err:
                self.agent.logger.error("HTTP error: %s", err)
                return "LLM service unavailable."
            except Exception as err:
                self.agent.logger.error("Unexpected error: %s", err)
                return "Error processing the request."
    async def setup(self):
        """
        Sets up the SPADE behaviour to filter and process messages from the
        BDI Core Agent.
        """
        self.add_behaviour(self.ReceiveMessageBehaviour())
        self.logger.info("LLMAgent setup complete")
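To sanity-check the LM Studio endpoint independently of the agent stack, a minimal standalone sketch along the lines of `_query_llm`; the URL and model name below are placeholders, substitute your own values from the settings:
```python
import asyncio

import httpx


async def main() -> None:
    # Placeholder endpoint and model; use your LM Studio values.
    url = "http://localhost:1234/v1/chat/completions"
    payload = {
        "model": "qwen2.5-7b-instruct",
        "messages": [{"role": "user", "content": "Hi pepper, how are you?"}],
        "temperature": 0.3,
    }
    async with httpx.AsyncClient(timeout=120.0) as client:
        response = await client.post(url, json=payload)
        response.raise_for_status()
        data = response.json()
    print(data["choices"][0]["message"]["content"])


asyncio.run(main())
```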

View File

@@ -0,0 +1,44 @@
class LLMInstructions:
    """
    Defines structured instructions that are sent along with each request
    to the LLM to guide its behavior (norms, goals, etc.).
    """

    @staticmethod
    def default_norms() -> str:
        return """
Be friendly and respectful.
Make the conversation feel natural and engaging.
""".strip()

    @staticmethod
    def default_goals() -> str:
        return """
Try to learn the user's name during conversation.
""".strip()

    def __init__(self, norms: str | None = None, goals: str | None = None):
        self.norms = norms if norms is not None else self.default_norms()
        self.goals = goals if goals is not None else self.default_goals()

    def build_developer_instruction(self) -> str:
        """
        Builds a multi-line formatted instruction string for the LLM.
        Includes only non-empty structured fields.
        """
        sections = [
            "You are a Pepper robot engaging in natural human conversation.",
            "Keep responses between 1 and 5 sentences, unless instructed otherwise.\n",
        ]
        if self.norms:
            sections.append("Norms to follow:")
            sections.append(self.norms)
            sections.append("")
        if self.goals:
            sections.append("Goals to reach:")
            sections.append(self.goals)
            sections.append("")
        return "\n".join(sections).strip()
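As a quick illustration, with the defaults above `build_developer_instruction()` yields the following prompt text:
```python
from control_backend.agents.llm.llm_instructions import LLMInstructions

print(LLMInstructions().build_developer_instruction())
# You are a Pepper robot engaging in natural human conversation.
# Keep responses between 1 and 5 sentences, unless instructed otherwise.
#
# Norms to follow:
# Be friendly and respectful.
# Make the conversation feel natural and engaging.
#
# Goals to reach:
# Try to learn the user's name during conversation.
```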

View File

@@ -11,11 +11,17 @@ class AgentSettings(BaseModel):
    bdi_core_agent_name: str = "bdi_core"
    belief_collector_agent_name: str = "belief_collector"
    vad_agent_name: str = "vad_agent"
    llm_agent_name: str = "llm_agent"
    test_agent_name: str = "test_agent"
    ri_communication_agent_name: str = "ri_communication_agent"
    ri_command_agent_name: str = "ri_command_agent"


class LLMSettings(BaseModel):
    local_llm_url: str = "http://145.107.82.68:1234/v1/chat/completions"
    local_llm_model: str = "openai/gpt-oss-120b"
class Settings(BaseSettings):
    app_title: str = "PepperPlus"
@@ -25,7 +31,8 @@ class Settings(BaseSettings):
    agent_settings: AgentSettings = AgentSettings()
    llm_settings: LLMSettings = LLMSettings()

    model_config = SettingsConfigDict(env_file=".env")


settings = Settings()

View File

@@ -12,6 +12,7 @@ from fastapi.middleware.cors import CORSMiddleware
from control_backend.agents.ri_communication_agent import RICommunicationAgent
from control_backend.agents.bdi.bdi_core import BDICoreAgent
from control_backend.agents.vad_agent import VADAgent
from control_backend.agents.llm.llm import LLMAgent
from control_backend.api.v1.router import api_router
from control_backend.core.config import settings
from control_backend.core.zmq_context import context
@@ -31,6 +32,7 @@ async def lifespan(app: FastAPI):
    app.state.internal_comm_socket = internal_comm_socket
    logger.info("Internal publishing socket bound to %s", internal_comm_socket)

    # Initiate agents
    ri_communication_agent = RICommunicationAgent(
        settings.agent_settings.ri_communication_agent_name + "@" + settings.agent_settings.host,
@@ -40,11 +42,12 @@ async def lifespan(app: FastAPI):
    )
    await ri_communication_agent.start()

    llm_agent = LLMAgent(
        settings.agent_settings.llm_agent_name + "@" + settings.agent_settings.host,
        settings.agent_settings.llm_agent_name,
    )
    await llm_agent.start()

    bdi_core = BDICoreAgent(
        settings.agent_settings.bdi_core_agent_name + "@" + settings.agent_settings.host,
        settings.agent_settings.bdi_core_agent_name,
        "src/control_backend/agents/bdi/rules.asl",
    )
    await bdi_core.start()

    _temp_vad_agent = VADAgent("tcp://localhost:5558", False)