Merge remote-tracking branch 'origin/dev' into feat/transcription-agent

# Conflicts:
#	src/control_backend/core/config.py
This commit is contained in:
Twirre Meulenbelt
2025-10-29 14:00:57 +01:00
9 changed files with 316 additions and 48 deletions

View File

@@ -1,9 +1,15 @@
import logging
import agentspeak
from spade.behaviour import OneShotBehaviour
from spade.message import Message
from spade_bdi.bdi import BDIAgent
from control_backend.agents.bdi.behaviours.belief_setter import BeliefSetter
from control_backend.agents.bdi.behaviours.belief_setter import BeliefSetterBehaviour
from control_backend.agents.bdi.behaviours.receive_llm_resp_behaviour import (
ReceiveLLMResponseBehaviour,
)
from control_backend.core.config import settings
class BDICoreAgent(BDIAgent):
@@ -11,25 +17,55 @@ class BDICoreAgent(BDIAgent):
This is the Brain agent that does the belief inference with AgentSpeak.
This is a continuous process that happens automatically in the background.
This class contains all the actions that can be called from AgentSpeak plans.
It has the BeliefSetter behaviour.
It has the BeliefSetter behaviour and can send requests to, and receive responses from, the LLM agent.
"""
logger = logging.getLogger("BDI Core")
logger = logging.getLogger("bdi_core_agent")
async def setup(self):
belief_setter = BeliefSetter()
self.add_behaviour(belief_setter)
async def setup(self) -> None:
"""
Initializes belief behaviours and message routing.
"""
self.logger.info("BDICoreAgent setup started")
self.add_behaviour(BeliefSetterBehaviour())
self.add_behaviour(ReceiveLLMResponseBehaviour())
await self._send_to_llm("Hi pepper, how are you?")
# Example message currently sent to the LLM at the start of the program.
self.logger.info("BDICoreAgent setup complete")
def add_custom_actions(self, actions) -> None:
"""
Registers custom AgentSpeak actions callable from plans.
"""
def add_custom_actions(self, actions):
@actions.add(".reply", 1)
def _reply(agent, term, intention):
message = agentspeak.grounded(term.args[0], intention.scope)
self.logger.info(f"Replying to message: {message}")
reply = self._send_to_llm(message)
self.logger.info(f"Received reply: {reply}")
def _reply(agent: "BDICoreAgent", term, intention):
"""
Sends text to the LLM (AgentSpeak action).
Example: .reply("Hello LLM!")
"""
message_text = agentspeak.grounded(term.args[0], intention.scope)
self.logger.info("Reply action sending: %s", message_text)
self._send_to_llm(message_text)
yield
def _send_to_llm(self, message) -> str:
"""TODO: implement"""
return f"This is a reply to {message}"
async def _send_to_llm(self, text: str):
"""
Sends a text query to the LLM Agent asynchronously.
"""
class SendBehaviour(OneShotBehaviour):
async def run(self) -> None:
msg = Message(
to=settings.agent_settings.llm_agent_name + '@' + settings.agent_settings.host,
body=text
)
await self.send(msg)
self.agent.logger.debug("Message sent to LLM: %s", text)
self.add_behaviour(SendBehaviour())
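
A note on the pattern above: in SPADE, messages are sent from within a behaviour via Behaviour.send, which is why _send_to_llm wraps the send in a throwaway OneShotBehaviour instead of sending straight from the agent. A minimal, self-contained sketch of that fire-and-forget idiom, with a hypothetical recipient JID:

from spade.behaviour import OneShotBehaviour
from spade.message import Message

class FireAndForget(OneShotBehaviour):
    """Send one message, then finish; mirrors SendBehaviour above."""

    def __init__(self, to_jid: str, body: str):
        super().__init__()
        self.to_jid = to_jid
        self.body = body

    async def run(self) -> None:
        # Behaviours, not agents, own send() in SPADE.
        await self.send(Message(to=self.to_jid, body=self.body))

# Usage (hypothetical JID):
# agent.add_behaviour(FireAndForget("llm_agent@localhost", "hello"))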

View File

@@ -1,19 +1,17 @@
import asyncio
import json
import logging
from spade.agent import Message
from spade.behaviour import CyclicBehaviour
from spade_bdi.bdi import BDIAgent
from spade_bdi.bdi import BDIAgent, BeliefNotInitiated
from control_backend.core.config import settings
class BeliefSetter(CyclicBehaviour):
class BeliefSetterBehaviour(CyclicBehaviour):
"""
This is the behaviour that the BDI agent runs. This behaviour waits for an incoming
message and processes it based on its sender. Currently, it only waits for messages
containing beliefs from BeliefCollector and adds these to its KB.
message and processes it based on its sender.
"""
agent: BDIAgent
@@ -24,7 +22,7 @@ class BeliefSetter(CyclicBehaviour):
if msg:
self.logger.info(f"Received message {msg.body}")
self._process_message(msg)
await asyncio.sleep(1)
def _process_message(self, message: Message):
sender = message.sender.node # removes host from jid and converts to str
@@ -35,6 +33,7 @@ class BeliefSetter(CyclicBehaviour):
self.logger.debug("Processing message from belief collector.")
self._process_belief_message(message)
case _:
self.logger.debug("Not the belief agent, discarding message")
pass
def _process_belief_message(self, message: Message):
@@ -44,19 +43,28 @@ class BeliefSetter(CyclicBehaviour):
match message.thread:
case "beliefs":
try:
beliefs: dict[str, list[list[str]]] = json.loads(message.body)
beliefs: dict[str, list[str]] = json.loads(message.body)
self._set_beliefs(beliefs)
except json.JSONDecodeError as e:
self.logger.error("Could not decode beliefs into JSON format: %s", e)
case _:
pass
def _set_beliefs(self, beliefs: dict[str, list[list[str]]]):
def _set_beliefs(self, beliefs: dict[str, list[str]]):
"""Remove previous values for beliefs and update them with the provided values."""
if self.agent.bdi is None:
self.logger.warning("Cannot set beliefs, since agent's BDI is not yet initialized.")
return
for belief, arguments_list in beliefs.items():
for arguments in arguments_list:
self.agent.bdi.set_belief(belief, *arguments)
self.logger.info("Set belief %s with arguments %s", belief, arguments)
# Set new beliefs (outdated beliefs are automatically removed)
for belief, arguments in beliefs.items():
self.agent.bdi.set_belief(belief, *arguments)
# Special case: if there's a new user message, flag that we haven't responded yet
if belief == "user_said":
try:
self.agent.bdi.remove_belief("responded")
except BeliefNotInitiated:
pass
self.logger.info("Set belief %s with arguments %s", belief, arguments)

View File

@@ -0,0 +1,26 @@
import logging
from spade.behaviour import CyclicBehaviour
from control_backend.core.config import settings
class ReceiveLLMResponseBehaviour(CyclicBehaviour):
"""
Cyclic behaviour that receives responses from the LLM Agent.
"""
logger = logging.getLogger("BDI/LLM Reciever")
async def run(self):
msg = await self.receive(timeout=2)
if not msg:
return
sender = msg.sender.node
match sender:
case settings.agent_settings.llm_agent_name:
content = msg.body
self.logger.info("Received LLM response: %s", content)
# Here the BDI can pass the message back as a response.
case _:
self.logger.debug("Not from the llm, discarding message")
pass
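
The comment above marks where the reply would re-enter the BDI loop. One possible sketch, reusing the set_belief pattern from BeliefSetterBehaviour with a hypothetical llm_response belief:

# Inside run(), after logging the response (sketch, not part of this diff):
if self.agent.bdi is not None:
    # Hypothetical belief name; an AgentSpeak plan could react to it.
    self.agent.bdi.set_belief("llm_response", content)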

View File

@@ -0,0 +1,127 @@
"""
LLM Agent module for routing text queries from the BDI Core Agent to a local LLM
service and returning its responses back to the BDI Core Agent.
"""
import logging
from typing import Any
import httpx
from spade.agent import Agent
from spade.behaviour import CyclicBehaviour
from spade.message import Message
from control_backend.agents.llm.llm_instructions import LLMInstructions
from control_backend.core.config import settings
class LLMAgent(Agent):
"""
Agent responsible for processing user text input and querying a locally
hosted LLM for text generation. Receives messages from the BDI Core Agent
and responds with processed LLM output.
"""
logger = logging.getLogger("llm_agent")
class ReceiveMessageBehaviour(CyclicBehaviour):
"""
Cyclic behaviour to continuously listen for incoming messages from
the BDI Core Agent and handle them.
"""
async def run(self):
"""
Receives SPADE messages and processes only those originating from the
configured BDI agent.
"""
msg = await self.receive(timeout=1)
if not msg:
return
sender = msg.sender.node
self.agent.logger.info(
"Received message: %s from %s",
msg.body,
sender,
)
if sender == settings.agent_settings.bdi_core_agent_name:
self.agent.logger.debug("Processing message from BDI Core Agent")
await self._process_bdi_message(msg)
else:
self.agent.logger.debug("Message ignored (not from BDI Core Agent)")
async def _process_bdi_message(self, message: Message):
"""
Forwards user text to the LLM and replies with the generated text.
"""
user_text = message.body
llm_response = await self._query_llm(user_text)
await self._reply(llm_response)
async def _reply(self, msg: str):
"""
Sends a response message back to the BDI Core Agent.
"""
reply = Message(
to=settings.agent_settings.bdi_core_agent_name + '@' + settings.agent_settings.host,
body=msg
)
await self.send(reply)
self.agent.logger.info("Reply sent to BDI Core Agent")
async def _query_llm(self, prompt: str) -> str:
"""
Sends a chat completion request to the local LLM service.
:param prompt: Input text prompt to pass to the LLM.
:return: LLM-generated content or fallback message.
"""
async with httpx.AsyncClient(timeout=120.0) as client:
# Instructions are rebuilt per request so their content can become dynamic later.
instructions = LLMInstructions()
developer_instruction = instructions.build_developer_instruction()
response = await client.post(
settings.llm_settings.local_llm_url,
headers={"Content-Type": "application/json"},
json={
"model": settings.llm_settings.local_llm_model,
"messages": [
{
"role": "developer",
"content": developer_instruction
},
{
"role": "user",
"content": prompt
}
],
"temperature": 0.3
},
)
try:
response.raise_for_status()
data: dict[str, Any] = response.json()
return data.get("choices", [{}])[0].get(
"message", {}
).get("content", "No response")
except httpx.HTTPError as err:
self.agent.logger.error("HTTP error: %s", err)
return "LLM service unavailable."
except Exception as err:
self.agent.logger.error("Unexpected error: %s", err)
return "Error processing the request."
async def setup(self):
"""
Sets up the SPADE behaviour to filter and process messages from the
BDI Core Agent.
"""
self.logger.info("LLMAgent setup complete")
behaviour = self.ReceiveMessageBehaviour()
self.add_behaviour(behaviour)
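
The parsing chain in _query_llm assumes the standard OpenAI-style chat-completions response shape. An illustrative payload showing what that chain extracts (example values, not real output):

data = {
    "choices": [
        {"message": {"role": "assistant", "content": "Hi! I'm doing well."}}
    ]
}
content = data.get("choices", [{}])[0].get("message", {}).get("content", "No response")
# content == "Hi! I'm doing well."; missing keys fall back to "No response".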

View File

@@ -0,0 +1,44 @@
class LLMInstructions:
"""
Defines structured instructions that are sent along with each request
to the LLM to guide its behavior (norms, goals, etc.).
"""
@staticmethod
def default_norms() -> str:
return """
Be friendly and respectful.
Make the conversation feel natural and engaging.
""".strip()
@staticmethod
def default_goals() -> str:
return """
Try to learn the user's name during conversation.
""".strip()
def __init__(self, norms: str | None = None, goals: str | None = None):
self.norms = norms if norms is not None else self.default_norms()
self.goals = goals if goals is not None else self.default_goals()
def build_developer_instruction(self) -> str:
"""
Builds a multi-line formatted instruction string for the LLM.
Includes only non-empty structured fields.
"""
sections = [
"You are a Pepper robot engaging in natural human conversation.",
"Keep responses between 15 sentences, unless instructed otherwise.\n",
]
if self.norms:
sections.append("Norms to follow:")
sections.append(self.norms)
sections.append("")
if self.goals:
sections.append("Goals to reach:")
sections.append(self.goals)
sections.append("")
return "\n".join(sections).strip()

View File

@@ -11,12 +11,18 @@ class AgentSettings(BaseModel):
bdi_core_agent_name: str = "bdi_core"
belief_collector_agent_name: str = "belief_collector"
vad_agent_name: str = "vad_agent"
llm_agent_name: str = "llm_agent"
test_agent_name: str = "test_agent"
transcription_agent_name: str = "transcription_agent"
ri_communication_agent_name: str = "ri_communication_agent"
ri_command_agent_name: str = "ri_command_agent"
class LLMSettings(BaseModel):
local_llm_url: str = "http://145.107.82.68:1234/v1/chat/completions"
local_llm_model: str = "openai/gpt-oss-120b"
class Settings(BaseSettings):
app_title: str = "PepperPlus"
@@ -26,7 +32,8 @@ class Settings(BaseSettings):
agent_settings: AgentSettings = AgentSettings()
llm_settings: LLMSettings = LLMSettings()
model_config = SettingsConfigDict(env_file=".env")
settings = Settings()
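
With these additions, the new values are reachable through the existing settings singleton, and since model_config points at .env they can be overridden there without code changes. A quick read-back sketch:

from control_backend.core.config import settings

print(settings.llm_settings.local_llm_model)   # "openai/gpt-oss-120b" by default
print(settings.agent_settings.llm_agent_name)  # "llm_agent"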

View File

@@ -12,6 +12,7 @@ from fastapi.middleware.cors import CORSMiddleware
from control_backend.agents.ri_communication_agent import RICommunicationAgent
from control_backend.agents.bdi.bdi_core import BDICoreAgent
from control_backend.agents.vad_agent import VADAgent
from control_backend.agents.llm.llm import LLMAgent
from control_backend.api.v1.router import api_router
from control_backend.core.config import settings
from control_backend.core.zmq_context import context
@@ -31,6 +32,7 @@ async def lifespan(app: FastAPI):
app.state.internal_comm_socket = internal_comm_socket
logger.info("Internal publishing socket bound to %s", internal_comm_socket)
# Initiate agents
ri_communication_agent = RICommunicationAgent(
settings.agent_settings.ri_communication_agent_name + "@" + settings.agent_settings.host,
@@ -39,12 +41,13 @@ async def lifespan(app: FastAPI):
bind=True,
)
await ri_communication_agent.start()
bdi_core = BDICoreAgent(
settings.agent_settings.bdi_core_agent_name + "@" + settings.agent_settings.host,
settings.agent_settings.bdi_core_agent_name,
"src/control_backend/agents/bdi/rules.asl",
)
llm_agent = LLMAgent(settings.agent_settings.llm_agent_name + '@' + settings.agent_settings.host,
settings.agent_settings.llm_agent_name)
await llm_agent.start()
bdi_core = BDICoreAgent(settings.agent_settings.bdi_core_agent_name + '@' + settings.agent_settings.host,
settings.agent_settings.bdi_core_agent_name, "src/control_backend/agents/bdi/rules.asl")
await bdi_core.start()
_temp_vad_agent = VADAgent("tcp://localhost:5558", False)
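
One observation on the startup code: the name + '@' + host concatenation now repeats for every agent. A hypothetical helper (not part of this diff) that would centralize it:

from control_backend.core.config import settings

def jid(agent_name: str) -> str:
    """Build a full XMPP JID from an agent name and the configured host."""
    return f"{agent_name}@{settings.agent_settings.host}"

# e.g. LLMAgent(jid(settings.agent_settings.llm_agent_name),
#               settings.agent_settings.llm_agent_name)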