feat: send norms and goals to the LLM

Base goals and norms can be defined in llm_instructions.py.
Cleaned up the code.

ref: N25B-215
This commit is contained in:
JobvAlewijk
2025-10-29 12:45:13 +01:00
parent f163e0ee6c
commit af789bd459
4 changed files with 74 additions and 20 deletions

View File

@@ -30,7 +30,7 @@ class BDICoreAgent(BDIAgent):
self.add_behaviour(BeliefSetter())
self.add_behaviour(ReceiveLLMResponseBehaviour())
await self._send_to_llm("Hello we are the Pepper plus team")
await self._send_to_llm("Hi pepper, how are you?")
# This is the example message currently sent to the llm at the start of the Program
self.logger.info("BDICoreAgent setup complete")
@@ -52,7 +52,7 @@ class BDICoreAgent(BDIAgent):
self._send_to_llm(message_text)
yield
async def _send_to_llm(self, text: str) -> str:
async def _send_to_llm(self, text: str):
"""
Sends a text query to the LLM Agent asynchronously.
"""
@@ -61,12 +61,10 @@ class BDICoreAgent(BDIAgent):
async def run(self) -> None:
msg = Message(
to= settings.agent_settings.llm_agent_name + '@' + settings.agent_settings.host,
body= text,
thread= "llm_request",
body= text
)
await self.send(msg)
self.agent.logger.debug("Message sent to LLM: %s", text)
self.add_behaviour(SendBehaviour())
return "LLM message dispatch scheduled"
self.add_behaviour(SendBehaviour())

View File

@@ -15,6 +15,7 @@ from spade.message import Message
from spade.template import Template
from control_backend.core.config import settings
from control_backend.agents.llm.llm_instructions import LLMInstructions
class LLMAgent(Agent):
@@ -32,7 +33,7 @@ class LLMAgent(Agent):
the BDI Core Agent and handle them.
"""
async def run(self) -> None:
async def run(self):
"""
Receives SPADE messages and processes only those originating from the
configured BDI agent.
@@ -54,7 +55,7 @@ class LLMAgent(Agent):
else:
self.agent.logger.debug("Message ignored (not from BDI Core Agent)")
async def _process_bdi_message(self, message: Message) -> None:
async def _process_bdi_message(self, message: Message):
"""
Forwards user text to the LLM and replies with the generated text.
"""
@@ -62,14 +63,13 @@ class LLMAgent(Agent):
llm_response = await self._query_llm(user_text)
await self._reply(llm_response)
async def _reply(self, msg: str) -> None:
async def _reply(self, msg: str):
"""
Sends a response message back to the BDI Core Agent.
"""
reply = Message(
to= settings.agent_settings.bdi_core_agent_name + '@' + settings.agent_settings.host,
body= msg,
thread= "llm_response",
body= msg
)
await self.send(reply)
self.agent.logger.info("Reply sent to BDI Core Agent")
@@ -82,15 +82,30 @@ class LLMAgent(Agent):
:return: LLM-generated content or fallback message.
"""
async with httpx.AsyncClient(timeout=120.0) as client:
# Example dynamic content for future (optional)
instructions = LLMInstructions()
developer_instruction = instructions.build_developer_instruction()
response = await client.post(
settings.llm_settings.local_llm_url,
headers={"Content-Type": "application/json"},
json={
"model": settings.llm_settings.local_llm_model,
"messages": [{"role": "user", "content": prompt}],
"temperature": 0.3,
"messages": [
{
"role": "developer",
"content": developer_instruction
},
{
"role": "user",
"content": prompt
}
],
"temperature": 0.3
},
)
try:
response.raise_for_status()
data: dict[str, Any] = response.json()
@@ -104,15 +119,12 @@ class LLMAgent(Agent):
self.agent.logger.error("Unexpected error: %s", err)
return "Error processing the request."
async def setup(self) -> None:
async def setup(self):
"""
Sets up the SPADE behaviour to filter and process messages from the
BDI Core Agent.
"""
self.logger.info("LLMAgent setup complete")
template = Template()
template.sender = settings.agent_settings.bdi_core_agent_name + '@' + settings.agent_settings.host
behaviour = self.ReceiveMessageBehaviour()
self.add_behaviour(behaviour, template)
self.add_behaviour(behaviour)

View File

@@ -0,0 +1,44 @@
class LLMInstructions:
"""
Defines structured instructions that are sent along with each request
to the LLM to guide its behavior (norms, goals, etc.).
"""
@staticmethod
def default_norms() -> str:
return f"""
Be friendly and respectful.
Make the conversation feel natural and engaging.
""".strip()
@staticmethod
def default_goals() -> str:
return f"""
Try to learn the user's name during conversation.
""".strip()
def __init__(self, norms: str | None = None, goals: str | None = None):
self.norms = norms if norms is not None else self.default_norms()
self.goals = goals if goals is not None else self.default_goals()
def build_developer_instruction(self) -> str:
"""
Builds a multi-line formatted instruction string for the LLM.
Includes only non-empty structured fields.
"""
sections = [
"You are a Pepper robot engaging in natural human conversation.",
"Keep responses between 15 sentences, unless instructed otherwise.\n",
]
if self.norms:
sections.append("Norms to follow:")
sections.append(self.norms)
sections.append("")
if self.goals:
sections.append("Goals to reach:")
sections.append(self.goals)
sections.append("")
return "\n".join(sections).strip()

View File

@@ -44,10 +44,10 @@ async def lifespan(app: FastAPI):
llm_agent = LLMAgent(settings.agent_settings.llm_agent_name + '@' + settings.agent_settings.host,
"secret, ask twirre")
settings.agent_settings.llm_agent_name)
await llm_agent.start()
bdi_core = BDICoreAgent(settings.agent_settings.bdi_core_agent_name + '@' + settings.agent_settings.host,
"secret, ask twirre", "src/control_backend/agents/bdi/rules.asl")
settings.agent_settings.bdi_core_agent_name, "src/control_backend/agents/bdi/rules.asl")
await bdi_core.start()
_temp_vad_agent = VADAgent("tcp://localhost:5558", False)