feat: norms and goals in BDI

ref: N25B-200
This commit is contained in:
2025-11-12 13:35:15 +01:00
parent e39139cac9
commit 273f621b1b
5 changed files with 35 additions and 20 deletions

View File

@@ -1,3 +1,4 @@
import json
import logging

import agentspeak
@@ -37,28 +38,38 @@ class BDICoreAgent(BDIAgent):
Registers custom AgentSpeak actions callable from plans. Registers custom AgentSpeak actions callable from plans.
""" """
@actions.add(".reply", 1) @actions.add(".reply", 3)
def _reply(agent: "BDICoreAgent", term, intention): def _reply(agent: "BDICoreAgent", term, intention):
""" """
Sends text to the LLM (AgentSpeak action). Sends text to the LLM (AgentSpeak action).
Example: .reply("Hello LLM!") Example: .reply("Hello LLM!")
""" """
message_text = agentspeak.grounded(term.args[0], intention.scope) message_text = agentspeak.grounded(term.args[0], intention.scope)
norm = agentspeak.grounded(term.args[1], intention.scope)
goal = agentspeak.grounded(term.args[2], intention.scope)
self.logger.debug("Norms: %s", norm)
self.logger.debug("Goals: %s", goal)
self.logger.debug("Reply action sending: %s", message_text) self.logger.debug("Reply action sending: %s", message_text)
self._send_to_llm(str(message_text)) self._send_to_llm(str(message_text), str(norm), str(goal))
yield yield
def _send_to_llm(self, text: str): def _send_to_llm(self, text: str, norm: str, goal: str):
""" """
Sends a text query to the LLM Agent asynchronously. Sends a text query to the LLM Agent asynchronously.
""" """
class SendBehaviour(OneShotBehaviour): class SendBehaviour(OneShotBehaviour):
async def run(self) -> None: async def run(self) -> None:
message_dict = {
"text": text,
"norms": norm,
"goals": goal,
}
msg = Message( msg = Message(
to=settings.agent_settings.llm_agent_name + "@" + settings.agent_settings.host, to=settings.agent_settings.llm_agent_name + "@" + settings.agent_settings.host,
body=text, body=json.dumps(message_dict),
) )
await self.send(msg) await self.send(msg)

View File

@@ -39,8 +39,13 @@ class BeliefSetterBehaviour(CyclicBehaviour):
"Message is from the belief collector agent. Processing as belief message." "Message is from the belief collector agent. Processing as belief message."
) )
self._process_belief_message(message) self._process_belief_message(message)
case settings.agent_settings.program_manager_name:
self.agent.logger.debug(
"Processing message from the program manager. Processing as belief message."
)
self._process_belief_message(message)
case _: case _:
self.agent.logger.debug("Not the belief agent, discarding message") self.agent.logger.debug("Not from expected agents, discarding message")
pass pass
def _process_belief_message(self, message: Message): def _process_belief_message(self, message: Message):

View File

@@ -1,3 +1,6 @@
Before:
  +new_message : user_said(Message) <-
      -new_message;
      .reply(Message).

After:
  norms(test_norm).
  goals(test_goal).
  +new_message : user_said(Message) & norms(Norms) & goals(Goals) <-
      -new_message;
      .reply(Message, Norms, Goals).

View File

@@ -52,9 +52,13 @@ class LLMAgent(BaseAgent):
Forwards user text from the BDI to the LLM and replies with the generated text in chunks Forwards user text from the BDI to the LLM and replies with the generated text in chunks
separated by punctuation. separated by punctuation.
""" """
user_text = message.body try:
message = json.loads(message.body)
except json.JSONDecodeError:
self.agent.logger.error("Could not process BDI message.", exc_info=True)
# Consume the streaming generator and send a reply for every chunk # Consume the streaming generator and send a reply for every chunk
async for chunk in self._query_llm(user_text): async for chunk in self._query_llm(message["text"], message["norms"], message["goals"]):
await self._reply(chunk) await self._reply(chunk)
self.agent.logger.debug( self.agent.logger.debug(
"Finished processing BDI message. Response sent in chunks to BDI Core Agent." "Finished processing BDI message. Response sent in chunks to BDI Core Agent."
@@ -70,7 +74,7 @@ class LLMAgent(BaseAgent):
) )
await self.send(reply) await self.send(reply)
async def _query_llm(self, prompt: str) -> AsyncGenerator[str]: async def _query_llm(self, prompt: str, norms: str, goals: str) -> AsyncGenerator[str]:
""" """
Sends a chat completion request to the local LLM service and streams the response by Sends a chat completion request to the local LLM service and streams the response by
yielding fragments separated by punctuation like. yielding fragments separated by punctuation like.
@@ -78,15 +82,7 @@ class LLMAgent(BaseAgent):
:param prompt: Input text prompt to pass to the LLM. :param prompt: Input text prompt to pass to the LLM.
:yield: Fragments of the LLM-generated content. :yield: Fragments of the LLM-generated content.
""" """
instructions = LLMInstructions( instructions = LLMInstructions(norms, goals)
"- Be friendly and respectful.\n"
"- Make the conversation feel natural and engaging.\n"
"- Speak like a pirate.\n"
"- When the user asks what you can do, tell them.",
"- Try to learn the user's name during conversation.\n"
"- Suggest playing a game of asking yes or no questions where you think of a word "
"and the user must guess it.",
)
messages = [ messages = [
{ {
"role": "developer", "role": "developer",

View File

@@ -136,7 +136,7 @@ class RICommunicationAgent(BaseAgent):
settings.agent_settings.ri_command_agent_name settings.agent_settings.ri_command_agent_name
+ "@" + "@"
+ settings.agent_settings.host, + settings.agent_settings.host,
settings.agent_settings.ri_command_agent_name, "pohpu7-huqsyH-qutduk",
address=addr, address=addr,
bind=bind, bind=bind,
) )