diff --git a/src/control_backend/agents/bdi/bdi_core.py b/src/control_backend/agents/bdi/bdi_core.py
index 4d68e26..1c267d3 100644
--- a/src/control_backend/agents/bdi/bdi_core.py
+++ b/src/control_backend/agents/bdi/bdi_core.py
@@ -1,3 +1,4 @@
+import json
 import logging
 
 import agentspeak
@@ -37,28 +38,38 @@ class BDICoreAgent(BDIAgent):
         Registers custom AgentSpeak actions callable from plans.
         """
 
-        @actions.add(".reply", 1)
+        @actions.add(".reply", 3)
         def _reply(agent: "BDICoreAgent", term, intention):
             """
-            Sends text to the LLM (AgentSpeak action).
-            Example: .reply("Hello LLM!")
+            Sends text plus the current norms and goals to the LLM (AgentSpeak action).
+            Example: .reply("Hello LLM!", Norms, Goals)
             """
             message_text = agentspeak.grounded(term.args[0], intention.scope)
+            norm = agentspeak.grounded(term.args[1], intention.scope)
+            goal = agentspeak.grounded(term.args[2], intention.scope)
+
+            self.logger.debug("Norms: %s", norm)
+            self.logger.debug("Goals: %s", goal)
             self.logger.debug("Reply action sending: %s", message_text)
-            self._send_to_llm(str(message_text))
+            self._send_to_llm(str(message_text), str(norm), str(goal))
             yield
 
-    def _send_to_llm(self, text: str):
+    def _send_to_llm(self, text: str, norm: str, goal: str):
         """
         Sends a text query to the LLM Agent asynchronously.
         """
 
        class SendBehaviour(OneShotBehaviour):
            async def run(self) -> None:
+                message_dict = {
+                    "text": text,
+                    "norms": norm,
+                    "goals": goal,
+                }
                msg = Message(
                    to=settings.agent_settings.llm_agent_name
                    + "@"
                    + settings.agent_settings.host,
-                    body=text,
+                    body=json.dumps(message_dict),
                )
                await self.send(msg)
diff --git a/src/control_backend/agents/bdi/behaviours/belief_setter.py b/src/control_backend/agents/bdi/behaviours/belief_setter.py
index ba68b69..3a98f02 100644
--- a/src/control_backend/agents/bdi/behaviours/belief_setter.py
+++ b/src/control_backend/agents/bdi/behaviours/belief_setter.py
@@ -39,8 +39,13 @@ class BeliefSetterBehaviour(CyclicBehaviour):
                     "Message is from the belief collector agent. Processing as belief message."
                 )
                 self._process_belief_message(message)
+            case settings.agent_settings.program_manager_name:
+                self.agent.logger.debug(
+                    "Message is from the program manager. Processing as belief message."
+                )
+                self._process_belief_message(message)
             case _:
-                self.agent.logger.debug("Not the belief agent, discarding message")
+                self.agent.logger.debug("Not from an expected agent, discarding message")
                 pass
 
     def _process_belief_message(self, message: Message):
diff --git a/src/control_backend/agents/bdi/rules.asl b/src/control_backend/agents/bdi/rules.asl
index 0001d3c..76369af 100644
--- a/src/control_backend/agents/bdi/rules.asl
+++ b/src/control_backend/agents/bdi/rules.asl
@@ -1,3 +1,6 @@
-+new_message : user_said(Message) <-
+norms(test_norm).
+goals(test_goal).
+
++new_message : user_said(Message) & norms(Norms) & goals(Goals) <-
     -new_message;
-    .reply(Message).
+    .reply(Message, Norms, Goals).
diff --git a/src/control_backend/agents/llm/llm.py b/src/control_backend/agents/llm/llm.py
index e115509..3fa9adf 100644
--- a/src/control_backend/agents/llm/llm.py
+++ b/src/control_backend/agents/llm/llm.py
@@ -52,9 +52,14 @@ class LLMAgent(BaseAgent):
         Forwards user text from the BDI to the LLM and replies with the
         generated text in chunks separated by punctuation.
         """
-        user_text = message.body
+        try:
+            payload = json.loads(message.body)
+        except json.JSONDecodeError:
+            self.agent.logger.error("Could not decode BDI message body as JSON.", exc_info=True)
+            return
+
         # Consume the streaming generator and send a reply for every chunk
-        async for chunk in self._query_llm(user_text):
+        async for chunk in self._query_llm(payload["text"], payload["norms"], payload["goals"]):
             await self._reply(chunk)
         self.agent.logger.debug(
             "Finished processing BDI message. Response sent in chunks to BDI Core Agent."
@@ -70,7 +75,7 @@
             )
         await self.send(reply)
 
-    async def _query_llm(self, prompt: str) -> AsyncGenerator[str]:
+    async def _query_llm(self, prompt: str, norms: str, goals: str) -> AsyncGenerator[str]:
         """
         Sends a chat completion request to the local LLM service and streams
-        the response by yielding fragments separated by punctuation like.
+        the response by yielding fragments separated by punctuation.
@@ -78,15 +83,9 @@
         :param prompt: Input text prompt to pass to the LLM.
+        :param norms: Norms to include in the LLM instructions.
+        :param goals: Goals to include in the LLM instructions.
         :yield: Fragments of the LLM-generated content.
         """
-        instructions = LLMInstructions(
-            "- Be friendly and respectful.\n"
-            "- Make the conversation feel natural and engaging.\n"
-            "- Speak like a pirate.\n"
-            "- When the user asks what you can do, tell them.",
-            "- Try to learn the user's name during conversation.\n"
-            "- Suggest playing a game of asking yes or no questions where you think of a word "
-            "and the user must guess it.",
-        )
+        instructions = LLMInstructions(norms, goals)
         messages = [
             {
                 "role": "developer",
diff --git a/src/control_backend/agents/ri_communication_agent.py b/src/control_backend/agents/ri_communication_agent.py
index 76d6431..2ea3b3d 100644
--- a/src/control_backend/agents/ri_communication_agent.py
+++ b/src/control_backend/agents/ri_communication_agent.py
@@ -136,7 +136,7 @@ class RICommunicationAgent(BaseAgent):
             settings.agent_settings.ri_command_agent_name
             + "@"
             + settings.agent_settings.host,
-            settings.agent_settings.ri_command_agent_name,
+            "pohpu7-huqsyH-qutduk",
             address=addr,
             bind=bind,
         )