feat: LLM agent

body:   Added the llmAgent class and made it run at startup.
        Modified the bdi_core to send a test message and receive an answer from the LLM agent.
        Added a connection to a local LLM via LM Studio.

        Tests are TBA.

ref: N25B-207
JobvAlewijk
2025-10-27 14:21:18 +01:00
parent 9b36982bf2
commit c5b71450fc
6 changed files with 222 additions and 16 deletions
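
The commit body mentions a connection to a local LLM via LM Studio, but the llmAgent file itself is not among the hunks shown below. As a purely illustrative sketch of how such a connection could look (the endpoint, model name, and function name are assumptions; LM Studio typically exposes an OpenAI-compatible server on localhost:1234):

import requests

# Assumed default LM Studio endpoint (OpenAI-compatible chat completions API).
LMSTUDIO_URL = "http://localhost:1234/v1/chat/completions"

def ask_local_llm(prompt: str, model: str = "local-model") -> str:
    """Send a single-turn chat completion request to the local LM Studio server."""
    response = requests.post(
        LMSTUDIO_URL,
        json={
            "model": model,
            "messages": [{"role": "user", "content": prompt}],
            "temperature": 0.7,
        },
        timeout=60,
    )
    response.raise_for_status()
    return response.json()["choices"][0]["message"]["content"]

if __name__ == "__main__":
    print(ask_local_llm("Hello we are the Pepper plus team"))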


@@ -1,35 +1,96 @@
import asyncio
import logging

import agentspeak
from spade.behaviour import CyclicBehaviour, OneShotBehaviour
from spade.message import Message
from spade.template import Template
from spade_bdi.bdi import BDIAgent

from control_backend.agents.bdi.behaviours.belief_setter import BeliefSetter
from control_backend.core.config import settings


class BDICoreAgent(BDIAgent):
    """
    This is the Brain agent that does the belief inference with AgentSpeak.
    This is a continuous process that happens automatically in the background.
    This class contains all the actions that can be called from AgentSpeak plans.
    It has the BeliefSetter behaviour and can send requests to and receive
    responses from the LLM agent.
    """

    logger = logging.getLogger("bdi_core_agent")

    async def setup(self) -> None:
        """
        Initializes belief behaviours and message routing.
        """
        self.logger.info("BDICoreAgent setup started")
        self.add_behaviour(BeliefSetter())
        self._add_llm_response_receiver()
        # This is the example message currently sent to the LLM at the start of the program.
        await self._send_to_llm("Hello we are the Pepper plus team")
        self.logger.info("BDICoreAgent setup complete")

    def add_custom_actions(self, actions) -> None:
        """
        Registers custom AgentSpeak actions callable from plans.
        """

        @actions.add(".reply", 1)
        def _reply(agent: "BDICoreAgent", term, intention):
            """
            Sends text to the LLM (AgentSpeak action).
            Example: .reply("Hello LLM!")
            """
            message_text = agentspeak.grounded(term.args[0], intention.scope)
            self.logger.info("Reply action sending: %s", message_text)
            # _send_to_llm is a coroutine; schedule it, since AgentSpeak actions run synchronously.
            asyncio.ensure_future(self._send_to_llm(message_text))
            yield

    async def _send_to_llm(self, text: str) -> str:
        """
        Sends a text query to the LLM agent asynchronously.
        """

        class SendBehaviour(OneShotBehaviour):
            async def run(self) -> None:
                msg = Message(
                    to=f"{settings.agent_settings.test_agent_name}@"
                    f"{settings.agent_settings.host}",
                    body=text,
                    thread="llm_request",
                )
                msg.set_metadata("performative", "inform")
                await self.send(msg)
                self.agent.logger.debug("Message sent to LLM: %s", text)

        self.add_behaviour(SendBehaviour())
        return "LLM message dispatch scheduled"

    def _add_llm_response_receiver(self) -> None:
        """
        Adds a behaviour that receives responses from the LLM agent.
        """

        class ReceiveLLMResponseBehaviour(CyclicBehaviour):
            async def run(self) -> None:
                msg = await self.receive(timeout=2)
                if not msg:
                    return
                content = msg.body
                self.agent.logger.info("Received LLM response: %s", content)
                # TODO: Convert response into a belief (optional future feature)
                # Example:
                # self.agent.add_belief("llm_response", content)
                # self.agent.logger.debug("Added belief: llm_response(%s)", content)

        template = Template()
        template.thread = "llm_response"
        self.add_behaviour(ReceiveLLMResponseBehaviour(), template)
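
The BDI core sends its queries on thread "llm_request" and filters incoming replies on thread "llm_response". The LLM agent itself is not shown in this diff; the following is a minimal sketch of what its receiving side might look like if it mirrors that thread convention (class name, echo reply, and structure are illustrative assumptions, not the actual implementation):

from spade.agent import Agent
from spade.behaviour import CyclicBehaviour
from spade.template import Template


class LLMAgentSketch(Agent):
    """Hypothetical counterpart that answers BDI core requests."""

    async def setup(self):
        template = Template()
        template.thread = "llm_request"  # only pick up requests from the BDI core
        self.add_behaviour(self.HandleRequest(), template)

    class HandleRequest(CyclicBehaviour):
        async def run(self):
            msg = await self.receive(timeout=2)
            if not msg:
                return
            # Placeholder for the real LM Studio call.
            answer = f"Echo from local LLM: {msg.body}"
            reply = msg.make_reply()
            reply.body = answer
            reply.thread = "llm_response"  # matches the BDI core receiver template
            await self.send(reply)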


@@ -33,6 +33,7 @@ class BeliefSetter(CyclicBehaviour):
self.logger.debug("Processing message from belief collector.")
self._process_belief_message(message)
case _:
self.logger.debug("Not the belief agent, discarding message")
pass
def _process_belief_message(self, message: Message):