feat: add norms and goals to LLM
Base goals and norms can now be defined in llm_instructions.py. Cleaned up the code. ref: N25B-215
@@ -15,6 +15,7 @@ from spade.message import Message
 from spade.template import Template
 
 from control_backend.core.config import settings
+from control_backend.agents.llm.llm_instructions import LLMInstructions
 
 
 class LLMAgent(Agent):
@@ -32,7 +33,7 @@ class LLMAgent(Agent):
         the BDI Core Agent and handle them.
         """
 
-        async def run(self) -> None:
+        async def run(self):
             """
             Receives SPADE messages and processes only those originating from the
             configured BDI agent.
@@ -54,7 +55,7 @@ class LLMAgent(Agent):
             else:
                 self.agent.logger.debug("Message ignored (not from BDI Core Agent)")
 
-        async def _process_bdi_message(self, message: Message) -> None:
+        async def _process_bdi_message(self, message: Message):
             """
             Forwards user text to the LLM and replies with the generated text.
             """
@@ -62,14 +63,13 @@ class LLMAgent(Agent):
             llm_response = await self._query_llm(user_text)
             await self._reply(llm_response)
 
-        async def _reply(self, msg: str) -> None:
+        async def _reply(self, msg: str):
             """
             Sends a response message back to the BDI Core Agent.
             """
             reply = Message(
                 to= settings.agent_settings.bdi_core_agent_name + '@' + settings.agent_settings.host,
-                body= msg,
-                thread= "llm_response",
+                body= msg
             )
             await self.send(reply)
             self.agent.logger.info("Reply sent to BDI Core Agent")
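
Note: the settings attributes used here (agent_settings.bdi_core_agent_name, agent_settings.host) and in _query_llm below (llm_settings.local_llm_url, llm_settings.local_llm_model) come from control_backend/core/config.py, which is not part of this diff. A minimal sketch of how that module might be laid out; only the attribute names are taken from the diff, the pydantic layout and all default values are invented here:

# Hypothetical sketch of control_backend/core/config.py.
# Field names appear in this diff; the layout and defaults are assumed.
from pydantic import BaseModel


class AgentSettings(BaseModel):
    bdi_core_agent_name: str = "bdi_core"  # assumed default
    host: str = "localhost"                # assumed default


class LLMSettings(BaseModel):
    # assumed: an OpenAI-compatible chat completions endpoint
    local_llm_url: str = "http://localhost:11434/v1/chat/completions"
    local_llm_model: str = "llama3"


class Settings(BaseModel):
    agent_settings: AgentSettings = AgentSettings()
    llm_settings: LLMSettings = LLMSettings()


settings = Settings()

With these example values, the reply above would be addressed to bdi_core@localhost.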
@@ -82,15 +82,30 @@ class LLMAgent(Agent):
             :return: LLM-generated content or fallback message.
             """
             async with httpx.AsyncClient(timeout=120.0) as client:
+                # Example dynamic content for future (optional)
+
+                instructions = LLMInstructions()
+                developer_instruction = instructions.build_developer_instruction()
+
                 response = await client.post(
                     settings.llm_settings.local_llm_url,
                     headers={"Content-Type": "application/json"},
                     json={
                         "model": settings.llm_settings.local_llm_model,
-                        "messages": [{"role": "user", "content": prompt}],
-                        "temperature": 0.3,
+                        "messages": [
+                            {
+                                "role": "developer",
+                                "content": developer_instruction
+                            },
+                            {
+                                "role": "user",
+                                "content": prompt
+                            }
+                        ],
+                        "temperature": 0.3
                     },
                 )
 
                 try:
                     response.raise_for_status()
                     data: dict[str, Any] = response.json()
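
llm_instructions.py itself is not shown in this diff; per the commit message, it is where the base goals and norms are defined. A minimal sketch of what LLMInstructions could look like, assuming the goals and norms are plain string lists; only the class name and build_developer_instruction() come from the diff, everything else is an assumption:

# Hypothetical sketch of control_backend/agents/llm/llm_instructions.py.
# Class and method names appear in the diff; content and structure are assumed.


class LLMInstructions:
    """Base goals and norms injected into every LLM request."""

    BASE_GOALS = [
        "Help the user on behalf of the BDI Core Agent.",  # example goal
    ]
    BASE_NORMS = [
        "Answer concisely and stay on topic.",  # example norm
    ]

    def build_developer_instruction(self) -> str:
        """Render the goals and norms as a single instruction string."""
        goals = "\n".join(f"- {goal}" for goal in self.BASE_GOALS)
        norms = "\n".join(f"- {norm}" for norm in self.BASE_NORMS)
        return f"Goals:\n{goals}\n\nNorms:\n{norms}"

The resulting string is sent with the "developer" role; newer OpenAI-style APIs treat that role as the successor to the system role, but older local servers may only accept "system" instead.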
@@ -104,15 +119,12 @@ class LLMAgent(Agent):
                 self.agent.logger.error("Unexpected error: %s", err)
                 return "Error processing the request."
 
-    async def setup(self) -> None:
+    async def setup(self):
         """
         Sets up the SPADE behaviour to filter and process messages from the
         BDI Core Agent.
         """
         self.logger.info("LLMAgent setup complete")
 
-        template = Template()
-        template.sender = settings.agent_settings.bdi_core_agent_name + '@' + settings.agent_settings.host
-
         behaviour = self.ReceiveMessageBehaviour()
-        self.add_behaviour(behaviour, template)
+        self.add_behaviour(behaviour)
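
With the Template filter removed from setup(), every message now reaches ReceiveMessageBehaviour, and the sender check happens inside run() itself (the "Message ignored (not from BDI Core Agent)" branch above). A sketch of what that check plausibly looks like; only the log line and method names come from the diff, the timeout and the bare-JID comparison are assumptions:

# Hypothetical sketch of ReceiveMessageBehaviour.run() after this change.
async def run(self):
    message = await self.receive(timeout=10)  # assumed timeout
    if message is None:
        return
    expected_jid = (settings.agent_settings.bdi_core_agent_name
                    + '@' + settings.agent_settings.host)
    # strip the XMPP resource before comparing bare JIDs (assumed)
    if str(message.sender).split('/')[0] == expected_jid:
        await self._process_bdi_message(message)
    else:
        self.agent.logger.debug("Message ignored (not from BDI Core Agent)")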