feat: (hopefully) optional norms and goals
ref: N25B-200
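
Before this change .reply always took three arguments, so the agent only worked when both norms and goals existed as beliefs (the test_norm / test_goal placeholders). The commit drops those placeholders, adds .reply_no_norms, .reply_no_goals and .reply_no_goals_no_norms actions, and turns norms/goals into optional keyword arguments of _send_to_llm that fall back to empty strings on the wire. A minimal stand-alone sketch of the intended call shapes (plain Python, no agent infrastructure; dispatch is only a stand-in for BDICoreAgent._send_to_llm):

# Stand-in for BDICoreAgent._send_to_llm: missing norms/goals collapse to ""
# exactly as in the message_dict change in the diff below.
def dispatch(text: str, norms: str = None, goals: str = None) -> dict:
    return {"text": text, "norms": norms if norms else "", "goals": goals if goals else ""}

dispatch("Hello LLM!", norms="n", goals="g")  # via .reply(Message, Norms, Goals)
dispatch("Hello LLM!", norms="n")             # via .reply_no_goals(Message, Norms)
dispatch("Hello LLM!", goals="g")             # via .reply_no_norms(Message, Goals)
dispatch("Hello LLM!")                        # via .reply_no_goals_no_norms(Message)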
@@ -45,17 +45,45 @@ class BDICoreAgent(BDIAgent):
             Example: .reply("Hello LLM!")
             """
             message_text = agentspeak.grounded(term.args[0], intention.scope)
-            norm = agentspeak.grounded(term.args[1], intention.scope)
-            goal = agentspeak.grounded(term.args[2], intention.scope)
+            norms = agentspeak.grounded(term.args[1], intention.scope)
+            goals = agentspeak.grounded(term.args[2], intention.scope)
 
-            self.logger.debug("Norms: %s", norm)
-            self.logger.debug("Goals: %s", goal)
-            self.logger.debug("Reply action sending: %s", message_text)
+            self.logger.debug("Norms: %s", norms)
+            self.logger.debug("Goals: %s", goals)
+            self.logger.debug("User text: %s", message_text)
 
-            self._send_to_llm(str(message_text), str(norm), str(goal))
+            self._send_to_llm(str(message_text), str(norms), str(goals))
             yield
 
-    def _send_to_llm(self, text: str, norm: str, goal: str):
+        @actions.add(".reply_no_norms", 2)
+        def _reply_no_norms(agent: "BDICoreAgent", term, intention):
+            message_text = agentspeak.grounded(term.args[0], intention.scope)
+            goals = agentspeak.grounded(term.args[1], intention.scope)
+
+            self.logger.debug("Goals: %s", goals)
+            self.logger.debug("User text: %s", message_text)
+
+            self._send_to_llm(str(message_text), goals=str(goals))
+
+        @actions.add(".reply_no_goals", 2)
+        def _reply_no_goals(agent: "BDICoreAgent", term, intention):
+            message_text = agentspeak.grounded(term.args[0], intention.scope)
+            norms = agentspeak.grounded(term.args[1], intention.scope)
+
+            self.logger.debug("Norms: %s", norms)
+            self.logger.debug("User text: %s", message_text)
+
+            self._send_to_llm(str(message_text), norms=str(norms))
+
+        @actions.add(".reply_no_goals_no_norms", 1)
+        def _reply_no_goals_no_norms(agent: "BDICoreAgent", term, intention):
+            message_text = agentspeak.grounded(term.args[0], intention.scope)
+
+            self.logger.debug("User text: %s", message_text)
+
+            self._send_to_llm(message_text)
+
+    def _send_to_llm(self, text: str, norms: str = None, goals: str = None):
         """
         Sends a text query to the LLM Agent asynchronously.
         """
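
The new handlers keep the (agent, term, intention) signature that agentspeak custom actions use, yet call self.logger and self._send_to_llm; that only resolves if they are nested inside a method of BDICoreAgent so that self is captured by closure, and the enclosing method is not visible in this hunk. A sketch under the assumption that they live in spade_bdi's usual add_custom_actions hook, mirroring the existing .reply action (including its trailing yield):

# Assumption: the .reply_no_* handlers are nested in add_custom_actions (the
# enclosing method is not part of this diff), so 'self' comes from the closure.
import agentspeak
from spade_bdi.bdi import BDIAgent

class SketchBDIAgent(BDIAgent):
    def add_custom_actions(self, actions):
        @actions.add(".reply_no_norms", 2)
        def _reply_no_norms(agent, term, intention):
            message_text = agentspeak.grounded(term.args[0], intention.scope)
            goals = agentspeak.grounded(term.args[1], intention.scope)
            self._send_to_llm(str(message_text), goals=str(goals))  # 'self' via closure
            yield

    def _send_to_llm(self, text: str, norms: str = None, goals: str = None):
        ...  # real implementation builds message_dict and sends a spade Message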
@@ -64,8 +92,8 @@ class BDICoreAgent(BDIAgent):
             async def run(self) -> None:
                 message_dict = {
                     "text": text,
-                    "norms": norm,
-                    "goals": goal,
+                    "norms": norms if norms else "",
+                    "goals": goals if goals else "",
                 }
                 msg = Message(
                     to=settings.agent_settings.llm_agent_name + "@" + settings.agent_settings.host,
@@ -1,6 +1,15 @@
-norms(test_norm).
-goals(test_goal).
-
 +new_message : user_said(Message) & norms(Norms) & goals(Goals) <-
     -new_message;
     .reply(Message, Norms, Goals).
+
++new_message : user_said(Message) & norms(Norms) <-
+    -new_message;
+    .reply_no_goals(Message, Norms).
+
++new_message : user_said(Message) & goals(Goals) <-
+    -new_message;
+    .reply_no_norms(Message, Goals).
+
++new_message : user_said(Message) <-
+    -new_message;
+    .reply_no_goals_no_norms(Message).
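
Plan order matters in the block above: with the default Jason-style option selection that agentspeak mimics (the first applicable plan in source order wins), the most specific guard has to come first, otherwise a broader +new_message plan would also fire when norms or goals are believed. The intended fall-through, as a plain-Python illustration:

# Plain-Python illustration of the guard fall-through the four .asl plans encode.
def pick_reply_action(has_norms: bool, has_goals: bool) -> str:
    if has_norms and has_goals:
        return ".reply"
    if has_norms:
        return ".reply_no_goals"
    if has_goals:
        return ".reply_no_norms"
    return ".reply_no_goals_no_norms"

assert pick_reply_action(True, True) == ".reply"
assert pick_reply_action(True, False) == ".reply_no_goals"
assert pick_reply_action(False, True) == ".reply_no_norms"
assert pick_reply_action(False, False) == ".reply_no_goals_no_norms"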
@@ -82,7 +82,7 @@ class LLMAgent(BaseAgent):
         :param prompt: Input text prompt to pass to the LLM.
         :yield: Fragments of the LLM-generated content.
         """
-        instructions = LLMInstructions(norms, goals)
+        instructions = LLMInstructions(norms if norms else None, goals if goals else None)
        messages = [
            {
                "role": "developer",
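
On the LLM side, the empty-string placeholders from message_dict are mapped back to None before the instructions are built, so LLMInstructions (project code not shown in this diff) can leave out whatever is missing. A hypothetical stand-in for that behaviour, with illustrative wording:

# Hypothetical stand-in for LLMInstructions (the real class is not in this diff):
# "" and None are treated the same, so absent norms/goals drop out of the prompt.
def build_instructions(norms: str = None, goals: str = None) -> str:
    parts = []
    if norms:
        parts.append(f"Norms to respect: {norms}")
    if goals:
        parts.append(f"Goals to pursue: {goals}")
    return "\n".join(parts)

assert build_instructions(norms="", goals="be brief") == "Goals to pursue: be brief"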