Merge remote-tracking branch 'origin/dev' into feat/use-experiment-logs

# Conflicts:
#	src/control_backend/agents/llm/llm_agent.py
@@ -348,7 +348,7 @@ class BDICoreAgent(BaseAgent):
             yield
 
         @self.actions.add(".reply_with_goal", 3)
-        def _reply_with_goal(agent: "BDICoreAgent", term, intention):
+        def _reply_with_goal(agent, term, intention):
             """
             Let the LLM generate a response to a user's utterance with the current norms and a
             specific goal.
@@ -524,10 +524,6 @@ class BDICoreAgent(BaseAgent):
 
             yield
 
-        @self.actions.add(".notify_ui", 0)
-        def _notify_ui(agent, term, intention):
-            pass
-
     async def _send_to_llm(self, text: str, norms: str, goals: str):
         """
         Sends a text query to the LLM agent asynchronously.
@@ -345,6 +345,9 @@ class TextBeliefExtractorAgent(BaseAgent):
         async with httpx.AsyncClient() as client:
             response = await client.post(
                 settings.llm_settings.local_llm_url,
+                headers={"Authorization": f"Bearer {settings.llm_settings.api_key}"}
+                if settings.llm_settings.api_key
+                else {},
                 json={
                     "model": settings.llm_settings.local_llm_model,
                     "messages": [{"role": "user", "content": prompt}],
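Note: this hunk and the matching one in LLMAgent below introduce the same conditional-auth pattern: the Authorization header is attached only when an API key is configured. A minimal sketch of the pattern, assuming only httpx and an optional api_key string (names here are illustrative, not the project's):

import httpx

async def post_chat(url: str, payload: dict, api_key: str = "") -> httpx.Response:
    # Attach the bearer token only when a key is configured; a local
    # server (e.g. LM Studio on localhost:1234) usually needs none.
    headers = {"Authorization": f"Bearer {api_key}"} if api_key else {}
    async with httpx.AsyncClient() as client:
        return await client.post(url, headers=headers, json=payload)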
@@ -1,3 +1,4 @@
+import asyncio
 import json
 import logging
 import re
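Note: the new asyncio import backs the asyncio.Event added to LLMAgent.__init__ in the next hunk.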
@@ -35,6 +36,10 @@ class LLMAgent(BaseAgent):
     def __init__(self, name: str):
         super().__init__(name)
         self.history = []
+        self._querying = False
+        self._interrupted = False
+        self._interrupted_message = ""
+        self._go_ahead = asyncio.Event()
 
     async def setup(self):
         self.logger.info("Setting up %s.", self.name)
@@ -53,13 +58,13 @@ class LLMAgent(BaseAgent):
                 case "prompt_message":
                     try:
                         prompt_message = LLMPromptMessage.model_validate_json(msg.body)
-                        await self._process_bdi_message(prompt_message)
+                        self.add_behavior(self._process_bdi_message(prompt_message))  # no block
                     except ValidationError:
                         self.logger.debug("Prompt message from BDI core is invalid.")
                 case "assistant_message":
-                    self.history.append({"role": "assistant", "content": msg.body})
+                    self._apply_conversation_message({"role": "assistant", "content": msg.body})
                 case "user_message":
-                    self.history.append({"role": "user", "content": msg.body})
+                    self._apply_conversation_message({"role": "user", "content": msg.body})
         elif msg.sender == settings.agent_settings.bdi_program_manager_name:
             if msg.body == "clear_history":
                 self.logger.debug("Clearing conversation history.")
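Note: prompt handling is now scheduled as a behavior rather than awaited inline, so the receive loop keeps draining the mailbox while a reply streams. add_behavior is this codebase's own scheduler; with plain asyncio, the equivalent fire-and-forget pattern (illustrative, not the project's API) looks like:

import asyncio

async def handle(msg: str) -> None:               # stand-in for _process_bdi_message
    await asyncio.sleep(1)                         # simulate a slow LLM round-trip

async def receive_loop(queue: asyncio.Queue) -> None:
    tasks: set[asyncio.Task] = set()
    while True:
        msg = await queue.get()
        task = asyncio.create_task(handle(msg))    # schedule; do not block the loop
        tasks.add(task)                            # hold a strong reference
        task.add_done_callback(tasks.discard)      # drop it once finished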
@@ -76,12 +81,45 @@ class LLMAgent(BaseAgent):
 
         :param message: The parsed prompt message containing text, norms, and goals.
         """
+        if self._querying:
+            self.logger.debug("Received another BDI prompt while processing previous message.")
+            self._interrupted = True  # interrupt the previous processing
+            await self._go_ahead.wait()  # wait until we get the go-ahead
+
+            message.text = f"{self._interrupted_message} {message.text}"
+
+        self._go_ahead.clear()
+        self._querying = True
         full_message = ""
         async for chunk in self._query_llm(message.text, message.norms, message.goals):
+            if self._interrupted:
+                self._interrupted_message = message.text
+                self.logger.debug("Interrupted processing of previous message.")
+                break
             await self._send_reply(chunk)
             full_message += chunk
-        self.logger.debug("Finished processing BDI message. Response sent in chunks to BDI core.")
-        await self._send_full_reply(full_message)
+        else:
+            self._querying = False
+
+            self._apply_conversation_message(
+                {
+                    "role": "assistant",
+                    "content": full_message,
+                }
+            )
+            self.logger.debug(
+                "Finished processing BDI message. Response sent in chunks to BDI core."
+            )
+            await self._send_full_reply(full_message)
+
+        self._go_ahead.set()
+        self._interrupted = False
+
+    def _apply_conversation_message(self, message: dict[str, str]):
+        if len(self.history) > 0 and message["role"] == self.history[-1]["role"]:
+            self.history[-1]["content"] += " " + message["content"]
+            return
+        self.history.append(message)
+
     async def _send_reply(self, msg: str):
         """
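Note on the control flow above: _interrupted asks the in-flight stream to stop, and _go_ahead is the acknowledgement that it has stopped; the for/else means the completion branch (history update, full reply) runs only when the stream finished without break. A self-contained sketch of the handshake with the agent plumbing stripped out (all names here are simplified stand-ins):

import asyncio

class Streamer:
    def __init__(self) -> None:
        self._querying = False
        self._interrupted = False
        self._go_ahead = asyncio.Event()

    async def process(self, text: str) -> str:
        if self._querying:
            self._interrupted = True       # ask the running stream to stop
            await self._go_ahead.wait()    # wait until it acknowledges
        self._go_ahead.clear()
        self._querying = True
        full = ""
        async for chunk in self._stream(text):
            if self._interrupted:
                break                      # abandoned; the new prompt merges the texts
            full += chunk
        else:
            self._querying = False         # only on an uninterrupted run
        self._go_ahead.set()               # release anyone waiting on us
        self._interrupted = False
        return full

    async def _stream(self, text: str):
        for word in text.split():
            await asyncio.sleep(0.05)      # simulate token-by-token streaming
            yield word + " "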
@@ -166,13 +204,6 @@ class LLMAgent(BaseAgent):
                 full_message,
                 extra={"role": "assistant", "reference": message_id, "partial": False},
             )
-
-            self.history.append(
-                {
-                    "role": "assistant",
-                    "content": full_message,
-                }
-            )
         except httpx.HTTPError as err:
             self.logger.error("HTTP error.", exc_info=err)
             yield "LLM service unavailable."
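Note: the history bookkeeping removed here is not lost; it moved into _apply_conversation_message (earlier hunk), which also merges consecutive same-role turns, so an interrupted-and-resumed reply lands in the history as a single assistant message rather than two.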
@@ -192,6 +223,9 @@ class LLMAgent(BaseAgent):
             async with client.stream(
                 "POST",
                 settings.llm_settings.local_llm_url,
+                headers={"Authorization": f"Bearer {settings.llm_settings.api_key}"}
+                if settings.llm_settings.api_key
+                else {},
                 json={
                     "model": settings.llm_settings.local_llm_model,
                     "messages": messages,
@@ -145,4 +145,6 @@ class OpenAIWhisperSpeechRecognizer(SpeechRecognizer):
 
     def recognize_speech(self, audio: np.ndarray) -> str:
         self.load_model()
-        return whisper.transcribe(self.model, audio, **self._get_decode_options(audio))["text"]
+        return whisper.transcribe(self.model, audio, **self._get_decode_options(audio))[
+            "text"
+        ].strip()
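Note: Whisper transcripts typically arrive with leading whitespace (its tokenizer space-prefixes words), so the added .strip() keeps padded text out of downstream consumers.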
@@ -9,7 +9,7 @@ from control_backend.agents.bdi.agentspeak_generator import AgentSpeakGenerator
 from control_backend.core.agent_system import InternalMessage
 from control_backend.core.config import settings
 from control_backend.schemas.belief_message import Belief, BeliefMessage
-from control_backend.schemas.program import ConditionalNorm, Program
+from control_backend.schemas.program import ConditionalNorm, Goal, Program
 from control_backend.schemas.ri_message import (
     GestureCommand,
     PauseCommand,
@@ -249,6 +249,16 @@ class UserInterruptAgent(BaseAgent):
         self._cond_norm_map = {}
         self._cond_norm_reverse_map = {}
 
+        def _register_goal(goal: Goal):
+            """Recursively register goals and their subgoals."""
+            slug = AgentSpeakGenerator.slugify(goal)
+            self._goal_map[str(goal.id)] = slug
+            self._goal_reverse_map[slug] = str(goal.id)
+
+            for step in goal.plan.steps:
+                if isinstance(step, Goal):
+                    _register_goal(step)
+
         for phase in program.phases:
             for trigger in phase.triggers:
                 slug = AgentSpeakGenerator.slugify(trigger)
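Note: _register_goal walks the goal tree depth-first, so subgoals nested inside plan steps get the same ID-to-slug mappings as top-level goals. A stripped-down model of the traversal (hypothetical Goal/Plan stand-ins, not the real schemas):

from dataclasses import dataclass, field

@dataclass
class Plan:
    steps: list = field(default_factory=list)   # may mix Goals and plain actions

@dataclass
class Goal:
    id: str
    plan: Plan = field(default_factory=Plan)

def register(goal: Goal, mapping: dict[str, str]) -> None:
    mapping[goal.id] = goal.id.lower().replace(" ", "_")   # stand-in for slugify
    for step in goal.plan.steps:
        if isinstance(step, Goal):
            register(step, mapping)                         # recurse into subgoals

mapping: dict[str, str] = {}
register(Goal("Greet User", Plan([Goal("Say Hello")])), mapping)
assert set(mapping) == {"Greet User", "Say Hello"}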
@@ -256,8 +266,7 @@ class UserInterruptAgent(BaseAgent):
                 self._trigger_reverse_map[slug] = str(trigger.id)
 
             for goal in phase.goals:
-                self._goal_map[str(goal.id)] = AgentSpeakGenerator.slugify(goal)
-                self._goal_reverse_map[AgentSpeakGenerator.slugify(goal)] = str(goal.id)
+                _register_goal(goal)
 
         for goal, id in self._goal_reverse_map.items():
             self.logger.debug(f"Goal mapping: UI ID {goal} -> {id}")
@@ -123,6 +123,7 @@ class LLMSettings(BaseModel):
 
     local_llm_url: str = "http://localhost:1234/v1/chat/completions"
     local_llm_model: str = "gpt-oss"
+    api_key: str = ""
     chat_temperature: float = 1.0
    code_temperature: float = 0.3
     n_parallel: int = 4
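Note: api_key defaults to empty, which the conditional-header hunks above read as "send no Authorization header". A quick check of how the setting and the header condition compose (assuming pydantic's BaseModel as in the hunk; the key value is made up):

from pydantic import BaseModel

class LLMSettings(BaseModel):
    local_llm_url: str = "http://localhost:1234/v1/chat/completions"
    local_llm_model: str = "gpt-oss"
    api_key: str = ""

settings = LLMSettings(api_key="sk-local-test")   # hypothetical key
headers = {"Authorization": f"Bearer {settings.api_key}"} if settings.api_key else {}
assert headers                         # present only because a key was supplied
assert not LLMSettings().api_key       # the default stays unauthenticated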