Merge remote-tracking branch 'origin/dev' into demo

# Conflicts:
#	src/control_backend/agents/bdi/behaviours/receive_llm_resp_behaviour.py
#	src/control_backend/agents/llm/llm.py
#	src/control_backend/agents/ri_command_agent.py
#	src/control_backend/agents/transcription/speech_recognizer.py
Twirre Meulenbelt committed 2025-11-02 21:07:50 +01:00
32 changed files with 412 additions and 204 deletions

src/control_backend/agents/llm/llm.py

@@ -2,10 +2,11 @@
LLM Agent module for routing text queries from the BDI Core Agent to a local LLM
service and returning its responses back to the BDI Core Agent.
"""
import json
import logging
import re
-from typing import AsyncGenerator
+from collections.abc import AsyncGenerator
import httpx
from spade.agent import Agent
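
Background for the import change above: since Python 3.9, typing.AsyncGenerator is a deprecated alias of collections.abc.AsyncGenerator, so annotations can use the collections.abc class directly. A minimal, self-contained sketch of that annotation style (count_up is an illustrative async generator, not part of this module):

    import asyncio
    from collections.abc import AsyncGenerator

    async def count_up(limit: int) -> AsyncGenerator[str, None]:
        # Async generators are annotated as AsyncGenerator[YieldType, SendType].
        for i in range(limit):
            yield f"chunk {i}"

    async def main() -> None:
        async for chunk in count_up(3):
            print(chunk)

    asyncio.run(main())
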
@@ -62,16 +63,17 @@ class LLMAgent(Agent):
# Consume the streaming generator and send a reply for every chunk
async for chunk in self._query_llm(user_text):
await self._reply(chunk)
self.agent.logger.debug("Finished processing BDI message. "
"Response sent in chunks to BDI Core Agent.")
self.agent.logger.debug(
"Finished processing BDI message. Response sent in chunks to BDI Core Agent."
)
async def _reply(self, msg: str):
"""
Sends a response message back to the BDI Core Agent.
"""
reply = Message(
-    to=settings.agent_settings.bdi_core_agent_name + '@' + settings.agent_settings.host,
-    body=msg
+    to=settings.agent_settings.bdi_core_agent_name + "@" + settings.agent_settings.host,
+    body=msg,
)
await self.send(reply)
@@ -100,7 +102,7 @@ class LLMAgent(Agent):
{
"role": "user",
"content": prompt,
-}
+},
]
try:
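
For context, the messages list above is the body of an OpenAI-style chat-completions request that _query_llm streams over httpx. A rough sketch of such a streaming request, assuming a placeholder endpoint URL and model name (neither is taken from this repository's settings):

    import httpx

    CHAT_URL = "http://localhost:8000/v1/chat/completions"  # placeholder endpoint

    async def stream_chat(prompt: str) -> None:
        payload = {
            "model": "local-model",  # placeholder model name
            "stream": True,          # ask the server to emit SSE chunks
            "messages": [
                {"role": "user", "content": prompt},
            ],
        }
        async with httpx.AsyncClient(timeout=None) as client:
            # client.stream() keeps the response open so lines can be read incrementally.
            async with client.stream("POST", CHAT_URL, json=payload) as response:
                response.raise_for_status()
                async for line in response.aiter_lines():
                    print(line)

    # Usage (requires a running OpenAI-compatible server at CHAT_URL):
    # asyncio.run(stream_chat("Hello"))
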
@@ -111,8 +113,7 @@ class LLMAgent(Agent):
# Stream the message in chunks separated by punctuation.
# We include the delimiter in the emitted chunk for natural flow.
pattern = re.compile(
r".*?(?:,|;|:|—||-|\.{3}|…|\.|\?|!|\(|\)|\[|\]|/)\s*",
re.DOTALL
r".*?(?:,|;|:|—||-|\.{3}|…|\.|\?|!|\(|\)|\[|\]|/)\s*", re.DOTALL
)
for m in pattern.finditer(current_chunk):
chunk = m.group(0)
@@ -121,7 +122,8 @@ class LLMAgent(Agent):
current_chunk = ""
# Yield any remaining tail
-if current_chunk: yield current_chunk
+if current_chunk:
+    yield current_chunk
except httpx.HTTPError as err:
self.agent.logger.error("HTTP error.", exc_info=err)
yield "LLM service unavailable."
@@ -145,15 +147,18 @@ class LLMAgent(Agent):
response.raise_for_status()
async for line in response.aiter_lines():
-if not line or not line.startswith("data: "): continue
+if not line or not line.startswith("data: "):
+    continue
data = line[len("data: "):]
if data.strip() == "[DONE]": break
data = line[len("data: ") :]
if data.strip() == "[DONE]":
break
try:
event = json.loads(data)
delta = event.get("choices", [{}])[0].get("delta", {}).get("content")
-if delta: yield delta
+if delta:
+    yield delta
except json.JSONDecodeError:
self.agent.logger.error("Failed to parse LLM response: %s", data)