feat: (tentative) stop the in-progress response when a new user message arrives

If a new user message arrives before the LLM has finished responding, interrupt the in-progress response.

ref: N25B-452
This commit is contained in:
2026-01-19 14:08:26 +01:00
parent db64eaeb0b
commit 04d19cee5c
4 changed files with 39 additions and 12 deletions

View File

@@ -338,7 +338,7 @@ class BDICoreAgent(BaseAgent):
yield
@self.actions.add(".reply_with_goal", 3)
def _reply_with_goal(agent: "BDICoreAgent", term, intention):
def _reply_with_goal(agent, term, intention):
"""
Let the LLM generate a response to a user's utterance with the current norms and a
specific goal.

View File

@@ -318,6 +318,9 @@ class TextBeliefExtractorAgent(BaseAgent):
async with httpx.AsyncClient() as client:
response = await client.post(
settings.llm_settings.local_llm_url,
headers={"Authorization": f"Bearer {settings.llm_settings.api_key}"}
if settings.llm_settings.api_key
else {},
json={
"model": settings.llm_settings.local_llm_model,
"messages": [{"role": "user", "content": prompt}],