fix: allow interrupting the token stream on each token

ref: N25B-452
2026-01-29 15:35:42 +01:00
parent d9e5de6e51
commit 3579aee114


@@ -179,6 +179,9 @@ class LLMAgent(BaseAgent):
        full_message = ""
        current_chunk = ""
        async for token in self._stream_query_llm(messages):
            if self._interrupted:
                return
            full_message += token
            current_chunk += token
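
The change is small: the streaming loop now checks the agent's _interrupted flag on every token, so an interrupt takes effect after at most one token instead of only once the full response has streamed. Below is a minimal, self-contained sketch of the pattern, not the project's actual implementation: LLMAgent, _interrupted, and _stream_query_llm are taken from the diff, while the interrupt() helper, the fake token generator, and the handle() driver are hypothetical stand-ins added only to make the example runnable.

import asyncio


class LLMAgent:
    """Minimal stand-in for the real agent; only the streaming path is sketched."""

    def __init__(self):
        self._interrupted = False

    def interrupt(self):
        # Hypothetical helper; the real project may flip the flag elsewhere.
        self._interrupted = True

    async def _stream_query_llm(self, messages):
        # Fake token stream standing in for the real LLM call.
        for token in ["Hello", ",", " ", "world", "!"]:
            await asyncio.sleep(0.05)
            yield token

    async def handle(self, messages):
        full_message = ""
        current_chunk = ""
        async for token in self._stream_query_llm(messages):
            # Checking the flag once per token bounds interrupt latency to a
            # single token instead of the whole response.
            if self._interrupted:
                # The patch simply returns here; the partial text is returned
                # in this sketch only so the demo has something to print.
                return full_message
            full_message += token
            # current_chunk is consumed further down in the real method; it is
            # kept here only to mirror the diff.
            current_chunk += token
        return full_message


async def main():
    agent = LLMAgent()
    # Flip the flag shortly after streaming starts; the loop then stops at the
    # next token boundary.
    asyncio.get_running_loop().call_later(0.12, agent.interrupt)
    print(await agent.handle([{"role": "user", "content": "hi"}]))


asyncio.run(main())

Checking the flag inside the async for body, rather than cancelling the task, lets the agent exit cleanly between tokens without depending on asyncio cancellation semantics.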