diff --git a/src/control_backend/agents/llm/llm_agent.py b/src/control_backend/agents/llm/llm_agent.py
index 7cac097..ca0cd78 100644
--- a/src/control_backend/agents/llm/llm_agent.py
+++ b/src/control_backend/agents/llm/llm_agent.py
@@ -35,6 +35,7 @@ class LLMAgent(BaseAgent):
         self.history = []
         self._querying = False
         self._interrupted = False
+        self._interrupted_message = ""
         self._go_ahead = asyncio.Event()
 
     async def setup(self):
@@ -82,11 +83,14 @@ class LLMAgent(BaseAgent):
             self._interrupted = True  # interrupt the previous processing
             await self._go_ahead.wait()  # wait until we get the go-ahead
+            message.text = f"{self._interrupted_message} {message.text}"
+        self._go_ahead.clear()
 
         self._querying = True
         full_message = ""
 
         async for chunk in self._query_llm(message.text, message.norms, message.goals):
             if self._interrupted:
+                self._interrupted_message = message.text
                 self.logger.debug("Interrupted processing of previous message.")
                 break
             await self._send_reply(chunk)
@@ -105,6 +109,7 @@ class LLMAgent(BaseAgent):
             )
 
         await self._send_full_reply(full_message)
+        self._go_ahead.set()
         self._interrupted = False
 
     async def _send_reply(self, msg: str):
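For context, here is a minimal, self-contained sketch of the interrupt/go-ahead handshake the diff implements: a newer message flags `_interrupted`, waits on the `_go_ahead` event until the in-flight query winds down, then merges the interrupted text into its own before querying. The `InterruptibleWorker` class, its `_stream` stand-in for the streaming LLM call, and the demo in `main` are illustrative only and are not part of the repository.

```python
import asyncio


class InterruptibleWorker:
    """Toy stand-in for LLMAgent: one query at a time, newer messages interrupt older ones."""

    def __init__(self):
        self._querying = False
        self._interrupted = False
        self._interrupted_message = ""
        self._go_ahead = asyncio.Event()

    async def _stream(self, text: str):
        # Stand-in for the streaming LLM call: yields one chunk (word) at a time.
        for word in text.split():
            await asyncio.sleep(0.05)
            yield word

    async def handle(self, text: str) -> str:
        if self._querying:
            self._interrupted = True              # interrupt the in-flight query
            await self._go_ahead.wait()           # wait until it winds down
            text = f"{self._interrupted_message} {text}"  # merge the dropped text
        self._go_ahead.clear()
        self._querying = True

        chunks = []
        async for chunk in self._stream(text):
            if self._interrupted:
                self._interrupted_message = text  # remember what was cut off
                break
            chunks.append(chunk)

        self._querying = False
        self._go_ahead.set()                      # release any handler waiting above
        self._interrupted = False
        return " ".join(chunks)


async def main():
    worker = InterruptibleWorker()
    first = asyncio.create_task(worker.handle("tell me a very long story"))
    await asyncio.sleep(0.12)                     # let the first query start streaming
    second = await worker.handle("actually keep it short")
    print("interrupted reply:", await first)     # partial output of the first query
    print("merged reply:", second)                # reply to the combined text


asyncio.run(main())
```

One design note this sketch makes visible: the event is cleared before every query, not only after an interruption; otherwise a later interrupter's `wait()` would return immediately while the previous query is still streaming.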