perf: improved speed of BDI

By efficiently checking when the next work has to be done, we can
increase performance by avoiding a "busy loop". Time from transcription
-> message to LLM agent is now down to sub-1 millisecond.

ref: N25B-316
This commit is contained in:
2025-11-22 19:53:19 +01:00
parent 1f9926fe00
commit 4d076eac48
5 changed files with 35 additions and 11 deletions

View File

@@ -63,7 +63,7 @@ class VADAgent(BaseAgent):
self.audio_buffer = np.array([], dtype=np.float32)
self.i_since_speech = settings.behaviour_settings.vad_initial_since_speech
self._ready = False
self._ready = asyncio.Event()
self.model = None
async def setup(self):
@@ -141,14 +141,11 @@ class VADAgent(BaseAgent):
while await self.audio_in_poller.poll(1) is not None:
discarded += 1
self.logger.info(f"Discarded {discarded} audio packets before starting.")
self._ready = True
self._ready.set()
async def _streaming_loop(self):
await self._ready.wait()
while self._running:
if not self._ready:
await asyncio.sleep(0.1)
continue
assert self.audio_in_poller is not None
data = await self.audio_in_poller.poll()
if data is None: