chore: making tests pass after merge

I used pytest's monkeypatch fixture to simulate the values that are normally read from the config file

ref: N25B-236
This commit is contained in:
Pim Hutting
2025-11-09 14:45:08 +01:00
5 changed files with 246 additions and 25 deletions

View File

@@ -1,4 +1,5 @@
import numpy as np
import pytest
from control_backend.agents.transcription.speech_recognizer import (
OpenAIWhisperSpeechRecognizer,
@@ -6,6 +7,24 @@ from control_backend.agents.transcription.speech_recognizer import (
)
@pytest.fixture(autouse=True)
def patch_sr_settings(monkeypatch):
    """Patch the *module-local* settings object that SpeechRecognizer imported.

    Supplies concrete numeric values for every attribute that
    _estimate_max_tokens() reads, so the tests run without a real config file.
    """
    from control_backend.agents.transcription import speech_recognizer as sr

    # (target object, attribute name, test value) — applied with raising=False
    # because the attributes may not exist on the bare settings object.
    overrides = [
        (sr.settings.vad_settings, "sample_rate_hz", 16_000),
        (sr.settings.behaviour_settings, "transcription_words_per_minute", 450),
        (sr.settings.behaviour_settings, "transcription_words_per_token", 0.75),
        (sr.settings.behaviour_settings, "transcription_token_buffer", 10),
    ]
    for target, attr, value in overrides:
        monkeypatch.setattr(target, attr, value, raising=False)
def test_estimate_max_tokens():
"""Inputting one minute of audio, assuming 450 words per minute and adding a 10 token padding,
expecting 610 tokens."""