feat: (maybe) stop response when a new user message arrives

If we get a new message before the LLM is done responding, interrupt it.

ref: N25B-452
This commit is contained in:
2026-01-19 14:08:26 +01:00
parent db64eaeb0b
commit 04d19cee5c
4 changed files with 39 additions and 12 deletions

View File

@@ -117,6 +117,7 @@ class LLMSettings(BaseModel):
# Endpoint of the local OpenAI-compatible chat-completions server (LM Studio's default port).
local_llm_url: str = "http://localhost:1234/v1/chat/completions"
# Model identifier sent with requests to the local server.
local_llm_model: str = "gpt-oss"
# API key for the LLM endpoint; empty by default — presumably unused for local servers. TODO confirm.
api_key: str = ""
# Sampling temperature for conversational responses (higher = more varied output).
chat_temperature: float = 1.0
# Lower temperature for code generation — presumably to favor deterministic output. TODO confirm.
code_temperature: float = 0.3
# Number of parallel requests/slots — NOTE(review): exact meaning depends on the consumer; verify against caller.
n_parallel: int = 4