feat: stop an in-progress response when a new user message arrives
If we get a new message before the LLM is done responding, interrupt it. ref: N25B-452
This commit is contained in:
@@ -117,6 +117,7 @@ class LLMSettings(BaseModel):
|
||||
|
||||
local_llm_url: str = "http://localhost:1234/v1/chat/completions"  # OpenAI-compatible /v1/chat/completions endpoint of a locally hosted LLM server
|
||||
local_llm_model: str = "gpt-oss"  # model identifier sent to the local LLM server; presumably must match a model loaded there — verify against server config
|
||||
api_key: str = ""  # empty by default; presumably a local server needs no key — NOTE(review): confirm whether remote endpoints require this to be set
|
||||
chat_temperature: float = 1.0  # sampling temperature used for chat-style requests — higher than code_temperature, favoring variety
|
||||
code_temperature: float = 0.3  # sampling temperature used for code-generation requests — kept low for more deterministic output
|
||||
n_parallel: int = 4  # presumably the number of parallel requests/slots allowed against the LLM server — TODO confirm against the code that reads it
|
||||
|
||||
Reference in New Issue
Block a user