Merge branch 'feat/semantic-beliefs' into feat/pause-functionality

This commit is contained in:
Storm
2025-12-30 15:52:12 +02:00
24 changed files with 2760 additions and 270 deletions

View File

@@ -65,6 +65,7 @@ class BehaviourSettings(BaseModel):
:ivar transcription_words_per_minute: Estimated words per minute for transcription timing.
:ivar transcription_words_per_token: Estimated words per token for transcription timing.
:ivar transcription_token_buffer: Buffer for transcription tokens.
:ivar conversation_history_length_limit: The maximum number of messages to extract beliefs from.
"""
sleep_s: float = 1.0
@@ -82,6 +83,9 @@ class BehaviourSettings(BaseModel):
transcription_words_per_token: float = 0.75 # (3 words = 4 tokens)
transcription_token_buffer: int = 10
# Text belief extractor settings
conversation_history_length_limit: int = 10
class LLMSettings(BaseModel):
"""
@@ -89,10 +93,21 @@ class LLMSettings(BaseModel):
:ivar local_llm_url: URL for the local LLM API.
:ivar local_llm_model: Name of the local LLM model to use.
:ivar chat_temperature: The temperature to use while generating chat responses.
:ivar code_temperature: The temperature to use while generating code-like responses, such as
    during belief inference.
:ivar n_parallel: The number of parallel calls allowed to be made to the LLM.
"""
local_llm_url: str = "http://localhost:1234/v1/chat/completions"
local_llm_model: str = "google/gemma-3-1b"
chat_temperature: float = 1.0
code_temperature: float = 0.3
n_parallel: int = 4
class VADSettings(BaseModel):