Merge remote-tracking branch 'origin/feat/reset-experiment-and-phase' into feat/visual-emotion-recognition
@@ -74,10 +74,12 @@ class BehaviourSettings(BaseModel):
     :ivar vad_prob_threshold: Probability threshold for Voice Activity Detection.
     :ivar vad_initial_since_speech: Initial value for the 'since speech' counter in VAD.
     :ivar vad_non_speech_patience_chunks: Number of non-speech chunks to wait before speech is considered ended.
+    :ivar vad_begin_silence_chunks: The number of chunks of silence to prepend to speech chunks.
     :ivar transcription_max_concurrent_tasks: Maximum number of concurrent transcription tasks.
     :ivar transcription_words_per_minute: Estimated words per minute for transcription timing.
     :ivar transcription_words_per_token: Estimated words per token for transcription timing.
     :ivar transcription_token_buffer: Buffer for transcription tokens.
+    :ivar conversation_history_length_limit: The maximum number of messages to extract beliefs from.
     """
 
     # ATTENTION: When adding/removing settings, make sure to update the .env.example file
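Per that ATTENTION note, every field added here must be mirrored in .env.example. That file is not part of this diff, so the entries below for the two newly documented settings are hypothetical, assuming a flat upper-cased naming scheme:

```
# Hypothetical .env.example additions; the actual variable names are not shown in this diff.
VAD_BEGIN_SILENCE_CHUNKS=6
CONVERSATION_HISTORY_LENGTH_LIMIT=10
```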
@@ -89,7 +91,8 @@ class BehaviourSettings(BaseModel):
     # VAD settings
     vad_prob_threshold: float = 0.5
     vad_initial_since_speech: int = 100
-    vad_non_speech_patience_chunks: int = 3
+    vad_non_speech_patience_chunks: int = 15
+    vad_begin_silence_chunks: int = 6
 
     # transcription behaviour
     transcription_max_concurrent_tasks: int = 3
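The bump from 3 to 15 patience chunks makes the detector wait five times longer before declaring an utterance over, and the new begin-silence setting keeps a few chunks of leading silence. As a rough illustration of how settings like these are conventionally consumed in a chunked VAD loop (this class is a sketch, not code from the repository):

```python
# Sketch only: this class is NOT from the repository; it shows one conventional
# way chunked-VAD settings like those above are consumed.
from collections import deque


class ChunkedVADGate:
    def __init__(self, prob_threshold=0.5, patience_chunks=15,
                 begin_silence_chunks=6, initial_since_speech=100):
        self.prob_threshold = prob_threshold
        self.patience_chunks = patience_chunks
        # Ring buffer of recent silence, prepended when speech starts
        # (vad_begin_silence_chunks).
        self.silence = deque(maxlen=begin_silence_chunks)
        # Starts high (vad_initial_since_speech) so startup silence is not
        # mistaken for a just-ended utterance.
        self.since_speech = initial_since_speech
        self.in_speech = False
        self.utterance = []

    def feed(self, chunk, speech_prob):
        """Feed one audio chunk; returns a finished utterance or None."""
        if speech_prob >= self.prob_threshold:
            if not self.in_speech:
                # Keep some leading silence for cleaner transcription.
                self.utterance = list(self.silence)
                self.in_speech = True
            self.utterance.append(chunk)
            self.since_speech = 0
            return None
        self.since_speech += 1
        if not self.in_speech:
            self.silence.append(chunk)
            return None
        self.utterance.append(chunk)  # keep short pauses inside the utterance
        if self.since_speech > self.patience_chunks:
            # Patience exhausted: the utterance has ended.
            self.in_speech = False
            finished, self.utterance = self.utterance, []
            return finished
        return None
```

Keeping pause chunks inside the utterance rather than dropping them is one design choice among several; the repository may handle this differently.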
@@ -97,6 +100,9 @@ class BehaviourSettings(BaseModel):
     transcription_words_per_token: float = 0.75 # (3 words = 4 tokens)
     transcription_token_buffer: int = 10
 
+    # Text belief extractor settings
+    conversation_history_length_limit: int = 10
+
 
 class LLMSettings(BaseModel):
     """
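The three transcription constants read like inputs to a simple token-budget estimate; a hypothetical helper (not present in the diff) could combine them as follows:

```python
# Hypothetical helper, NOT from the repository: estimates a token budget for a
# transcription of a given duration from the constants above. 150 wpm is a
# placeholder; transcription_words_per_minute's value is outside the shown hunks.
def estimate_transcription_tokens(duration_seconds: float,
                                  words_per_minute: float = 150.0,
                                  words_per_token: float = 0.75,
                                  token_buffer: int = 10) -> int:
    words = duration_seconds / 60.0 * words_per_minute
    # 0.75 words per token matches the "(3 words = 4 tokens)" comment.
    tokens = words / words_per_token
    return int(tokens) + token_buffer


# A 30 s utterance at 150 wpm -> 75 words -> 100 tokens, plus the 10-token buffer.
print(estimate_transcription_tokens(30.0))  # 110
```

The new conversation_history_length_limit would typically be applied as a simple history[-limit:] slice before belief extraction.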
@@ -104,12 +110,19 @@ class LLMSettings(BaseModel):
 
     :ivar local_llm_url: URL for the local LLM API.
     :ivar local_llm_model: Name of the local LLM model to use.
+    :ivar chat_temperature: The temperature to use while generating chat responses.
+    :ivar code_temperature: The temperature to use while generating code-like responses, e.g. during
+        belief inference.
+    :ivar n_parallel: The number of parallel calls allowed to be made to the LLM.
     """
 
     # ATTENTION: When adding/removing settings, make sure to update the .env.example file
 
     local_llm_url: str = "http://localhost:1234/v1/chat/completions"
     local_llm_model: str = "gpt-oss"
+    chat_temperature: float = 1.0
+    code_temperature: float = 0.3
+    n_parallel: int = 4
 
 
 class VADSettings(BaseModel):
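The /v1/chat/completions path points at an OpenAI-compatible server (port 1234 is LM Studio's default). A minimal sketch of how these five values could drive requests, assuming the standard chat-completions payload shape; none of the function names below come from the repository:

```python
# Sketch only: assumes an OpenAI-compatible chat-completions server, which the
# configured URL suggests but this diff does not confirm.
import asyncio
import json
import urllib.request

LOCAL_LLM_URL = "http://localhost:1234/v1/chat/completions"
LOCAL_LLM_MODEL = "gpt-oss"
CHAT_TEMPERATURE = 1.0   # conversational replies
CODE_TEMPERATURE = 0.3   # code-like output, e.g. belief inference
N_PARALLEL = 4           # maximum concurrent LLM calls

_limit = asyncio.Semaphore(N_PARALLEL)


def _post(payload: dict) -> dict:
    req = urllib.request.Request(
        LOCAL_LLM_URL,
        data=json.dumps(payload).encode("utf-8"),
        headers={"Content-Type": "application/json"},
    )
    with urllib.request.urlopen(req) as resp:
        return json.load(resp)


async def complete(prompt: str, code_like: bool = False) -> str:
    """Issue one chat completion while holding the parallelism cap."""
    payload = {
        "model": LOCAL_LLM_MODEL,
        "temperature": CODE_TEMPERATURE if code_like else CHAT_TEMPERATURE,
        "messages": [{"role": "user", "content": prompt}],
    }
    async with _limit:
        data = await asyncio.to_thread(_post, payload)
    return data["choices"][0]["message"]["content"]
```

The semaphore is what n_parallel suggests: a hard cap on in-flight LLM calls, with chat and belief-inference code paths choosing different temperatures.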