# Example .env file. To use, make a copy named ".env" (i.e. remove the ".example" suffix), then edit the values.
# The hostname of the Robot Interface. Change if the Control Backend and Robot Interface are running on different computers.
RI_HOST="localhost"
# URL for the local LLM API. Must be an API that implements the OpenAI Chat Completions API, but most do.
LLM_SETTINGS__LOCAL_LLM_URL="http://localhost:1234/v1/chat/completions"
# Name of the local LLM model to use.
LLM_SETTINGS__LOCAL_LLM_MODEL="gpt-oss"
# Number of non-speech chunks to wait before speech is considered ended. A chunk is approximately 31 ms. Increasing this number allows longer pauses in speech, but also increases response time.
BEHAVIOUR_SETTINGS__VAD_NON_SPEECH_PATIENCE_CHUNKS=15
# Timeout in milliseconds for socket polling. Increase this number if network latency/jitter is high, which is often the case when using Wi-Fi (try, e.g., 500 ms). A symptom of this issue is transcriptions getting cut off.
BEHAVIOUR_SETTINGS__SOCKET_POLLER_TIMEOUT_MS=100
# For an exhaustive list of options, see the control_backend.core.config module in the docs.