# Example .env file. To use, make a copy, call it ".env" (i.e. removing the
# ".example" suffix), then edit the values.

# The hostname of the Robot Interface. Change if the Control Backend and
# Robot Interface are running on different computers.
RI_HOST="localhost"

# The hostname of the User Interface. This is what the browser displays in the
# URL bar. Strangely, even if the UI is running on a different host than the
# backend, if the computer with the browser is also hosting the UI itself,
# this value should be http://localhost.
UI_HOST="http://localhost:5173"

# URL for the local LLM API. Must be an API that implements the OpenAI Chat
# Completions API, but most do.
LLM_SETTINGS__LOCAL_LLM_URL="http://localhost:1234/v1/chat/completions"

# Name of the local LLM model to use.
LLM_SETTINGS__LOCAL_LLM_MODEL="gpt-oss"

# Number of non-speech chunks to wait before speech ended. A chunk is
# approximately 31 ms. Increasing this number allows longer pauses in speech,
# but also increases response time.
BEHAVIOUR_SETTINGS__VAD_NON_SPEECH_PATIENCE_CHUNKS=15

# Timeout in milliseconds for socket polling. Increase this number if network
# latency/jitter is high, often the case when using Wi-Fi. Perhaps 500 ms or
# more. A symptom of this issue is transcriptions getting cut off.
BEHAVIOUR_SETTINGS__SOCKET_POLLER_TIMEOUT_MS=400

# For an exhaustive list of options, see the control_backend.core.config
# module in the docs.