From 4480469c4bb9dc16e8c97236b7dad68b334534e6 Mon Sep 17 00:00:00 2001 From: Remi <73385395+itsrubberduck@users.noreply.github.com> Date: Thu, 16 Oct 2025 11:15:31 +0200 Subject: [PATCH] Allow configuring OpenAI base URL --- .env.example | 2 ++ nuxt.config.ts | 1 + server/utils/normalize.ts | 5 ++++- server/utils/openai.ts | 5 ++++- server/utils/runtimeConfig.ts | 7 ++++++- 5 files changed, 17 insertions(+), 3 deletions(-) diff --git a/.env.example b/.env.example index b496ddc..5d3833a 100644 --- a/.env.example +++ b/.env.example @@ -9,6 +9,8 @@ JWT_REFRESH_SECRET=changeme # OpenAI OPENAI_API_KEY=sk-your-openai-key OPENAI_PROJECT= +# Optional: specify a custom API endpoint, e.g. http://localhost:1234/v1 +OPENAI_BASE_URL= LLM_MODEL=gpt-5-nano TTS_MODEL=tts-1 VOICE_ID=alloy diff --git a/nuxt.config.ts b/nuxt.config.ts index cdf4fd1..5c5f67d 100644 --- a/nuxt.config.ts +++ b/nuxt.config.ts @@ -16,6 +16,7 @@ export default defineNuxtConfig({ runtimeConfig: { openaiKey: process.env.OPENAI_API_KEY, openaiProject: process.env.OPENAI_PROJECT, + openaiBaseUrl: process.env.OPENAI_BASE_URL, llmModel: process.env.LLM_MODEL || 'gpt-5-nano', ttsModel: process.env.TTS_MODEL || 'tts-1', defaultVoiceId: process.env.VOICE_ID || 'alloy', diff --git a/server/utils/normalize.ts b/server/utils/normalize.ts index 0705eb2..30ee7af 100644 --- a/server/utils/normalize.ts +++ b/server/utils/normalize.ts @@ -4,11 +4,14 @@ import fs from "node:fs"; import { normalizeRadioPhrase } from "../../shared/utils/radioSpeech"; import { getServerRuntimeConfig } from "./runtimeConfig"; -const { openaiKey, openaiProject, llmModel, ttsModel } = getServerRuntimeConfig(); +const { openaiKey, openaiProject, openaiBaseUrl, llmModel, ttsModel } = getServerRuntimeConfig(); const normalizeClientOptions: ConstructorParameters[0] = { apiKey: openaiKey }; if (openaiProject) { normalizeClientOptions.project = openaiProject; } +if (openaiBaseUrl) { + normalizeClientOptions.baseURL = openaiBaseUrl; +} export const 
normalize = new OpenAI(normalizeClientOptions); diff --git a/server/utils/openai.ts b/server/utils/openai.ts index 0627ea3..c35f02f 100644 --- a/server/utils/openai.ts +++ b/server/utils/openai.ts @@ -28,7 +28,7 @@ const httpsAgent = new https.Agent({ function ensureOpenAI(): OpenAI { if (!openaiClient) { - const {openaiKey, openaiProject, llmModel} = getServerRuntimeConfig() + const {openaiKey, openaiProject, openaiBaseUrl, llmModel} = getServerRuntimeConfig() if (!openaiKey) { throw new Error('OPENAI_API_KEY is missing. Please set the key before using AI features.') } @@ -39,6 +39,9 @@ function ensureOpenAI(): OpenAI { if (openaiProject) { clientOptions.project = openaiProject } + if (openaiBaseUrl) { + clientOptions.baseURL = openaiBaseUrl + } console.log("using connection opened client") openaiClient = new OpenAI(clientOptions) cachedModel = llmModel diff --git a/server/utils/runtimeConfig.ts b/server/utils/runtimeConfig.ts index 18a8945..627fdf0 100644 --- a/server/utils/runtimeConfig.ts +++ b/server/utils/runtimeConfig.ts @@ -3,6 +3,7 @@ import { useRuntimeConfig } from '#imports' export interface ServerRuntimeConfig { openaiKey: string openaiProject?: string + openaiBaseUrl?: string llmModel: string ttsModel: string voiceId: string @@ -62,9 +63,13 @@ export function getServerRuntimeConfig(): ServerRuntimeConfig { warnedMissingOpenAIKey = true } + const openaiProject = String(runtimeConfig.openaiProject || '').trim() || undefined + const openaiBaseUrl = String(runtimeConfig.openaiBaseUrl || '').trim() || undefined + const config: ServerRuntimeConfig = { openaiKey, - openaiProject: String(runtimeConfig.openaiProject || '').trim() || undefined, + openaiProject, + openaiBaseUrl, llmModel: String(runtimeConfig.llmModel || '').trim() || 'gpt-5-nano', ttsModel: String(runtimeConfig.ttsModel || '').trim() || 'tts-1', voiceId: String(runtimeConfig.defaultVoiceId || '').trim() || 'alloy',