diff --git a/.env.example b/.env.example index 553d245e8..9860928a5 100644 --- a/.env.example +++ b/.env.example @@ -30,6 +30,12 @@ # OPENAI_MODEL=gpt-4o # OPENAI_BASE_URL=https://api.openai.com/v1 (optional) # +# Option 2a — AI/ML API: +# CLAUDE_CODE_USE_OPENAI=1 +# AIMLAPI_API_KEY=your-aimlapi-key-here +# OPENAI_BASE_URL=https://api.aimlapi.com/v1 +# OPENAI_MODEL=gpt-4o +# # Option 3 — Google Gemini: # CLAUDE_CODE_USE_GEMINI=1 # GEMINI_API_KEY=your-gemini-key-here @@ -150,6 +156,19 @@ ANTHROPIC_API_KEY=sk-ant-your-key-here # OPENAI_BASE_URL=https://api.openai.com/v1 +# ----------------------------------------------------------------------------- +# Option 2a: AI/ML API +# ----------------------------------------------------------------------------- +# AI/ML API provides an OpenAI-compatible endpoint for 400+ models across +# chat, code, image, video, voice, embeddings, and more. OpenClaude uses +# chat/tool-capable models through the chat completions API. +# +# CLAUDE_CODE_USE_OPENAI=1 +# AIMLAPI_API_KEY=your-aimlapi-key-here +# OPENAI_BASE_URL=https://api.aimlapi.com/v1 +# OPENAI_MODEL=gpt-4o + + # ----------------------------------------------------------------------------- # Option 3: Google Gemini # ----------------------------------------------------------------------------- diff --git a/README.md b/README.md index 3360ab46b..a2e4bcb12 100644 --- a/README.md +++ b/README.md @@ -70,6 +70,34 @@ $env:OPENAI_MODEL="gpt-4o" openclaude ``` +### Fastest AI/ML API setup + +AI/ML API provides 400+ models behind an OpenAI-compatible API. OpenClaude uses chat/tool-capable models through `https://api.aimlapi.com/v1/chat/completions`. + +macOS / Linux: + +```bash +export CLAUDE_CODE_USE_OPENAI=1 +export AIMLAPI_API_KEY=your-aimlapi-key-here +export OPENAI_BASE_URL=https://api.aimlapi.com/v1 +export OPENAI_MODEL=gpt-4o + +openclaude +``` + +Windows PowerShell: + +```powershell +$env:CLAUDE_CODE_USE_OPENAI="1" +$env:AIMLAPI_API_KEY="your-aimlapi-key-here" +$env:OPENAI_BASE_URL="https://api.aimlapi.com/v1" +$env:OPENAI_MODEL="gpt-4o" + +openclaude +``` + +You can also run `/provider`, choose `AI/ML API`, and paste your AI/ML API key. Browse available models at [aimlapi.com/models](https://aimlapi.com/models) and provider docs at [docs.aimlapi.com](https://docs.aimlapi.com/). + ### Fastest local Ollama setup macOS / Linux: @@ -112,6 +140,7 @@ Beginner-friendly guides: Advanced and source-build guides: +- [AI/ML API Setup](docs/aimlapi-setup.md) - [Advanced Setup](docs/advanced-setup.md) - [Android Install](ANDROID_INSTALL.md) @@ -119,6 +148,7 @@ Advanced and source-build guides: | Provider | Setup Path | Notes | | --- | --- | --- | +| AI/ML API | `/provider` or env vars | First-class OpenAI-compatible setup for AI/ML API's 400+ model catalog; OpenClaude uses chat/tool-capable models | | OpenAI-compatible | `/provider` or env vars | Works with OpenAI, OpenRouter, DeepSeek, Groq, Mistral, LM Studio, and other compatible `/v1` servers | | Gemini | `/provider` or env vars | Supports API key, access token, or local ADC workflow on current `main` | | GitHub Models | `/onboard-github` | Interactive onboarding with saved credentials | diff --git a/docs/aimlapi-setup.md b/docs/aimlapi-setup.md new file mode 100644 index 000000000..43f4744c0 --- /dev/null +++ b/docs/aimlapi-setup.md @@ -0,0 +1,35 @@ +# AI/ML API Setup + +OpenClaude can run through AI/ML API as a first-class OpenAI-compatible provider. 
AI/ML API exposes `https://api.aimlapi.com/v1` and OpenClaude sends chat requests to `https://api.aimlapi.com/v1/chat/completions`. + +## Setup with `/provider` + +1. Start OpenClaude. +2. Run `/provider`. +3. Choose `AI/ML API`. +4. Paste your AI/ML API key. +5. Keep the default model `gpt-4o` or enter another chat-capable model ID. + +## Setup with environment variables + +macOS / Linux: + +```bash +export CLAUDE_CODE_USE_OPENAI=1 +export AIMLAPI_API_KEY=your-aimlapi-key-here +export OPENAI_BASE_URL=https://api.aimlapi.com/v1 +export OPENAI_MODEL=gpt-4o +``` + +Windows PowerShell: + +```powershell +$env:CLAUDE_CODE_USE_OPENAI="1" +$env:AIMLAPI_API_KEY="your-aimlapi-key-here" +$env:OPENAI_BASE_URL="https://api.aimlapi.com/v1" +$env:OPENAI_MODEL="gpt-4o" +``` + +OpenClaude uses chat/tool-capable models for coding-agent workflows. AI/ML API also offers other modalities such as image, video, voice, music, embeddings, OCR, and 3D generation; those models are available through AI/ML API but are outside OpenClaude's core chat/tool loop. + +Browse models at [aimlapi.com/models](https://aimlapi.com/models) and read provider docs at [docs.aimlapi.com](https://docs.aimlapi.com/). diff --git a/src/commands/provider/provider.tsx b/src/commands/provider/provider.tsx index 6d954d2a0..2aacc3370 100644 --- a/src/commands/provider/provider.tsx +++ b/src/commands/provider/provider.tsx @@ -23,6 +23,10 @@ import { resolveCodexApiCredentials, resolveProviderRequest, } from '../../services/api/providerConfig.js' +import { + AIMLAPI_LABEL, + isAimlapiBaseUrl, +} from '../../providers/aimlapi/index.js' import { applySavedProfileToCurrentSession as applySharedProfileToCurrentSession, buildCodexOAuthProfileEnv as buildSharedCodexOAuthProfileEnv, @@ -207,10 +211,10 @@ export function getProviderWizardDefaults( sanitizeProviderConfigValue(processEnv.GEMINI_MODEL, secretSource) || DEFAULT_GEMINI_MODEL const safeMistralModel = - sanitizeProviderConfigValue(processEnv.MISTRAL_MODEL, processEnv) || + sanitizeProviderConfigValue(processEnv.MISTRAL_MODEL, secretSource) || DEFAULT_MISTRAL_MODEL const safeMistralBaseUrl = - sanitizeProviderConfigValue(processEnv.MISTRAL_BASE_URL, processEnv) || + sanitizeProviderConfigValue(processEnv.MISTRAL_BASE_URL, secretSource) || DEFAULT_MISTRAL_BASE_URL return { @@ -287,6 +291,8 @@ export function buildCurrentProviderSummary(options?: { let providerLabel = 'OpenAI-compatible' if (request.transport === 'codex_responses') { providerLabel = 'Codex' + } else if (isAimlapiBaseUrl(request.baseUrl)) { + providerLabel = AIMLAPI_LABEL } else if (isLocalProviderUrl(request.baseUrl)) { providerLabel = getLocalOpenAICompatibleProviderLabel(request.baseUrl) } diff --git a/src/components/ProviderManager.test.tsx b/src/components/ProviderManager.test.tsx index 44f75adc1..4efc43108 100644 --- a/src/components/ProviderManager.test.tsx +++ b/src/components/ProviderManager.test.tsx @@ -105,6 +105,7 @@ async function waitForCondition( // Order matches ProviderManager.renderPresetSelection() when // canUseCodexOAuth === true (default in mocked tests). 
const PRESET_ORDER = [ + 'AI/ML API', 'Alibaba Coding Plan', 'Alibaba Coding Plan (China)', 'Anthropic', @@ -437,6 +438,23 @@ test('ProviderManager resolves GitHub virtual provider from async storage withou expect(asyncRead).toHaveBeenCalled() }) +test('ProviderManager preset picker includes AI/ML API', async () => { + mockProviderManagerDependencies( + () => undefined, + async () => undefined, + ) + + const nonce = `${Date.now()}-${Math.random()}` + const { ProviderManager } = await import(`./ProviderManager.js?ts=${nonce}`) + const output = await renderProviderManagerFrame(ProviderManager, { + mode: 'first-run', + waitForOutput: frame => + frame.includes('Set up provider') && frame.includes('AI/ML API'), + }) + + expect(output).toContain('AI/ML API') +}) + test('ProviderManager avoids first-frame false negative while stored-token lookup is pending', async () => { delete process.env.CLAUDE_CODE_USE_GITHUB delete process.env.GITHUB_TOKEN diff --git a/src/components/ProviderManager.tsx b/src/components/ProviderManager.tsx index 10b40b5df..4f800df9b 100644 --- a/src/components/ProviderManager.tsx +++ b/src/components/ProviderManager.tsx @@ -42,6 +42,9 @@ import { type AtomicChatReadiness, type OllamaGenerationReadiness, } from '../utils/providerDiscovery.js' +import { + AIMLAPI_PROVIDER_PRESET_OPTION, +} from '../providers/aimlapi/index.js' import { rankOllamaModels, recommendOllamaModel, @@ -1254,6 +1257,7 @@ export function ProviderManager({ mode, onDone }: Props): React.ReactNode { // should always find known providers first. `Skip for now` (first-run // only) comes last, after Custom. const options = [ + AIMLAPI_PROVIDER_PRESET_OPTION, { value: 'dashscope-intl', label: 'Alibaba Coding Plan', diff --git a/src/providers/aimlapi/index.test.ts b/src/providers/aimlapi/index.test.ts new file mode 100644 index 000000000..20685d2bf --- /dev/null +++ b/src/providers/aimlapi/index.test.ts @@ -0,0 +1,141 @@ +import { describe, expect, test } from 'bun:test' +import { + AIMLAPI_ATTRIBUTION_HEADERS, + AIMLAPI_DEFAULT_BASE_URL, + AIMLAPI_DEFAULT_MODEL, + AIMLAPI_LABEL, + AIMLAPI_PROVIDER_ID, + AIMLAPI_PROVIDER_PRESET_OPTION, + getAimlapiApiKey, + getAimlapiAttributionHeaders, + getAimlapiOpenAICompatibleApiKey, + getAimlapiPresetDefaults, + hasAimlapiApiKey, + isAimlapiBaseUrl, + mapAimlapiModelCatalog, + syncAimlapiOpenAIEnv, +} from './index.js' + +describe('AI/ML API provider module', () => { + test('detects only api.aimlapi.com base URLs', () => { + expect(isAimlapiBaseUrl('https://api.aimlapi.com/v1')).toBe(true) + expect(isAimlapiBaseUrl('https://api.aimlapi.com/v1/')).toBe(true) + expect(isAimlapiBaseUrl('https://example.com/api.aimlapi.com/v1')).toBe(false) + expect(isAimlapiBaseUrl('not a url')).toBe(false) + expect(isAimlapiBaseUrl(undefined)).toBe(false) + }) + + test('exposes preset defaults and picker option metadata', () => { + expect(getAimlapiPresetDefaults({ + AIMLAPI_API_KEY: 'aiml-key', + OPENAI_API_KEY: 'openai-key', + })).toEqual({ + provider: 'openai', + name: AIMLAPI_LABEL, + baseUrl: AIMLAPI_DEFAULT_BASE_URL, + model: AIMLAPI_DEFAULT_MODEL, + apiKey: 'aiml-key', + requiresApiKey: true, + }) + + expect(AIMLAPI_PROVIDER_PRESET_OPTION).toEqual({ + value: AIMLAPI_PROVIDER_ID, + label: AIMLAPI_LABEL, + description: 'AI/ML API OpenAI-compatible endpoint', + }) + }) + + test('resolves provider-specific auth without affecting other base URLs', () => { + const env = { + AIMLAPI_API_KEY: 'aiml-key', + OPENAI_API_KEY: 'openai-key', + } + + expect(getAimlapiApiKey(env)).toBe('aiml-key') + 
expect(getAimlapiApiKey({ OPENAI_API_KEY: 'openai-key' })).toBe('openai-key')
+    expect(getAimlapiOpenAICompatibleApiKey(
+      'https://api.aimlapi.com/v1',
+      env,
+    )).toBe('aiml-key')
+    expect(getAimlapiOpenAICompatibleApiKey(
+      'https://openrouter.ai/api/v1',
+      env,
+    )).toBeUndefined()
+    expect(hasAimlapiApiKey('https://api.aimlapi.com/v1', env)).toBe(true)
+  })
+
+  test('adds attribution headers only for AI/ML API', () => {
+    expect(getAimlapiAttributionHeaders('https://api.aimlapi.com/v1')).toEqual(
+      AIMLAPI_ATTRIBUTION_HEADERS,
+    )
+    expect(getAimlapiAttributionHeaders('https://api.openai.com/v1')).toEqual({})
+  })
+
+  test('syncs AIMLAPI_API_KEY into OPENAI_API_KEY only for AI/ML API env', () => {
+    const aimlEnv = {
+      OPENAI_BASE_URL: 'https://api.aimlapi.com/v1',
+      AIMLAPI_API_KEY: 'aiml-key',
+    }
+    syncAimlapiOpenAIEnv(aimlEnv)
+    expect(aimlEnv.OPENAI_API_KEY).toBe('aiml-key')
+
+    const existingOpenAIEnv = {
+      OPENAI_BASE_URL: 'https://api.aimlapi.com/v1',
+      AIMLAPI_API_KEY: 'aiml-key',
+      OPENAI_API_KEY: 'openai-key',
+    }
+    syncAimlapiOpenAIEnv(existingOpenAIEnv)
+    expect(existingOpenAIEnv.OPENAI_API_KEY).toBe('openai-key')
+
+    const otherEnv = {
+      OPENAI_BASE_URL: 'https://api.openai.com/v1',
+      AIMLAPI_API_KEY: 'aiml-key',
+    }
+    syncAimlapiOpenAIEnv(otherEnv)
+    expect(otherEnv.OPENAI_API_KEY).toBeUndefined()
+  })
+
+  test('maps chat-completions models with metadata and deduplication', () => {
+    expect(mapAimlapiModelCatalog({
+      data: [
+        {
+          id: 'gpt-4o',
+          type: 'openai/chat-completions',
+          info: {
+            name: 'GPT 4o',
+            developer: 'OpenAI',
+            contextLength: 128000,
+          },
+        },
+        {
+          id: 'gpt-4o',
+          type: 'openai/chat-completions',
+          info: { name: 'Duplicate GPT 4o' },
+        },
+        {
+          id: 'image-model',
+          type: 'openai/images',
+        },
+        {
+          id: 'deepseek-chat',
+          type: 'openai/chat-completions',
+          info: {
+            name: 'DeepSeek Chat',
+            developer: 'DeepSeek',
+          },
+        },
+      ],
+    })).toEqual([
+      {
+        value: 'gpt-4o',
+        label: 'GPT 4o',
+        description: 'OpenAI - 128000 context',
+      },
+      {
+        value: 'deepseek-chat',
+        label: 'DeepSeek Chat',
+        description: 'DeepSeek',
+      },
+    ])
+  })
+})
diff --git a/src/providers/aimlapi/index.ts b/src/providers/aimlapi/index.ts
new file mode 100644
index 000000000..5df67dcf2
--- /dev/null
+++ b/src/providers/aimlapi/index.ts
@@ -0,0 +1,129 @@
+export const AIMLAPI_PROVIDER_ID = 'aimlapi' as const
+export const AIMLAPI_LABEL = 'AI/ML API'
+export const AIMLAPI_DEFAULT_BASE_URL = 'https://api.aimlapi.com/v1'
+export const AIMLAPI_DEFAULT_MODEL = 'gpt-4o'
+export const AIMLAPI_API_KEY_ENV = 'AIMLAPI_API_KEY'
+
+export const AIMLAPI_ATTRIBUTION_HEADERS: Record<string, string> = {
+  'HTTP-Referer': 'OpenClaude',
+  'X-Title': 'OpenClaude',
+}
+
+export const AIMLAPI_PROVIDER_PRESET_OPTION = {
+  value: AIMLAPI_PROVIDER_ID,
+  label: AIMLAPI_LABEL,
+  description: 'AI/ML API OpenAI-compatible endpoint',
+} as const
+
+export type AimlapiEnv = {
+  AIMLAPI_API_KEY?: string
+  OPENAI_API_KEY?: string
+  OPENAI_BASE_URL?: string
+}
+
+export type AimlapiModelCatalogPayload = {
+  data?: Array<{
+    id?: string
+    type?: string
+    info?: {
+      name?: string
+      developer?: string
+      contextLength?: number
+    }
+  }>
+}
+
+export type AimlapiModelOption = {
+  value: string
+  label: string
+  description: string
+}
+
+export function isAimlapiBaseUrl(baseUrl: string | undefined): boolean {
+  if (!baseUrl) return false
+  try {
+    const hostname = new URL(baseUrl).hostname.toLowerCase()
+    return hostname === 'api.aimlapi.com'
+  } catch {
+    return false
+  }
+}
+
+export function getAimlapiApiKey(env: AimlapiEnv = process.env): string {
+  return env.AIMLAPI_API_KEY ?? env.OPENAI_API_KEY ?? ''
+}
+
+export function getAimlapiOpenAICompatibleApiKey(
+  baseUrl: string | undefined,
+  env: AimlapiEnv = process.env,
+): string | undefined {
+  if (!isAimlapiBaseUrl(baseUrl)) return undefined
+  return env.AIMLAPI_API_KEY
+}
+
+export function getAimlapiAttributionHeaders(
+  baseUrl: string | undefined,
+): Record<string, string> {
+  return isAimlapiBaseUrl(baseUrl) ? AIMLAPI_ATTRIBUTION_HEADERS : {}
+}
+
+export function hasAimlapiApiKey(
+  baseUrl: string | undefined,
+  env: AimlapiEnv = process.env,
+): boolean {
+  return isAimlapiBaseUrl(baseUrl) && !!env.AIMLAPI_API_KEY?.trim()
+}
+
+export function syncAimlapiOpenAIEnv(env: AimlapiEnv = process.env): void {
+  if (
+    isAimlapiBaseUrl(env.OPENAI_BASE_URL) &&
+    !env.OPENAI_API_KEY &&
+    env.AIMLAPI_API_KEY
+  ) {
+    env.OPENAI_API_KEY = env.AIMLAPI_API_KEY
+  }
+}
+
+export function getAimlapiPresetDefaults(env: AimlapiEnv = process.env) {
+  return {
+    provider: 'openai' as const,
+    name: AIMLAPI_LABEL,
+    baseUrl: AIMLAPI_DEFAULT_BASE_URL,
+    model: AIMLAPI_DEFAULT_MODEL,
+    apiKey: getAimlapiApiKey(env),
+    requiresApiKey: true,
+  }
+}
+
+export function mapAimlapiModelCatalog(
+  payload: AimlapiModelCatalogPayload,
+): AimlapiModelOption[] {
+  const seen = new Set<string>()
+  const models: AimlapiModelOption[] = []
+
+  for (const model of payload.data ?? []) {
+    if (!model.id || seen.has(model.id)) {
+      continue
+    }
+    if (model.type !== 'openai/chat-completions') {
+      continue
+    }
+
+    seen.add(model.id)
+    const details = [
+      model.info?.developer,
+      typeof model.info?.contextLength === 'number'
+        ? `${model.info.contextLength} context`
+        : undefined,
+    ].filter((part): part is string => Boolean(part))
+
+    models.push({
+      value: model.id,
+      label: model.info?.name || model.id,
+      description:
+        details.length > 0 ?
details.join(' - ') : `Detected from ${AIMLAPI_LABEL}`, + }) + } + + return models +} diff --git a/src/services/api/bootstrap.ts b/src/services/api/bootstrap.ts index 8e5d4b4d2..489799806 100644 --- a/src/services/api/bootstrap.ts +++ b/src/services/api/bootstrap.ts @@ -17,11 +17,12 @@ import { isEssentialTrafficOnly } from '../../utils/privacyLevel.js' import type { ModelOption } from '../../utils/model/modelOptions.js' import { getLocalOpenAICompatibleProviderLabel, - listOpenAICompatibleModels, + listOpenAICompatibleModelOptions, } from '../../utils/providerDiscovery.js' import { getClaudeCodeUserAgent } from '../../utils/userAgent.js' import { getAdditionalModelOptionsCacheScope, + isAimlapiBaseUrl, resolveProviderRequest, } from './providerConfig.js' @@ -142,13 +143,19 @@ async function fetchLocalOpenAIModelOptions(): Promise ({ - value: model, - label: model, - description: `Detected from ${providerLabel}`, + value: model.value, + label: model.label, + description: + model.description || `Detected from ${providerLabel}`, })), } } diff --git a/src/services/api/openaiShim.test.ts b/src/services/api/openaiShim.test.ts index 5d3cb5525..8db078300 100644 --- a/src/services/api/openaiShim.test.ts +++ b/src/services/api/openaiShim.test.ts @@ -6,6 +6,7 @@ type FetchType = typeof globalThis.fetch const originalEnv = { OPENAI_BASE_URL: process.env.OPENAI_BASE_URL, OPENAI_API_KEY: process.env.OPENAI_API_KEY, + AIMLAPI_API_KEY: process.env.AIMLAPI_API_KEY, OPENAI_MODEL: process.env.OPENAI_MODEL, CLAUDE_CODE_USE_GITHUB: process.env.CLAUDE_CODE_USE_GITHUB, GITHUB_TOKEN: process.env.GITHUB_TOKEN, @@ -75,6 +76,7 @@ beforeEach(() => { process.env.OPENAI_BASE_URL = 'http://example.test/v1' process.env.OPENAI_API_KEY = 'test-key' delete process.env.OPENAI_MODEL + delete process.env.AIMLAPI_API_KEY delete process.env.CLAUDE_CODE_USE_GITHUB delete process.env.GITHUB_TOKEN delete process.env.GH_TOKEN @@ -93,6 +95,7 @@ beforeEach(() => { afterEach(() => { restoreEnv('OPENAI_BASE_URL', originalEnv.OPENAI_BASE_URL) restoreEnv('OPENAI_API_KEY', originalEnv.OPENAI_API_KEY) + restoreEnv('AIMLAPI_API_KEY', originalEnv.AIMLAPI_API_KEY) restoreEnv('OPENAI_MODEL', originalEnv.OPENAI_MODEL) restoreEnv('CLAUDE_CODE_USE_GITHUB', originalEnv.CLAUDE_CODE_USE_GITHUB) restoreEnv('GITHUB_TOKEN', originalEnv.GITHUB_TOKEN) @@ -172,6 +175,99 @@ test('strips canonical Anthropic headers from direct shim defaultHeaders', async expect(capturedHeaders?.get('x-safe-header')).toBe('keep-me') }) +test('adds AI/ML API attribution headers and uses AIMLAPI_API_KEY fallback', async () => { + let capturedHeaders: Headers | undefined + + process.env.OPENAI_BASE_URL = 'https://api.aimlapi.com/v1' + delete process.env.OPENAI_API_KEY + process.env.AIMLAPI_API_KEY = 'aiml-test-key' + + globalThis.fetch = (async (_input, init) => { + capturedHeaders = new Headers(init?.headers) + + return new Response( + JSON.stringify({ + id: 'chatcmpl-aiml', + model: 'gpt-4o', + choices: [ + { + message: { + role: 'assistant', + content: 'hello', + }, + finish_reason: 'stop', + }, + ], + }), + { + headers: { + 'Content-Type': 'application/json', + }, + }, + ) + }) as FetchType + + const client = createOpenAIShimClient({}) as OpenAIShimClient + + await client.beta.messages.create({ + model: 'gpt-4o', + system: 'test system', + messages: [{ role: 'user', content: 'hello' }], + max_tokens: 16, + stream: false, + }) + + expect(capturedHeaders?.get('authorization')).toBe('Bearer aiml-test-key') + expect(capturedHeaders?.get('http-referer')).toBe('OpenClaude') + 
expect(capturedHeaders?.get('x-title')).toBe('OpenClaude') +}) + +test('does not add AI/ML API attribution headers to other OpenAI-compatible providers', async () => { + let capturedHeaders: Headers | undefined + + process.env.OPENAI_BASE_URL = 'https://openrouter.ai/api/v1' + process.env.OPENAI_API_KEY = 'openrouter-test-key' + + globalThis.fetch = (async (_input, init) => { + capturedHeaders = new Headers(init?.headers) + + return new Response( + JSON.stringify({ + id: 'chatcmpl-openrouter', + model: 'openai/gpt-4o', + choices: [ + { + message: { + role: 'assistant', + content: 'hello', + }, + finish_reason: 'stop', + }, + ], + }), + { + headers: { + 'Content-Type': 'application/json', + }, + }, + ) + }) as FetchType + + const client = createOpenAIShimClient({}) as OpenAIShimClient + + await client.beta.messages.create({ + model: 'openai/gpt-4o', + system: 'test system', + messages: [{ role: 'user', content: 'hello' }], + max_tokens: 16, + stream: false, + }) + + expect(capturedHeaders?.get('authorization')).toBe('Bearer openrouter-test-key') + expect(capturedHeaders?.get('http-referer')).toBeNull() + expect(capturedHeaders?.get('x-title')).toBeNull() +}) + test('strips canonical Anthropic headers from per-request shim headers too', async () => { let capturedHeaders: Headers | undefined diff --git a/src/services/api/openaiShim.ts b/src/services/api/openaiShim.ts index c62b7f211..507aed66d 100644 --- a/src/services/api/openaiShim.ts +++ b/src/services/api/openaiShim.ts @@ -56,6 +56,11 @@ import { resolveProviderRequest, shouldAttemptLocalToollessRetry, } from './providerConfig.js' +import { + getAimlapiAttributionHeaders, + getAimlapiOpenAICompatibleApiKey, + syncAimlapiOpenAIEnv, +} from '../../providers/aimlapi/index.js' import { buildOpenAICompatibilityErrorMessage, classifyOpenAIHttpFailure, @@ -72,6 +77,7 @@ import { createStreamState, processStreamChunk, getStreamStats } from '../../uti type SecretValueSource = Partial<{ OPENAI_API_KEY: string + AIMLAPI_API_KEY: string CODEX_API_KEY: string GEMINI_API_KEY: string GOOGLE_API_KEY: string @@ -1554,6 +1560,7 @@ class OpenAIShimMessages { const apiKey = this.providerOverride?.apiKey ?? process.env.OPENAI_API_KEY ?? + getAimlapiOpenAICompatibleApiKey(request.baseUrl) ?? (isMiniMax ? process.env.MINIMAX_API_KEY : '') // Detect Azure endpoints by hostname (not raw URL) to prevent bypass via // path segments like https://evil.com/cognitiveservices.azure.com/ @@ -1581,6 +1588,8 @@ class OpenAIShimMessages { } } + Object.assign(headers, getAimlapiAttributionHeaders(request.baseUrl)) + if (isGithubCopilot) { Object.assign(headers, COPILOT_HEADERS) } else if (isGithubModels) { @@ -2120,6 +2129,8 @@ export function createOpenAIShimClient(options: { process.env.OPENAI_BASE_URL ??= GITHUB_COPILOT_BASE process.env.OPENAI_API_KEY ??= process.env.GITHUB_TOKEN ?? process.env.GH_TOKEN ?? 
'' + } else { + syncAimlapiOpenAIEnv(process.env) } const beta = new OpenAIShimBeta({ diff --git a/src/services/api/providerConfig.ts b/src/services/api/providerConfig.ts index 27d55cfa7..3997c435d 100644 --- a/src/services/api/providerConfig.ts +++ b/src/services/api/providerConfig.ts @@ -15,10 +15,16 @@ import { parseChatgptAccountId, } from './codexOAuthShared.js' import { DEFAULT_GEMINI_BASE_URL } from 'src/utils/providerProfile.js' +import { + AIMLAPI_DEFAULT_BASE_URL, + isAimlapiBaseUrl, +} from '../../providers/aimlapi/index.js' +export { isAimlapiBaseUrl } from '../../providers/aimlapi/index.js' export const DEFAULT_OPENAI_BASE_URL = 'https://api.openai.com/v1' export const DEFAULT_CODEX_BASE_URL = 'https://chatgpt.com/backend-api/codex' export const DEFAULT_MISTRAL_BASE_URL = 'https://api.mistral.ai/v1' +export const DEFAULT_AIMLAPI_BASE_URL = AIMLAPI_DEFAULT_BASE_URL /** Default GitHub Copilot API model when user selects copilot / github:copilot */ export const DEFAULT_GITHUB_MODELS_API_MODEL = 'gpt-4o' const warnedUndefinedEnvNames = new Set() @@ -617,7 +623,7 @@ export function getAdditionalModelOptionsCacheScope(): string | null { return null } - if (!isLocalProviderUrl(request.baseUrl)) { + if (!isLocalProviderUrl(request.baseUrl) && !isAimlapiBaseUrl(request.baseUrl)) { return null } diff --git a/src/utils/providerDiscovery.test.ts b/src/utils/providerDiscovery.test.ts index 3fcf5e0bf..1ae730a12 100644 --- a/src/utils/providerDiscovery.test.ts +++ b/src/utils/providerDiscovery.test.ts @@ -60,6 +60,72 @@ test('returns null when a local openai-compatible /models request fails', async ).resolves.toBeNull() }) +test('lists AI/ML API chat completion model options with metadata', async () => { + const { listOpenAICompatibleModelOptions } = await loadProviderDiscoveryModule() + + globalThis.fetch = mock((input, init) => { + const url = typeof input === 'string' ? 
input : input.url + expect(url).toBe('https://api.aimlapi.com/v1/models') + expect(init?.headers).toEqual({ + 'HTTP-Referer': 'OpenClaude', + 'X-Title': 'OpenClaude', + Authorization: 'Bearer aiml-key', + }) + + return Promise.resolve( + new Response( + JSON.stringify({ + data: [ + { + id: 'gpt-4o', + type: 'openai/responses/submit', + info: { name: 'GPT 4o', developer: 'OpenAI' }, + }, + { + id: 'gpt-4o', + type: 'openai/chat-completions', + info: { + name: 'GPT 4o', + developer: 'OpenAI', + contextLength: 128000, + }, + }, + { + id: 'deepseek-chat', + type: 'openai/chat-completions', + info: { name: 'DeepSeek Chat', developer: 'DeepSeek' }, + }, + { + id: 'image-model', + type: 'openai/images/generations', + info: { name: 'Image Model', developer: 'Example' }, + }, + ], + }), + { status: 200 }, + ), + ) + }) as typeof globalThis.fetch + + await expect( + listOpenAICompatibleModelOptions({ + baseUrl: 'https://api.aimlapi.com/v1', + apiKey: 'aiml-key', + }), + ).resolves.toEqual([ + { + value: 'gpt-4o', + label: 'GPT 4o', + description: 'OpenAI - 128000 context', + }, + { + value: 'deepseek-chat', + label: 'DeepSeek Chat', + description: 'DeepSeek', + }, + ]) +}) + test('detects LM Studio from the default localhost port', async () => { const { getLocalOpenAICompatibleProviderLabel } = await loadProviderDiscoveryModule() @@ -360,4 +426,4 @@ test('atomic chat readiness returns loaded model ids when ready', async () => { state: 'ready', models: ['Qwen3_5-4B_Q4_K_M', 'llama-3.1-8b-instruct'], }) -}) \ No newline at end of file +}) diff --git a/src/utils/providerDiscovery.ts b/src/utils/providerDiscovery.ts index bd0e90c1f..566320425 100644 --- a/src/utils/providerDiscovery.ts +++ b/src/utils/providerDiscovery.ts @@ -1,5 +1,12 @@ import type { OllamaModelDescriptor } from './providerRecommendation.ts' import { DEFAULT_OPENAI_BASE_URL } from '../services/api/providerConfig.js' +import { + AIMLAPI_LABEL, + getAimlapiAttributionHeaders, + isAimlapiBaseUrl, + mapAimlapiModelCatalog, + type AimlapiModelCatalogPayload, +} from '../providers/aimlapi/index.js' export const DEFAULT_OLLAMA_BASE_URL = 'http://localhost:11434' export const DEFAULT_ATOMIC_CHAT_BASE_URL = 'http://127.0.0.1:1337' @@ -152,6 +159,9 @@ export function getLocalOpenAICompatibleProviderLabel(baseUrl?: string): string const path = parsed.pathname.toLowerCase() const haystack = `${hostname} ${path}` + if (isAimlapiBaseUrl(baseUrl)) { + return AIMLAPI_LABEL + } if ( host.endsWith(':1234') || haystack.includes('lmstudio') || @@ -224,17 +234,45 @@ export async function listOpenAICompatibleModels(options?: { baseUrl?: string apiKey?: string }): Promise { + const modelOptions = await listOpenAICompatibleModelOptions(options) + return modelOptions?.map(model => model.value) ?? null +} + +type OpenAICompatibleModelsPayload = AimlapiModelCatalogPayload & { + data?: Array<{ + id?: string + type?: string + info?: { + name?: string + developer?: string + contextLength?: number + } + }> +} + +export type OpenAICompatibleModelOption = { + value: string + label: string + description: string +} + +export async function listOpenAICompatibleModelOptions(options?: { + baseUrl?: string + apiKey?: string +}): Promise { const { signal, clear } = withTimeoutSignal(5000) + const baseUrl = getOpenAICompatibleModelsBaseUrl(options?.baseUrl) + const aimlapi = isAimlapiBaseUrl(baseUrl) + const headers: Record = { + ...getAimlapiAttributionHeaders(baseUrl), + ...(options?.apiKey ? 
{ Authorization: `Bearer ${options.apiKey}` } : {}), + } try { const response = await fetch( - `${getOpenAICompatibleModelsBaseUrl(options?.baseUrl)}/models`, + `${baseUrl}/models`, { method: 'GET', - headers: options?.apiKey - ? { - Authorization: `Bearer ${options.apiKey}`, - } - : undefined, + headers: Object.keys(headers).length > 0 ? headers : undefined, signal, }, ) @@ -242,17 +280,36 @@ export async function listOpenAICompatibleModels(options?: { return null } - const data = (await response.json()) as { - data?: Array<{ id?: string }> + const data = (await response.json()) as OpenAICompatibleModelsPayload + if (aimlapi) { + return mapAimlapiModelCatalog(data) } - return Array.from( - new Set( - (data.data ?? []) - .filter(model => Boolean(model.id)) - .map(model => model.id!), - ), - ) + const seen = new Set() + const models: OpenAICompatibleModelOption[] = [] + + for (const model of data.data ?? []) { + if (!model.id || seen.has(model.id)) { + continue + } + seen.add(model.id) + const details = [ + model.info?.developer, + typeof model.info?.contextLength === 'number' + ? `${model.info.contextLength} context` + : undefined, + ].filter((part): part is string => Boolean(part)) + models.push({ + value: model.id, + label: model.info?.name || model.id, + description: + details.length > 0 + ? details.join(' · ') + : `Detected from ${getLocalOpenAICompatibleProviderLabel(baseUrl)}`, + }) + } + + return models } catch { return null } finally { diff --git a/src/utils/providerProfile.ts b/src/utils/providerProfile.ts index df3aafaf8..642775bb6 100644 --- a/src/utils/providerProfile.ts +++ b/src/utils/providerProfile.ts @@ -50,6 +50,7 @@ const PROFILE_ENV_KEYS = [ 'OPENAI_BASE_URL', 'OPENAI_MODEL', 'OPENAI_API_KEY', + 'AIMLAPI_API_KEY', 'CODEX_API_KEY', 'CODEX_CREDENTIAL_SOURCE', 'CHATGPT_ACCOUNT_ID', @@ -73,6 +74,7 @@ const PROFILE_ENV_KEYS = [ const SECRET_ENV_KEYS = [ 'OPENAI_API_KEY', + 'AIMLAPI_API_KEY', 'CODEX_API_KEY', 'GEMINI_API_KEY', 'GOOGLE_API_KEY', @@ -87,6 +89,7 @@ export type ProfileEnv = { OPENAI_BASE_URL?: string OPENAI_MODEL?: string OPENAI_API_KEY?: string + AIMLAPI_API_KEY?: string CODEX_API_KEY?: string CODEX_CREDENTIAL_SOURCE?: 'oauth' | 'existing' CHATGPT_ACCOUNT_ID?: string @@ -115,6 +118,7 @@ export type ProfileFile = { type SecretValueSource = Partial< Record< | 'OPENAI_API_KEY' + | 'AIMLAPI_API_KEY' | 'CODEX_API_KEY' | 'GEMINI_API_KEY' | 'GOOGLE_API_KEY' diff --git a/src/utils/providerProfiles.test.ts b/src/utils/providerProfiles.test.ts index d27facf50..3ec85d246 100644 --- a/src/utils/providerProfiles.test.ts +++ b/src/utils/providerProfiles.test.ts @@ -22,6 +22,7 @@ const RESTORED_KEYS = [ 'OPENAI_API_BASE', 'OPENAI_MODEL', 'OPENAI_API_KEY', + 'AIMLAPI_API_KEY', 'ANTHROPIC_BASE_URL', 'ANTHROPIC_MODEL', 'ANTHROPIC_API_KEY', @@ -518,6 +519,21 @@ describe('persistActiveProviderProfileModel', () => { }) describe('getProviderPresetDefaults', () => { + test('aimlapi preset defaults to AI/ML API with AIMLAPI_API_KEY fallback', async () => { + const { getProviderPresetDefaults } = await importFreshProviderProfileModules() + process.env.AIMLAPI_API_KEY = 'aiml-test-key' + delete process.env.OPENAI_API_KEY + + const defaults = getProviderPresetDefaults('aimlapi') + + expect(defaults.provider).toBe('openai') + expect(defaults.name).toBe('AI/ML API') + expect(defaults.baseUrl).toBe('https://api.aimlapi.com/v1') + expect(defaults.model).toBe('gpt-4o') + expect(defaults.apiKey).toBe('aiml-test-key') + expect(defaults.requiresApiKey).toBe(true) + }) + test('ollama preset 
defaults to a local Ollama model', async () => { const { getProviderPresetDefaults } = await importFreshProviderProfileModules() delete process.env.OPENAI_MODEL diff --git a/src/utils/providerProfiles.ts b/src/utils/providerProfiles.ts index 6da783299..f2323cb2f 100644 --- a/src/utils/providerProfiles.ts +++ b/src/utils/providerProfiles.ts @@ -14,8 +14,14 @@ import { buildOpenAIProfileEnv, type ProviderProfile as ProviderProfileStartup, } from './providerProfile.js' +import { normalizeRecommendationGoal } from './providerRecommendation.js' +import { + getAimlapiPresetDefaults, + isAimlapiBaseUrl, +} from '../providers/aimlapi/index.js' export type ProviderPreset = + | 'aimlapi' | 'anthropic' | 'ollama' | 'openai' @@ -139,6 +145,8 @@ export function getProviderPresetDefaults( preset: ProviderPreset, ): ProviderPresetDefaults { switch (preset) { + case 'aimlapi': + return getAimlapiPresetDefaults() case 'anthropic': return { provider: 'anthropic', @@ -529,6 +537,7 @@ export function clearProviderProfileEnvFromProcessEnv( delete processEnv.MISTRAL_API_KEY // Clear provider-specific API keys + delete processEnv.AIMLAPI_API_KEY delete processEnv.MINIMAX_API_KEY delete processEnv.NVIDIA_API_KEY delete processEnv.NVIDIA_NIM @@ -553,6 +562,7 @@ export function applyProviderProfileToProcessEnv(profile: ProviderProfile): void delete process.env.OPENAI_API_BASE delete process.env.OPENAI_MODEL delete process.env.OPENAI_API_KEY + delete process.env.AIMLAPI_API_KEY return } @@ -604,6 +614,9 @@ export function applyProviderProfileToProcessEnv(profile: ProviderProfile): void if (baseUrl.includes('nvidia') || baseUrl.includes('integrate.api.nvidia')) { process.env.NVIDIA_API_KEY = profile.apiKey } + if (isAimlapiBaseUrl(profile.baseUrl)) { + process.env.AIMLAPI_API_KEY = profile.apiKey + } } else { delete process.env.OPENAI_API_KEY } @@ -893,6 +906,7 @@ export function setActiveProviderProfile( // anthropic and all openai-compatible providers return ( buildOpenAIProfileEnv({ + goal: normalizeRecommendationGoal(process.env.OPENCLAUDE_PROFILE_GOAL), model: activeProfile.model, baseUrl: activeProfile.baseUrl, apiKey: activeProfile.apiKey, diff --git a/src/utils/providerSecrets.ts b/src/utils/providerSecrets.ts index 8f90d1636..e66911cd9 100644 --- a/src/utils/providerSecrets.ts +++ b/src/utils/providerSecrets.ts @@ -1,5 +1,6 @@ const SECRET_ENV_KEYS = [ 'OPENAI_API_KEY', + 'AIMLAPI_API_KEY', 'CODEX_API_KEY', 'GEMINI_API_KEY', 'GOOGLE_API_KEY', diff --git a/src/utils/providerValidation.ts b/src/utils/providerValidation.ts index b726ee9de..6d0ea9f24 100644 --- a/src/utils/providerValidation.ts +++ b/src/utils/providerValidation.ts @@ -5,6 +5,12 @@ import { resolveCodexApiCredentials, resolveProviderRequest, } from '../services/api/providerConfig.js' +import { + AIMLAPI_API_KEY_ENV, + AIMLAPI_LABEL, + hasAimlapiApiKey, + isAimlapiBaseUrl, +} from '../providers/aimlapi/index.js' import { getGlobalClaudeFile } from './env.js' import { isBareMode } from './envUtils.js' import { @@ -12,7 +18,10 @@ import { resolveGeminiCredential, } from './geminiAuth.js' import { PROFILE_FILE_NAME } from './providerProfile.js' -import { redactSecretValueForDisplay } from './providerSecrets.js' +import { + type SecretValueSource, + redactSecretValueForDisplay, +} from './providerSecrets.js' function isEnvTruthy(value: string | undefined): boolean { if (!value) return false @@ -65,12 +74,17 @@ function checkGithubTokenStatus( return 'valid' } -function getOpenAIMissingKeyMessage(): string { +function 
getOpenAIMissingKeyMessage(options?: { + providerLabel?: string + acceptedKeys?: string +}): string { const globalConfigPath = getGlobalClaudeFile() const profilePath = resolve(process.cwd(), PROFILE_FILE_NAME) + const providerLabel = options?.providerLabel ?? 'OpenAI-compatible provider' + const acceptedKeys = options?.acceptedKeys ?? 'OPENAI_API_KEY' return [ - 'OPENAI_API_KEY is required when CLAUDE_CODE_USE_OPENAI=1 and OPENAI_BASE_URL is not local.', + `${acceptedKeys} is required for ${providerLabel} when CLAUDE_CODE_USE_OPENAI=1 and OPENAI_BASE_URL is not local.`, `To recover, run /provider and switch provider, or set CLAUDE_CODE_USE_OPENAI=0 in your shell environment.`, `Saved startup settings can come from ${globalConfigPath} or ${profilePath}.`, ].join('\n') @@ -84,7 +98,7 @@ export async function getProviderValidationError( ) => Promise }, ): Promise { - const secretSource = env + const secretSource = env as SecretValueSource const useOpenAI = isEnvTruthy(env.CLAUDE_CODE_USE_OPENAI) const useGithub = isEnvTruthy(env.CLAUDE_CODE_USE_GITHUB) @@ -149,12 +163,20 @@ export async function getProviderValidationError( return null } - if (!env.OPENAI_API_KEY && !isLocalProviderUrl(request.baseUrl)) { + const hasAimlapiKey = hasAimlapiApiKey(request.baseUrl, env) + if (!env.OPENAI_API_KEY && !hasAimlapiKey && !isLocalProviderUrl(request.baseUrl)) { const hasGithubToken = !!(env.GITHUB_TOKEN?.trim() || env.GH_TOKEN?.trim()) if (useGithub && hasGithubToken) { return null } - return getOpenAIMissingKeyMessage() + return getOpenAIMissingKeyMessage( + isAimlapiBaseUrl(request.baseUrl) + ? { + providerLabel: AIMLAPI_LABEL, + acceptedKeys: `${AIMLAPI_API_KEY_ENV} or OPENAI_API_KEY`, + } + : undefined, + ) } return null
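A minimal sketch of how the new `src/providers/aimlapi/index.ts` helpers compose, assuming `AIMLAPI_API_KEY` is set in the environment. The `listAimlapiChatModelIds` function, the import path, and the raw `fetch` call are illustrative only and are not part of the patch; inside OpenClaude the equivalent path is `listOpenAICompatibleModelOptions` in `providerDiscovery.ts`.

```ts
import {
  AIMLAPI_DEFAULT_BASE_URL,
  getAimlapiApiKey,
  getAimlapiAttributionHeaders,
  mapAimlapiModelCatalog,
  type AimlapiModelCatalogPayload,
} from './src/providers/aimlapi/index.js'

// Illustrative helper (not in the patch): fetch the AI/ML API catalog and
// keep only chat/tool-capable model ids, as providerDiscovery.ts does.
async function listAimlapiChatModelIds(): Promise<string[]> {
  const response = await fetch(`${AIMLAPI_DEFAULT_BASE_URL}/models`, {
    headers: {
      // Attribution headers are returned only for api.aimlapi.com base URLs.
      ...getAimlapiAttributionHeaders(AIMLAPI_DEFAULT_BASE_URL),
      // AIMLAPI_API_KEY takes precedence, with OPENAI_API_KEY as fallback.
      Authorization: `Bearer ${getAimlapiApiKey(process.env)}`,
    },
  })
  const payload = (await response.json()) as AimlapiModelCatalogPayload
  // mapAimlapiModelCatalog keeps `openai/chat-completions` entries and dedupes ids.
  return mapAimlapiModelCatalog(payload).map(option => option.value)
}
```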