Commits
17 commits
1c38084
feat(github): add GITHUB_MODEL env var, show resolved model, surface …
LoackyBit Apr 24, 2026
de4c1b2
feat: update GitHub provider to support GITHUB_MODEL environment vari…
LoackyBit Apr 25, 2026
db1f9f3
Merge pull request #1 from LoackyBit/feature/github-model-env-var
LoackyBit Apr 25, 2026
65c00e1
Merge branch 'Gitlawb:main' into main
LoackyBit Apr 25, 2026
c451548
feat: implement dynamic model fetching from GitHub Models API with pr…
LoackyBit Apr 25, 2026
144cc6e
Merge pull request #2 from LoackyBit/feature/github-model-env-var
LoackyBit Apr 25, 2026
b3e1a61
chore(github): remove obsolete claude pseudo-models for github
LoackyBit Apr 25, 2026
d1c1829
feat(github): integrate GitHub model fetching and caching mechanisms
LoackyBit Apr 26, 2026
ee6f94c
refactor: add support for GitHub Models Azure endpoint and implement …
LoackyBit Apr 27, 2026
20599b2
refactor: replace hardcoded Copilot models with dynamic fetching from…
LoackyBit Apr 27, 2026
3f8d867
chore(file): remove local prompt file
LoackyBit Apr 28, 2026
6d02223
feat(copilot): implement github copilot suggestions
LoackyBit Apr 28, 2026
9cc8902
chore(sync): sync PR with upstream
LoackyBit Apr 28, 2026
b717f8f
Merge branch 'main' into main
LoackyBit Apr 28, 2026
a42bf48
fix(models): fix github model list by retrieving code from commit 205…
LoackyBit Apr 29, 2026
29130a8
fix(suggestion): implement @Meetpatel006 suggestions
LoackyBit Apr 29, 2026
faaa5e6
Filter models by model_picker_enabled state
LoackyBit May 1, 2026
42 changes: 40 additions & 2 deletions src/commands/model/model.test.tsx
@@ -1,7 +1,6 @@
import { afterEach, expect, mock, test } from 'bun:test'

import { getAdditionalModelOptionsCacheScope } from '../../services/api/providerConfig.js'
import { getAPIProvider } from '../../utils/model/providers.js'

const originalEnv = {
CLAUDE_CODE_USE_OPENAI: process.env.CLAUDE_CODE_USE_OPENAI,
@@ -56,7 +55,8 @@ test('opens the model picker without awaiting local model discovery refresh', as

expect(getAdditionalModelOptionsCacheScope()).toBe('openai:http://127.0.0.1:8080/v1')

const { call } = await import('./model.js')
const nonce = `${Date.now()}-${Math.random()}`
const { call } = await import(`./model.js?ts=${nonce}`)
const result = await Promise.race([
call(() => {}, {} as never, ''),
new Promise(resolve => setTimeout(() => resolve('timeout'), 50)),
@@ -66,3 +66,41 @@ test('opens the model picker without awaiting local model discovery refresh', as

expect(result).not.toBe('timeout')
})

test('awaits GitHub model refresh before opening picker when cache is empty', async () => {
process.env.CLAUDE_CODE_USE_GITHUB = '1'
delete process.env.CLAUDE_CODE_USE_OPENAI
delete process.env.CLAUDE_CODE_USE_GEMINI
delete process.env.CLAUDE_CODE_USE_MISTRAL
delete process.env.CLAUDE_CODE_USE_BEDROCK
delete process.env.CLAUDE_CODE_USE_VERTEX
delete process.env.CLAUDE_CODE_USE_FOUNDRY

let resolveRefresh: (() => void) | undefined
const refreshGithubModelsCache = mock(
() =>
new Promise<void>(resolve => {
resolveRefresh = resolve
}),
)

mock.module('../../utils/model/githubModels.js', () => ({
getCachedGithubModelOptions: () => [],
refreshGithubModelsCache,
}))

const nonce = `${Date.now()}-${Math.random()}`
const { call } = await import(`./model.js?ts=${nonce}`)
const pendingCall = call(() => {}, {} as never, '')
const result = await Promise.race([
pendingCall,
new Promise(resolve => setTimeout(() => resolve('timeout'), 50)),
])

expect(result).toBe('timeout')

resolveRefresh?.()
await pendingCall

expect(refreshGithubModelsCache).toHaveBeenCalled()
})
23 changes: 23 additions & 0 deletions src/commands/model/model.tsx
@@ -16,6 +16,7 @@ import { checkOpus1mAccess, checkSonnet1mAccess } from '../../utils/model/check1
import type { ModelOption } from '../../utils/model/modelOptions.js';
import { discoverOpenAICompatibleModelOptions } from '../../utils/model/openaiModelDiscovery.js';
import { getAPIProvider } from '../../utils/model/providers.js';
import { getCachedGithubModelOptions, refreshGithubModelsCache } from '../../utils/model/githubModels.js';
import { getActiveOpenAIModelOptionsCache, setActiveOpenAIModelOptionsCache } from '../../utils/providerProfiles.js';
import { getDefaultMainLoopModelSetting, isOpus1mMergeEnabled, renderDefaultModelSetting } from '../../utils/model/model.js';
import { isModelAllowed } from '../../utils/model/modelAllowlist.js';
@@ -301,6 +302,23 @@ async function refreshOpenAIModelOptionsCache(): Promise<void> {
// Keep /model usable even if endpoint discovery fails.
}
}

async function refreshGithubModelOptionsCache(): Promise<void> {
if (getAPIProvider() !== 'github') {
return
}

if (getCachedGithubModelOptions().length > 0) {
return
}

try {
await refreshGithubModelsCache()
} catch {
// Keep /model usable even if Copilot model discovery fails.
}
}

export const call: LocalJSXCommandCall = async (onDone, _context, args) => {
args = args?.trim() || '';
if (COMMON_INFO_ARGS.includes(args)) {
@@ -324,6 +342,11 @@ export const call: LocalJSXCommandCall = async (onDone, _context, args) => {
if (getAdditionalModelOptionsCacheScope()?.startsWith('openai:')) {
void refreshOpenAIModelOptionsCache();
}

if (getAPIProvider() === 'github') {
await refreshGithubModelOptionsCache()
}

return <ModelPickerWrapper onDone={onDone} />;
};
function renderModelLabel(model: string | null): string {
35 changes: 22 additions & 13 deletions src/components/ProviderManager.tsx
@@ -36,6 +36,7 @@ import {
readGithubModelsToken,
readGithubModelsTokenAsync,
} from '../utils/githubModelsCredentials.js'
import { refreshGithubModelsCache } from '../utils/model/githubModels.js'
import {
probeAtomicChatReadiness,
probeOllamaGenerationReadiness,
@@ -168,9 +169,8 @@ const FORM_STEPS: Array<{
]

const GITHUB_PROVIDER_ID = '__github_models__'
const GITHUB_PROVIDER_LABEL = 'GitHub Models'
const GITHUB_PROVIDER_DEFAULT_MODEL = 'github:copilot'
const GITHUB_PROVIDER_DEFAULT_BASE_URL = 'https://models.github.ai/inference'
const GITHUB_PROVIDER_LABEL = 'GitHub Copilot'
const GITHUB_PROVIDER_DEFAULT_BASE_URL = 'https://api.githubcopilot.com'
const CODEX_OAUTH_PROVIDER_NAME = 'Codex OAuth'
const CODEX_OAUTH_PROVIDER_MODEL = 'codexplan'

Expand Down Expand Up @@ -260,9 +260,9 @@ function getGithubProviderModel(
processEnv: NodeJS.ProcessEnv = process.env,
): string {
if (isEnvTruthy(processEnv.CLAUDE_CODE_USE_GITHUB)) {
return processEnv.OPENAI_MODEL?.trim() || GITHUB_PROVIDER_DEFAULT_MODEL
return ''
}
return GITHUB_PROVIDER_DEFAULT_MODEL
return ''
}
Comment on lines 259 to 266

Copilot AI Apr 29, 2026

getGithubProviderModel() now always returns an empty string, so GitHub provider summaries never show the configured model (and the function becomes dead code). If the intent is to hide the model, consider inlining/removing this helper; otherwise, return the resolved model from GITHUB_MODEL/OPENAI_MODEL/settings so the summary remains informative.
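One way to read that suggestion is the minimal sketch below. The helper name is hypothetical, it reuses the isEnvTruthy helper already imported in ProviderManager.tsx, and the GITHUB_MODEL → OPENAI_MODEL → settings order plus the 'github:copilot' fallback are the reviewer's proposal, not what this diff implements:

// Sketch only: resolve a model for the provider summary instead of returning ''.
// Hypothetical helper; fallback order and default follow the reviewer's suggestion.
function resolveGithubProviderModelForSummary(
  processEnv: NodeJS.ProcessEnv = process.env,
  settingsModel?: string,
): string {
  if (!isEnvTruthy(processEnv.CLAUDE_CODE_USE_GITHUB)) {
    return ''
  }
  return (
    processEnv.GITHUB_MODEL?.trim() ||
    processEnv.OPENAI_MODEL?.trim() ||
    settingsModel?.trim() ||
    'github:copilot'
  )
}

getGithubProviderSummary could then append the resolved value only when it is non-empty, which is what the modelSummary guard in the hunk below already does.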

function getGithubProviderSummary(
@@ -277,7 +277,8 @@ function getGithubProviderSummary(
? 'token via env'
: 'no token found'
const activeSuffix = isActive ? ' (active)' : ''
return `github-models · ${GITHUB_PROVIDER_DEFAULT_BASE_URL} · ${getGithubProviderModel(processEnv)} · ${credentialSummary}${activeSuffix}`
const modelSummary = getGithubProviderModel(processEnv)
return `github-copilot · ${GITHUB_PROVIDER_DEFAULT_BASE_URL}${modelSummary ? ` · ${modelSummary}` : ''} · ${credentialSummary}${activeSuffix}`
}

function describeAtomicChatSelectionIssue(
Expand Down Expand Up @@ -627,6 +628,18 @@ export function ProviderManager({ mode, onDone }: Props): React.ReactNode {
}
}, [refreshCodexOAuthCredentialState, refreshGithubProviderState])

React.useEffect(() => {
if (!githubProviderAvailable || githubCredentialSource === 'none') {
return
}

void refreshGithubModelsCache().catch(error => {
setErrorMessage(
`Could not load GitHub models: ${error instanceof Error ? error.message : String(error)}`,
)
})
}, [githubCredentialSource, githubProviderAvailable])

React.useEffect(() => {
if (screen !== 'select-ollama-model') {
return
@@ -810,7 +823,7 @@ export function ProviderManager({ mode, onDone }: Props): React.ReactNode {

setAppState(prev => ({
...prev,
LoackyBit marked this conversation as resolved.
mainLoopModel: GITHUB_PROVIDER_DEFAULT_MODEL,
mainLoopModel: 'github:copilot',
mainLoopModelForSession: null,
}))
refreshProfiles()
@@ -903,7 +916,6 @@ export function ProviderManager({ mode, onDone }: Props): React.ReactNode {
const { error } = updateSettingsForSource('userSettings', {
env: {
CLAUDE_CODE_USE_GITHUB: '1',
OPENAI_MODEL: GITHUB_PROVIDER_DEFAULT_MODEL,
OPENAI_API_KEY: undefined as any,
OPENAI_ORG: undefined as any,
OPENAI_PROJECT: undefined as any,
@@ -922,7 +934,6 @@ export function ProviderManager({ mode, onDone }: Props): React.ReactNode {
}

process.env.CLAUDE_CODE_USE_GITHUB = '1'
process.env.OPENAI_MODEL = GITHUB_PROVIDER_DEFAULT_MODEL
delete process.env.OPENAI_API_KEY
delete process.env.OPENAI_ORG
delete process.env.OPENAI_PROJECT
@@ -952,7 +963,6 @@ export function ProviderManager({ mode, onDone }: Props): React.ReactNode {
const { error } = updateSettingsForSource('userSettings', {
env: {
CLAUDE_CODE_USE_GITHUB: undefined as any,
OPENAI_MODEL: undefined as any,
OPENAI_BASE_URL: undefined as any,
OPENAI_API_BASE: undefined as any,
},
@@ -972,7 +982,6 @@ export function ProviderManager({ mode, onDone }: Props): React.ReactNode {

delete process.env.CLAUDE_CODE_USE_GITHUB
delete process.env[GITHUB_MODELS_HYDRATED_ENV_MARKER]
delete process.env.OPENAI_MODEL
delete process.env.OPENAI_API_KEY
delete process.env.OPENAI_ORG
delete process.env.OPENAI_PROJECT
@@ -1581,7 +1590,7 @@ export function ProviderManager({ mode, onDone }: Props): React.ReactNode {
isGithubCredentialSourceResolved ? (
<Text dimColor>No provider profiles configured yet.</Text>
) : (
<Text dimColor>Checking GitHub Models credentials...</Text>
<Text dimColor>Checking GitHub Copilot credentials...</Text>
)
) : (
<>
@@ -1701,7 +1710,7 @@ export function ProviderManager({ mode, onDone }: Props): React.ReactNode {
label: isGithubActive
? `${GITHUB_PROVIDER_LABEL} (active)`
: GITHUB_PROVIDER_LABEL,
description: `github-models · ${GITHUB_PROVIDER_DEFAULT_BASE_URL} · ${getGithubProviderModel()}`,
description: `github-copilot · ${GITHUB_PROVIDER_DEFAULT_BASE_URL}`,
})
}

45 changes: 31 additions & 14 deletions src/components/StartupScreen.ts
@@ -7,8 +7,8 @@

import { isLocalProviderUrl, resolveProviderRequest } from '../services/api/providerConfig.js'
import { getLocalOpenAICompatibleProviderLabel } from '../utils/providerDiscovery.js'
import { getSettings_DEPRECATED } from '../utils/settings/settings.js'
import { parseUserSpecifiedModel } from '../utils/model/model.js'
import { getSettingsForSource } from '../utils/settings/settings.js'
import { getPublicModelDisplayName, parseUserSpecifiedModel } from '../utils/model/model.js'
import { containsExactZaiGlmModelId, isZaiBaseUrl } from '../utils/zaiProvider.js'

declare const MACRO: { VERSION: string; DISPLAY_VERSION?: string }
@@ -85,32 +85,50 @@ const LOGO_CLAUDE = [
// ─── Provider detection ───────────────────────────────────────────────────────

export function detectProvider(modelOverride?: string): { name: string; model: string; baseUrl: string; isLocal: boolean } {
const settings = getSettingsForSource('userSettings') || {}
const useGemini = process.env.CLAUDE_CODE_USE_GEMINI === '1' || process.env.CLAUDE_CODE_USE_GEMINI === 'true'
const useGithub = process.env.CLAUDE_CODE_USE_GITHUB === '1' || process.env.CLAUDE_CODE_USE_GITHUB === 'true'
const useOpenAI = process.env.CLAUDE_CODE_USE_OPENAI === '1' || process.env.CLAUDE_CODE_USE_OPENAI === 'true'
const useMistral = process.env.CLAUDE_CODE_USE_MISTRAL === '1' || process.env.CLAUDE_CODE_USE_MISTRAL === 'true'

if (useGemini) {
const model = modelOverride || process.env.GEMINI_MODEL || 'gemini-2.0-flash'
const model = modelOverride || settings.model || process.env.GEMINI_MODEL || 'gemini-2.0-flash'
const baseUrl = process.env.GEMINI_BASE_URL || 'https://generativelanguage.googleapis.com/v1beta/openai'
return { name: 'Google Gemini', model, baseUrl, isLocal: false }
return {
name: 'Google Gemini',
model: getPublicModelDisplayName(model) ?? model,
baseUrl,
isLocal: false,
}
}

if (useMistral) {
const model = modelOverride || process.env.MISTRAL_MODEL || 'devstral-latest'
const model = modelOverride || settings.model || process.env.MISTRAL_MODEL || 'devstral-latest'
const baseUrl = process.env.MISTRAL_BASE_URL || 'https://api.mistral.ai/v1'
return { name: 'Mistral', model, baseUrl, isLocal: false }
return {
name: 'Mistral',
model: getPublicModelDisplayName(model) ?? model,
baseUrl,
isLocal: false,
}
}

if (useGithub) {
const model = modelOverride || process.env.OPENAI_MODEL || 'github:copilot'
const baseUrl =
process.env.OPENAI_BASE_URL || 'https://api.githubcopilot.com'
return { name: 'GitHub Copilot', model, baseUrl, isLocal: false }
const rawModel = process.env.OPENAI_MODEL?.trim() || 'github:copilot'
const resolvedRequest = resolveProviderRequest({
model: rawModel,
baseUrl: process.env.OPENAI_BASE_URL,
})
LoackyBit marked this conversation as resolved.
Comment on lines 116 to +121

Copilot AI Apr 29, 2026

In GitHub mode, the startup screen ignores modelOverride, settings.model, and the new GITHUB_MODEL env var, and instead reads OPENAI_MODEL directly. This can display the wrong model (and will always show the fallback when ProviderManager no longer sets OPENAI_MODEL). Prefer the same resolution order used elsewhere: modelOverride || settings.model || process.env.GITHUB_MODEL || process.env.OPENAI_MODEL || 'github:copilot', then pass that through resolveProviderRequest.
const baseUrl = resolvedRequest.baseUrl
let displayModel = resolvedRequest.resolvedModel
if (resolvedRequest.reasoning?.effort) {
displayModel = `${displayModel} (${resolvedRequest.reasoning.effort})`
}
return { name: 'GitHub Copilot', model: displayModel, baseUrl, isLocal: false }
}
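A sketch of the useGithub branch rewritten with the precedence the review comment above suggests. It assumes settings and modelOverride are in scope as elsewhere in detectProvider, that resolveProviderRequest accepts the same { model, baseUrl } arguments used in the useOpenAI branch, and that GITHUB_MODEL is read here only because the reviewer proposes it, not because this diff does:

// Sketch only: reviewer-suggested resolution order for the GitHub branch.
if (useGithub) {
  const rawModel =
    modelOverride ||
    settings.model ||
    process.env.GITHUB_MODEL?.trim() ||
    process.env.OPENAI_MODEL?.trim() ||
    'github:copilot'
  const resolvedRequest = resolveProviderRequest({
    model: rawModel,
    baseUrl: process.env.OPENAI_BASE_URL,
  })
  let displayModel = resolvedRequest.resolvedModel
  if (resolvedRequest.reasoning?.effort) {
    displayModel = `${displayModel} (${resolvedRequest.reasoning.effort})`
  }
  return { name: 'GitHub Copilot', model: displayModel, baseUrl: resolvedRequest.baseUrl, isLocal: false }
}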

if (useOpenAI) {
const rawModel = modelOverride || process.env.OPENAI_MODEL || 'gpt-4o'
const rawModel = modelOverride || settings.model || process.env.OPENAI_MODEL || 'gpt-4o'
const resolvedRequest = resolveProviderRequest({
model: rawModel,
baseUrl: process.env.OPENAI_BASE_URL,
@@ -158,7 +176,7 @@ export function detectProvider(modelOverride?: string): { name: string; model: s
else if (isLocal) name = getLocalOpenAICompatibleProviderLabel(baseUrl)

// Resolve model alias to actual model name + reasoning effort
let displayModel = resolvedRequest.resolvedModel
let displayModel = getPublicModelDisplayName(resolvedRequest.resolvedModel) ?? resolvedRequest.resolvedModel
if (resolvedRequest.reasoning?.effort) {
displayModel = `${displayModel} (${resolvedRequest.reasoning.effort})`
}
@@ -167,12 +185,11 @@ export function detectProvider(modelOverride?: string): { name: string; model: s
}

// Default: Anthropic - check settings.model first, then env vars
const settings = getSettings_DEPRECATED() || {}
const modelSetting = modelOverride || settings.model || process.env.ANTHROPIC_MODEL || process.env.CLAUDE_MODEL || 'claude-sonnet-4-6'
const resolvedModel = parseUserSpecifiedModel(modelSetting)
const baseUrl = process.env.ANTHROPIC_BASE_URL ?? 'https://api.anthropic.com'
const isLocal = isLocalProviderUrl(baseUrl)
return { name: 'Anthropic', model: resolvedModel, baseUrl, isLocal }
return { name: 'Anthropic', model: getPublicModelDisplayName(resolvedModel) ?? resolvedModel, baseUrl, isLocal }
}

// ─── Box drawing ──────────────────────────────────────────────────────────────
10 changes: 9 additions & 1 deletion src/cost-tracker.ts
@@ -50,11 +50,13 @@
getContextWindowForModel,
getModelMaxOutputTokens,
} from './utils/context.js'
import { isEnvTruthy } from './utils/envUtils.js'
import { isFastModeEnabled } from './utils/fastMode.js'
import { formatDuration, formatNumber } from './utils/format.js'
import type { FpsMetrics } from './utils/fpsTracker.js'
import { getCanonicalName } from './utils/model/model.js'
import { calculateUSDCost } from './utils/modelCost.js'
import { formatGithubRateLimitSummary } from './utils/githubRateLimit.js'
export {
getTotalCostUSD as getTotalCost,
getTotalDuration,
@@ -257,12 +259,18 @@ export function formatTotalCost(): string {

const modelUsageDisplay = formatModelUsage()

// Append GitHub rate-limit summary if available
const rateLimitLine = isEnvTruthy(process.env.CLAUDE_CODE_USE_GITHUB)
? formatGithubRateLimitSummary()
: null
const rateLimitDisplay = rateLimitLine ? `\n${rateLimitLine}` : ''

return chalk.dim(
`Total cost: ${costDisplay}\n` +
`Total duration (API): ${formatDuration(getTotalAPIDuration())}
Total duration (wall): ${formatDuration(getTotalDuration())}
Total code changes: ${getTotalLinesAdded()} ${getTotalLinesAdded() === 1 ? 'line' : 'lines'} added, ${getTotalLinesRemoved()} ${getTotalLinesRemoved() === 1 ? 'line' : 'lines'} removed
${modelUsageDisplay}`,
${modelUsageDisplay}${rateLimitDisplay}`,
)
}
