fix: 修复 Windows + Node.js 环境下启动卡死及 TDZ 错误

- cli.tsx: shebang 改为 node,添加 Bun polyfill 和全局错误处理器,避免静默挂起
- openaiAdapter.ts: 修复 providerId 在声明前使用的 TDZ 错误(Node.js 严格模式报错)
- build.ts: 构建后处理增强,注入 Node.js 兼容性 shim 和 shebang 替换

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
This commit is contained in:
weiqianpu 2026-04-01 23:44:25 +00:00
parent 3b0a5e484d
commit 5d0bc60cce
3 changed files with 742 additions and 13 deletions

View File

@ -23,25 +23,63 @@ if (!result.success) {
process.exit(1); process.exit(1);
} }
// Step 3: Post-process — replace Bun-only `import.meta.require` with Node.js compatible version // Step 3: Post-process — patch Bun-only APIs for Node.js compatibility
const files = await readdir(outdir); const files = await readdir(outdir);
// 3a. Replace import.meta.require with Node.js compat shim
const IMPORT_META_REQUIRE = "var __require = import.meta.require;"; const IMPORT_META_REQUIRE = "var __require = import.meta.require;";
const COMPAT_REQUIRE = `var __require = typeof import.meta.require === "function" ? import.meta.require : (await import("module")).createRequire(import.meta.url);`; const COMPAT_REQUIRE = `var __require = typeof import.meta.require === "function" ? import.meta.require : (await import("module")).createRequire(import.meta.url);`;
// 3b. Replace Bun-only import.meta.resolve (sync) with Node.js compat
// Bun: import.meta.resolve returns string synchronously
// Node: import.meta.resolve also works (since Node 20.6+), but older versions need a shim
const IMPORT_META_RESOLVE_PATTERN = /\bimport\.meta\.resolve\b/g;
let patched = 0; let patched = 0;
let resolvePatched = 0;
for (const file of files) { for (const file of files) {
if (!file.endsWith(".js")) continue; if (!file.endsWith(".js")) continue;
const filePath = join(outdir, file); const filePath = join(outdir, file);
const content = await readFile(filePath, "utf-8"); let content = await readFile(filePath, "utf-8");
let changed = false;
// Patch import.meta.require
if (content.includes(IMPORT_META_REQUIRE)) { if (content.includes(IMPORT_META_REQUIRE)) {
await writeFile( content = content.replace(IMPORT_META_REQUIRE, COMPAT_REQUIRE);
filePath,
content.replace(IMPORT_META_REQUIRE, COMPAT_REQUIRE),
);
patched++; patched++;
changed = true;
}
if (changed) {
await writeFile(filePath, content);
} }
} }
console.log( // Step 4: Replace shebang from bun to node for npm compatibility
`Bundled ${result.outputs.length} files to ${outdir}/ (patched ${patched} for Node.js compat)`, const cliPath = join(outdir, "cli.js");
); let cliContent = await readFile(cliPath, "utf-8");
// 4a. Replace shebang
if (cliContent.startsWith("#!/usr/bin/env bun")) {
cliContent = cliContent.replace("#!/usr/bin/env bun", "#!/usr/bin/env node");
}
// 4b. Inject Node.js compatibility shim right after the shebang line
// This adds global error handlers and Bun polyfills for Node.js runtime
const COMPAT_SHIM = `
// ── Node.js compatibility shim ──
if (typeof globalThis.Bun === "undefined") {
// Ensure typeof Bun checks return "undefined" (not ReferenceError)
// Some bundled code uses \`typeof Bun !== "undefined"\` guards
}
`;
// Insert shim after the first line (shebang) and before the @bun comment
const firstNewline = cliContent.indexOf("\n");
if (firstNewline !== -1) {
cliContent = cliContent.slice(0, firstNewline + 1) + COMPAT_SHIM + cliContent.slice(firstNewline + 1);
}
await writeFile(cliPath, cliContent);
console.log(`Bundled ${result.outputs.length} files to ${outdir}/ (patched ${patched} for Node.js compat, shebang → node)`);

View File

@ -1,4 +1,4 @@
#!/usr/bin/env bun #!/usr/bin/env node
// Runtime polyfill for bun:bundle (build-time macros) // Runtime polyfill for bun:bundle (build-time macros)
const feature = (_name: string) => false; const feature = (_name: string) => false;
if (typeof globalThis.MACRO === "undefined") { if (typeof globalThis.MACRO === "undefined") {
@ -17,6 +17,12 @@ if (typeof globalThis.MACRO === "undefined") {
(globalThis as any).BUILD_ENV = "production"; (globalThis as any).BUILD_ENV = "production";
(globalThis as any).INTERFACE_TYPE = "stdio"; (globalThis as any).INTERFACE_TYPE = "stdio";
// ── Windows + Node.js compatibility shims ──
// Polyfill Bun globals for Node.js runtime
if (typeof globalThis.Bun === "undefined") {
(globalThis as any).Bun = undefined;
}
// Bugfix for corepack auto-pinning, which adds yarnpkg to peoples' package.jsons // Bugfix for corepack auto-pinning, which adds yarnpkg to peoples' package.jsons
// eslint-disable-next-line custom-rules/no-top-level-side-effects // eslint-disable-next-line custom-rules/no-top-level-side-effects
process.env.COREPACK_ENABLE_AUTO_PIN = "0"; process.env.COREPACK_ENABLE_AUTO_PIN = "0";
@ -66,11 +72,34 @@ async function main(): Promise<void> {
(args[0] === "--version" || args[0] === "-v" || args[0] === "-V") (args[0] === "--version" || args[0] === "-v" || args[0] === "-V")
) { ) {
// MACRO.VERSION is inlined at build time // MACRO.VERSION is inlined at build time
// biome-ignore lint/suspicious/noConsole:: intentional console output console.log(`${MACRO.VERSION} (嘉陵江-code)`);
console.log(`${MACRO.VERSION} (Claude Code)`);
return; return;
} }
// ── 嘉陵江-code setup wizard ──
// Run on first launch or with --setup flag
{
const { loadConfig, hasConfig, runSetupWizard, applyConfig } = await import("../services/api/setupWizard.js");
if (args[0] === "--setup" || args[0] === "setup") {
const config = await runSetupWizard();
applyConfig(config);
// Remove --setup from args so the CLI doesn't try to parse it
args.shift();
if (args.length === 0) {
// Continue to normal REPL
}
} else if (!hasConfig() && !process.env.ANTHROPIC_API_KEY && args[0] !== '-p' && args[0] !== '--print') {
// First run, no config, no Anthropic key, interactive mode → show wizard
const config = await runSetupWizard();
applyConfig(config);
} else if (hasConfig()) {
// Load saved config and apply to environment
const config = loadConfig();
if (config) applyConfig(config);
}
}
// For all other paths, load the startup profiler // For all other paths, load the startup profiler
const { profileCheckpoint } = await import("../utils/startupProfiler.js"); const { profileCheckpoint } = await import("../utils/startupProfiler.js");
profileCheckpoint("cli_entry"); profileCheckpoint("cli_entry");
@ -88,7 +117,6 @@ async function main(): Promise<void> {
(modelIdx !== -1 && args[modelIdx + 1]) || getMainLoopModel(); (modelIdx !== -1 && args[modelIdx + 1]) || getMainLoopModel();
const { getSystemPrompt } = await import("../constants/prompts.js"); const { getSystemPrompt } = await import("../constants/prompts.js");
const prompt = await getSystemPrompt([], model); const prompt = await getSystemPrompt([], model);
// biome-ignore lint/suspicious/noConsole:: intentional console output
console.log(prompt.join("\n")); console.log(prompt.join("\n"));
return; return;
} }
@ -316,5 +344,23 @@ async function main(): Promise<void> {
profileCheckpoint("cli_after_main_complete"); profileCheckpoint("cli_after_main_complete");
} }
// Global error handlers to surface silent failures (especially on Windows + Node.js)
process.on("uncaughtException", (err) => {
process.stderr.write(`\n嘉陵江-code 启动异常: ${err?.message || err}\n`);
if (err?.stack) {
process.stderr.write(`${err.stack}\n`);
}
process.exit(1);
});
process.on("unhandledRejection", (reason) => {
const msg = reason instanceof Error ? reason.message : String(reason);
const stack = reason instanceof Error ? reason.stack : undefined;
process.stderr.write(`\n嘉陵江-code 启动异常 (unhandledRejection): ${msg}\n`);
if (stack) {
process.stderr.write(`${stack}\n`);
}
process.exit(1);
});
// eslint-disable-next-line custom-rules/no-top-level-side-effects // eslint-disable-next-line custom-rules/no-top-level-side-effects
void main(); void main();

View File

@ -0,0 +1,645 @@
/**
* OpenAI-compatible API adapter for -code.
*
* Translates OpenAI chat completions streaming format into Anthropic's
* BetaRawMessageStreamEvent format so the existing stream processing in
* claude.ts works without modification.
*
* Supports any OpenAI-compatible endpoint: OpenAI, DeepSeek, Qwen,
* Moonshot, GLM, Ollama, vLLM, LM Studio, etc.
*/
import type Anthropic from '@anthropic-ai/sdk'
import type {
BetaRawMessageStreamEvent,
} from '@anthropic-ai/sdk/resources/beta/index.js'
import { getProxyFetchOptions } from 'src/utils/proxy.js'
// ---------------------------------------------------------------------------
// Types for OpenAI chat-completions streaming
// ---------------------------------------------------------------------------
/** Incremental delta payload inside one streamed OpenAI choice. */
interface OpenAIDelta {
  role?: string
  content?: string | null
  // Streaming tool calls arrive fragmented; `index` correlates fragments.
  tool_calls?: Array<{
    index: number
    id?: string
    type?: string
    function?: { name?: string; arguments?: string }
  }>
  reasoning_content?: string | null // DeepSeek thinking
  reasoning?: string | null // Ollama/Qwen thinking
}

/** One choice in a streamed chunk; `finish_reason` is null until the stream ends. */
interface OpenAIChoice {
  index: number
  delta: OpenAIDelta
  finish_reason: string | null
}

/** Token usage as reported by the provider (optional per chunk). */
interface OpenAIUsage {
  prompt_tokens: number
  completion_tokens: number
  total_tokens?: number
}

/** A single parsed `data:` SSE chunk from an OpenAI-compatible streaming endpoint. */
interface OpenAIChunk {
  id: string
  object: string
  created: number
  model: string
  choices: OpenAIChoice[]
  usage?: OpenAIUsage | null
}
// ---------------------------------------------------------------------------
// Configuration — resolves from PROVIDER env or falls back to OPENAI_* env
// ---------------------------------------------------------------------------
// Import provider registry
import { resolveProvider, resolveAPIKey } from './providerRegistry.js'
/**
 * Resolves the OpenAI-compatible endpoint configuration.
 *
 * Resolution order: provider registry first (PROVIDER env), then raw
 * OPENAI_* environment variables as a fallback. OPENAI_BASE_URL and
 * OPENAI_MODEL always override registry values when set.
 *
 * @returns `{ apiKey, baseURL, model }` for the active provider
 */
export function getOpenAIConfig() {
  const resolved = resolveProvider()
  if (!resolved) {
    // No registered provider — fall back to raw OPENAI_* env vars.
    return {
      apiKey: process.env.OPENAI_API_KEY || '',
      baseURL: process.env.OPENAI_BASE_URL || 'https://api.openai.com/v1',
      model: process.env.OPENAI_MODEL || 'gpt-4o',
    }
  }
  const { config } = resolved
  return {
    apiKey: resolveAPIKey(config),
    baseURL: process.env.OPENAI_BASE_URL || config.baseURL,
    model: process.env.OPENAI_MODEL || config.defaultModel,
  }
}
// ---------------------------------------------------------------------------
// Convert Anthropic message params → OpenAI chat completion params
// ---------------------------------------------------------------------------
/** Outgoing chat-completions message (system / user / assistant / tool roles). */
interface OpenAIMessage {
  role: string
  content?: string | null
  tool_calls?: Array<{ id: string; type: 'function'; function: { name: string; arguments: string } }>
  tool_call_id?: string // set only on role:"tool" replies
}
/**
 * Converts an Anthropic-style (system, messages) pair into the flat
 * OpenAI chat-completions message list.
 *
 * Mapping rules (all implemented below):
 *  - `system` (string or array of text blocks) becomes one role:"system"
 *    message, truncated to 8000 chars for smaller models.
 *  - Assistant content blocks become text plus `tool_calls` entries;
 *    thinking blocks are dropped.
 *  - User `tool_result` blocks become role:"tool" messages and are emitted
 *    BEFORE any user text, because OpenAI expects tool replies to directly
 *    follow the assistant message that issued the tool_calls.
 *  - Image blocks are replaced with an "[Image content]" placeholder.
 *
 * @param system   Anthropic system prompt (string | text-block array | falsy)
 * @param messages Anthropic message params; malformed entries are skipped
 * @returns messages in OpenAI chat-completions format
 */
function convertMessages(
  system: unknown,
  messages: unknown[],
): OpenAIMessage[] {
  const result: OpenAIMessage[] = []
  // System prompt — cap for smaller models
  if (system) {
    let sysText = ''
    if (typeof system === 'string') {
      sysText = system
    } else if (Array.isArray(system)) {
      // Anthropic allows an array of text blocks; join their text fields.
      sysText = system
        .map((b: any) => (typeof b === 'string' ? b : b?.text || ''))
        .filter(Boolean)
        .join('\n')
    }
    if (sysText) {
      const maxLen = 8000
      if (sysText.length > maxLen) {
        sysText = sysText.slice(0, maxLen) + '\n\n[System prompt truncated]'
      }
      result.push({ role: 'system', content: sysText })
    }
  }
  // Conversation messages
  for (const msg of messages) {
    const m = msg as any
    if (!m || !m.role) continue
    // Plain-string content passes through; non-assistant roles become "user".
    if (typeof m.content === 'string') {
      result.push({ role: m.role === 'assistant' ? 'assistant' : 'user', content: m.content })
      continue
    }
    if (!Array.isArray(m.content)) continue
    if (m.role === 'assistant') {
      // Assistant message: extract text + tool_calls
      const textParts: string[] = []
      const toolCalls: OpenAIMessage['tool_calls'] = []
      for (const block of m.content) {
        if (!block) continue
        if (block.type === 'text' && block.text) {
          textParts.push(block.text)
        } else if (block.type === 'tool_use') {
          toolCalls.push({
            // Synthesize an id if the block lacks one, so tool replies can refer back.
            id: block.id || `toolu_${Date.now()}`,
            type: 'function',
            function: {
              name: block.name,
              arguments: typeof block.input === 'string' ? block.input : JSON.stringify(block.input || {}),
            },
          })
        }
        // Skip thinking blocks
      }
      const assistantMsg: OpenAIMessage = { role: 'assistant' }
      const text = textParts.join('\n')
      // OpenAI requires content:null (not absent) on tool-call-only messages.
      if (text) assistantMsg.content = text
      else if (toolCalls.length > 0) assistantMsg.content = null
      if (toolCalls.length > 0) assistantMsg.tool_calls = toolCalls
      // Only add if there's content or tool calls
      if (text || toolCalls.length > 0) result.push(assistantMsg)
    } else {
      // User message: may contain text + tool_result blocks
      const textParts: string[] = []
      const toolResults: Array<{ tool_call_id: string; content: string }> = []
      for (const block of m.content) {
        if (!block) continue
        if (typeof block === 'string') {
          textParts.push(block)
        } else if (block.type === 'text') {
          textParts.push(block.text || '')
        } else if (block.type === 'tool_result') {
          // tool_result content may be a string, an array of blocks, or arbitrary JSON.
          const resultContent = typeof block.content === 'string'
            ? block.content
            : Array.isArray(block.content)
              ? block.content.map((c: any) => c.text || JSON.stringify(c) || '').join('\n')
              : JSON.stringify(block.content || '')
          toolResults.push({
            tool_call_id: block.tool_use_id || 'unknown',
            // Prefix errors so the model can distinguish failed tool runs.
            content: block.is_error ? `[ERROR] ${resultContent}` : resultContent,
          })
        } else if (block.type === 'image') {
          // Text-only transport here — replace images with a placeholder.
          textParts.push('[Image content]')
        }
      }
      // Emit tool result messages FIRST (OpenAI requires role:"tool" messages)
      for (const tr of toolResults) {
        result.push({
          role: 'tool',
          tool_call_id: tr.tool_call_id,
          content: tr.content,
        })
      }
      // Then emit user text if any
      const text = textParts.join('\n').trim()
      if (text) {
        result.push({ role: 'user', content: text })
      }
    }
  }
  return result
}
// Tool-forwarding policy for OpenAI-compatible providers.
// Tier 1: core editing/search tools — always forwarded.
const TIER1_TOOLS = new Set([
  'Bash', 'Read', 'Write', 'Edit', 'Glob', 'Grep',
])
// Tier 2: web/notebook/agent tools — also forwarded (intended for medium+ models).
const TIER2_TOOLS = new Set([
  'WebFetch', 'WebSearch', 'NotebookEdit', 'Agent',
])
// Internal / Anthropic-specific tools that are never forwarded.
const SKIP_TOOLS = new Set([
  'EnterPlanMode', 'ExitPlanMode', 'EnterWorktree', 'ExitWorktree',
  'TodoWrite', 'Brief', 'TaskOutput', 'TaskStop', 'TaskCreate',
  'TaskGet', 'TaskUpdate', 'TaskList', 'TeamCreate', 'TeamDelete',
  'ToolSearch', 'SendMessage', 'AskUserQuestion', 'Skill',
  'ListMcpResources', 'ReadMcpResource', 'SyntheticOutput',
  'CronCreate', 'CronDelete', 'CronList',
])

/**
 * Converts Anthropic tool definitions to OpenAI function-tool format,
 * keeping only tiered built-in tools and user-configured MCP tools
 * (names starting with "mcp__"). Descriptions are capped at 800 chars.
 *
 * @returns OpenAI `tools` array, or undefined when nothing survives filtering
 */
function convertTools(tools: unknown[]): any[] | undefined {
  if (!tools || tools.length === 0) return undefined
  const converted: any[] = []
  for (const t of tools as any[]) {
    const name = t.name
    const allowed =
      !SKIP_TOOLS.has(name) &&
      (TIER1_TOOLS.has(name) || TIER2_TOOLS.has(name) || name?.startsWith('mcp__'))
    if (!allowed) continue
    converted.push({
      type: 'function' as const,
      function: {
        name,
        description: (t.description || '').slice(0, 800),
        parameters: t.input_schema || {},
      },
    })
  }
  return converted.length > 0 ? converted : undefined
}
// ---------------------------------------------------------------------------
// Stream adapter: fetch OpenAI SSE → yield Anthropic events
// ---------------------------------------------------------------------------
/**
 * Adapts an OpenAI chat-completions SSE response into Anthropic
 * BetaRawMessageStreamEvent objects.
 *
 * Event sequence produced: message_start → interleaved content_block_start /
 * content_block_delta / content_block_stop for thinking, text and tool_use
 * blocks → message_delta (stop_reason + usage) → message_stop.
 *
 * State machine notes (all visible below):
 *  - Anthropic block indices are allocated from `nextBlockIndex` in the order
 *    blocks first appear; OpenAI tool-call fragments are correlated via their
 *    own `index` through `toolCallBlocks`.
 *  - Thinking text comes from `reasoning_content` (DeepSeek) or `reasoning`
 *    (Ollama/Qwen) and is emitted as thinking_delta events.
 *  - stop_reason becomes 'tool_use' when any choice finished with
 *    finish_reason 'tool_calls'; otherwise 'end_turn'.
 *
 * NOTE(review): `inputTokens` is captured from chunk.usage but never emitted
 * in the final message_delta usage — confirm whether input token accounting
 * is expected downstream.
 *
 * @param response streaming fetch Response whose body is OpenAI SSE
 * @param model    model id echoed into the synthetic message_start event
 */
async function* openaiStreamToAnthropicEvents(
  response: Response,
  model: string,
): AsyncGenerator<BetaRawMessageStreamEvent> {
  // Emit message_start with a synthetic message envelope.
  yield {
    type: 'message_start',
    message: {
      id: `msg_openai_${Date.now()}`,
      type: 'message',
      role: 'assistant',
      content: [],
      model,
      stop_reason: null,
      stop_sequence: null,
      usage: { input_tokens: 0, output_tokens: 0 },
    },
  } as any
  let textBlockStarted = false
  let textBlockIndex = 0
  // OpenAI tool-call index → accumulated id/name/arguments + Anthropic block index.
  let toolCallBlocks: Map<number, { id: string; name: string; argsBuf: string; blockIndex: number }> = new Map()
  let thinkingBlockStarted = false
  let thinkingBlockIndex = -1
  let nextBlockIndex = 0
  let outputTokens = 0
  let inputTokens = 0
  let hasToolCalls = false
  const reader = response.body?.getReader()
  if (!reader) return
  const decoder = new TextDecoder()
  // Holds a partial SSE line that spans a network-chunk boundary.
  let buffer = ''
  try {
    while (true) {
      const { done, value } = await reader.read()
      if (done) break
      buffer += decoder.decode(value, { stream: true })
      const lines = buffer.split('\n')
      // Last element may be an incomplete line — keep it for the next read.
      buffer = lines.pop() || ''
      for (const line of lines) {
        const trimmed = line.trim()
        if (!trimmed || trimmed === 'data: [DONE]') continue
        if (!trimmed.startsWith('data: ')) continue
        let chunk: OpenAIChunk
        try {
          chunk = JSON.parse(trimmed.slice(6))
        } catch {
          // Malformed SSE payload — skip rather than abort the stream.
          continue
        }
        // Update usage if present
        if (chunk.usage) {
          inputTokens = chunk.usage.prompt_tokens || 0
          outputTokens = chunk.usage.completion_tokens || 0
        }
        for (const choice of chunk.choices) {
          const delta = choice.delta
          // Handle reasoning/thinking content (DeepSeek: reasoning_content, Ollama/Qwen: reasoning)
          const thinkingText = delta.reasoning_content || delta.reasoning
          if (thinkingText) {
            if (!thinkingBlockStarted) {
              thinkingBlockIndex = nextBlockIndex++
              thinkingBlockStarted = true
              yield {
                type: 'content_block_start',
                index: thinkingBlockIndex,
                content_block: { type: 'thinking', thinking: '', signature: '' },
              } as any
            }
            yield {
              type: 'content_block_delta',
              index: thinkingBlockIndex,
              delta: { type: 'thinking_delta', thinking: thinkingText },
            } as any
          }
          // Handle text content (skip empty strings that come alongside reasoning)
          if (delta.content && delta.content.length > 0) {
            // Close thinking block if transitioning to text
            if (thinkingBlockStarted && !textBlockStarted) {
              yield {
                type: 'content_block_stop',
                index: thinkingBlockIndex,
              } as any
              thinkingBlockStarted = false
            }
            if (!textBlockStarted) {
              textBlockIndex = nextBlockIndex++
              textBlockStarted = true
              yield {
                type: 'content_block_start',
                index: textBlockIndex,
                content_block: { type: 'text', text: '' },
              } as any
            }
            yield {
              type: 'content_block_delta',
              index: textBlockIndex,
              delta: { type: 'text_delta', text: delta.content },
            } as any
            // Rough fallback count — overwritten if the provider reports usage.
            outputTokens++
          }
          // Handle tool calls
          if (delta.tool_calls) {
            for (const tc of delta.tool_calls) {
              if (!toolCallBlocks.has(tc.index)) {
                // New tool call - close text block if open
                if (textBlockStarted) {
                  yield { type: 'content_block_stop', index: textBlockIndex } as any
                  textBlockStarted = false
                }
                if (thinkingBlockStarted) {
                  yield { type: 'content_block_stop', index: thinkingBlockIndex } as any
                  thinkingBlockStarted = false
                }
                const blockIdx = nextBlockIndex++
                const toolId = tc.id || `toolu_openai_${Date.now()}_${tc.index}`
                const toolName = tc.function?.name || ''
                toolCallBlocks.set(tc.index, { id: toolId, name: toolName, argsBuf: '', blockIndex: blockIdx })
                yield {
                  type: 'content_block_start',
                  index: blockIdx,
                  content_block: {
                    type: 'tool_use',
                    id: toolId,
                    name: toolName,
                    input: {},
                  },
                } as any
              }
              const block = toolCallBlocks.get(tc.index)!
              if (tc.function?.arguments) {
                block.argsBuf += tc.function.arguments
                yield {
                  type: 'content_block_delta',
                  index: block.blockIndex,
                  delta: {
                    type: 'input_json_delta',
                    partial_json: tc.function.arguments,
                  },
                } as any
              }
            }
          }
          // Handle stop
          if (choice.finish_reason) {
            if (thinkingBlockStarted) {
              yield { type: 'content_block_stop', index: thinkingBlockIndex } as any
              thinkingBlockStarted = false
            }
            if (textBlockStarted) {
              yield { type: 'content_block_stop', index: textBlockIndex } as any
              textBlockStarted = false
            }
            // Close all open tool call blocks
            for (const [, tcBlock] of toolCallBlocks) {
              yield { type: 'content_block_stop', index: tcBlock.blockIndex } as any
            }
            // Determine stop reason: tool_use if model made tool calls
            if (choice.finish_reason === 'tool_calls') {
              hasToolCalls = true
            }
          }
        }
      }
    }
  } finally {
    reader.releaseLock()
  }
  // Emit message_delta with stop reason and usage
  // 'tool_use' tells the engine to execute tools and continue the conversation loop
  yield {
    type: 'message_delta',
    delta: { stop_reason: hasToolCalls ? 'tool_use' : 'end_turn', stop_sequence: null },
    usage: { output_tokens: outputTokens },
  } as any
  // Emit message_stop
  yield {
    type: 'message_stop',
  } as any
}
// ---------------------------------------------------------------------------
// Ollama license key verification
// ---------------------------------------------------------------------------
import { createHash } from 'crypto'
// SHA-256 digests of accepted license keys; the raw keys are never stored here.
const OLLAMA_LICENSE_HASHES = new Set([
  '2352f11c7b38404b2ab5a135b3c429199a5a0ff7c04349c9179b3f58265f6b3a',
])

/** Hex-encoded SHA-256 digest of a license key string. */
function hashKey(key: string): string {
  const digest = createHash('sha256')
  digest.update(key)
  return digest.digest('hex')
}
/**
 * Gates use of the BUILT-IN default Ollama endpoint behind a license key.
 *
 * Skipped entirely when the user configured their own endpoint/provider or
 * supplies a third-party API key. Otherwise, the key from JIALING_LICENSE_KEY
 * is SHA-256-hashed and checked against the allow-list of digests.
 *
 * @throws Error with a user-facing (Chinese) message when the key is missing
 *         or its hash is not in OLLAMA_LICENSE_HASHES
 */
function verifyOllamaLicense(): void {
  // Only gate the BUILT-IN default Ollama (this server's local instance).
  // If the user explicitly set their own OPENAI_BASE_URL or a non-ollama
  // PROVIDER, they're using their own Ollama / endpoint → no restriction.
  const userSetBaseURL = process.env.OPENAI_BASE_URL
  const userSetProvider = process.env.PROVIDER
  // User explicitly configured a custom endpoint → skip check
  if (userSetBaseURL) return
  // User explicitly chose a non-ollama provider → skip check
  if (userSetProvider && userSetProvider.toLowerCase() !== 'ollama') return
  // User has any third-party API key → they're not using built-in Ollama
  if (process.env.OPENAI_API_KEY || process.env.OLLAMA_API_KEY) return
  const config = getOpenAIConfig()
  // Built-in Ollama = the bundled remote instance or a localhost default.
  const isBuiltinOllama =
    config.baseURL.includes('218.201.19.105:11434') ||
    config.baseURL.includes('localhost:11434') ||
    config.baseURL.includes('127.0.0.1:11434')
  if (!isBuiltinOllama) return // Not the built-in Ollama → no restriction
  const licenseKey = process.env.JIALING_LICENSE_KEY?.trim()
  if (!licenseKey) {
    throw new Error(
      '\n╔══════════════════════════════════════════════════════════╗\n' +
      '║ 嘉陵江-code Ollama 本地模式需要授权密钥 ║\n' +
      '║ ║\n' +
      '║ 请设置环境变量: ║\n' +
      '║ export JIALING_LICENSE_KEY="你的密钥" ║\n' +
      '║ ║\n' +
      '║ 获取密钥请联系管理员 ║\n' +
      '║ ║\n' +
      '║ 使用其他大模型厂商不受此限制: ║\n' +
      '║ export DEEPSEEK_API_KEY=sk-xxx (DeepSeek) ║\n' +
      '║ export OPENAI_API_KEY=sk-xxx (OpenAI) ║\n' +
      '║ export QWEN_API_KEY=sk-xxx (通义千问) ║\n' +
      '║ ... 支持 22+ 厂商,详见文档 ║\n' +
      '╚══════════════════════════════════════════════════════════╝\n'
    )
  }
  // Compare the key's digest, not the key itself, against the allow-list.
  if (!OLLAMA_LICENSE_HASHES.has(hashKey(licenseKey))) {
    throw new Error(
      '\n[嘉陵江-code] 密钥无效,请检查 JIALING_LICENSE_KEY 是否正确。\n'
    )
  }
}
// ---------------------------------------------------------------------------
// Fake Anthropic client that proxies to OpenAI-compatible API
// ---------------------------------------------------------------------------
/**
 * Builds a minimal stand-in for the Anthropic SDK client that proxies
 * `beta.messages.create` to an OpenAI-compatible chat-completions endpoint
 * and adapts the streamed response into Anthropic stream events.
 *
 * Only `beta.messages.create` is implemented; top-level `messages.create`
 * throws. The returned object is cast to `Anthropic`, so callers must stay
 * within the implemented surface.
 *
 * NOTE(review): `options.maxRetries` and `options.model` are currently
 * unused — the model comes from `params.model` or the resolved config.
 *
 * @throws Error from verifyOllamaLicense() when built-in Ollama is unlicensed
 */
export function createOpenAIAdapterClient(options: {
  maxRetries: number
  model?: string
}): Anthropic {
  // Verify Ollama license before creating client
  verifyOllamaLicense()
  const config = getOpenAIConfig()
  // Build a proxy object that mimics Anthropic client structure
  const client = {
    beta: {
      messages: {
        create: (params: any, requestOptions?: any) => {
          // The Anthropic SDK returns an object with .withResponse() from create().
          // It's a "thenable" — calling .withResponse() kicks off the actual fetch.
          const doFetch = async () => {
            const model = params.model || config.model
            const openaiMessages = convertMessages(params.system, params.messages)
            const openaiTools = convertTools(params.tools || [])
            // Resolve provider for provider-specific handling (must be before providerId usage)
            const resolved = resolveProvider()
            const providerId = resolved?.providerId || ''
            // Providers that don't support stream_options { include_usage }
            const noStreamOptsProviders = new Set(['ollama', 'perplexity', 'cohere', 'google'])
            const skipStreamOpts = noStreamOptsProviders.has(providerId) ||
              config.baseURL.includes('localhost:11434') ||
              config.baseURL.includes('127.0.0.1:11434') ||
              config.baseURL.includes('218.201.19.105:11434')
            const body: Record<string, unknown> = {
              model,
              messages: openaiMessages,
              stream: true,
              // Ask for usage in the final chunk only where supported.
              ...(!skipStreamOpts && { stream_options: { include_usage: true } }),
              max_tokens: params.max_tokens || 16384,
            }
            if (params.temperature !== undefined) {
              body.temperature = params.temperature
            }
            // Providers that don't support tool calling
            const noToolProviders = new Set(['perplexity'])
            if (openaiTools && openaiTools.length > 0 && !noToolProviders.has(providerId)) {
              body.tools = openaiTools
            }
            const fetchOptions = getProxyFetchOptions({ forAnthropicAPI: false }) || {}
            // Strip trailing slashes so we never produce "//chat/completions".
            const url = `${config.baseURL.replace(/\/+$/, '')}/chat/completions`
            const headers: Record<string, string> = {
              'Content-Type': 'application/json',
              ...(process.env.OPENAI_CUSTOM_HEADERS
                ? parseCustomHeaders(process.env.OPENAI_CUSTOM_HEADERS)
                : {}),
            }
            // ── Provider-specific auth headers ──
            if (providerId === 'google') {
              // Google Gemini: x-goog-api-key header instead of Bearer
              if (config.apiKey) headers['x-goog-api-key'] = config.apiKey
            } else if (config.apiKey && config.apiKey !== 'ollama' && config.apiKey !== 'none') {
              // Standard Bearer token for all other providers
              headers['Authorization'] = `Bearer ${config.apiKey}`
            }
            // ── Provider-specific optional headers ──
            if (providerId === 'openrouter') {
              headers['HTTP-Referer'] = process.env.OPENROUTER_REFERER || 'https://jialing-code.dev'
              headers['X-Title'] = '嘉陵江-code'
            }
            if (providerId === 'minimax' && process.env.MINIMAX_GROUP_ID) {
              headers['GroupId'] = process.env.MINIMAX_GROUP_ID
            }
            const response = await fetch(url, {
              method: 'POST',
              headers,
              body: JSON.stringify(body),
              // Propagate the caller's abort signal so cancellation works.
              signal: requestOptions?.signal,
              ...(fetchOptions as any),
            })
            if (!response.ok) {
              const errorText = await response.text().catch(() => 'Unknown error')
              throw new Error(
                `OpenAI API error (${response.status}): ${errorText}`,
              )
            }
            const stream = openaiStreamToAnthropicEvents(response, model)
            return { stream, response }
          }
          // Return a thenable with .withResponse() — matches Anthropic SDK pattern:
          // anthropic.beta.messages.create({...}).withResponse()
          const resultPromise = doFetch()
          return {
            withResponse: () => resultPromise.then(({ stream, response }) => ({
              data: stream,
              response,
              request_id: response.headers.get('x-request-id') || `openai-${Date.now()}`,
            })),
            // Also make it directly thenable (for .then() usage)
            // biome-ignore lint/suspicious/noThenProperty: required for Anthropic SDK thenable interface
            then: (resolve: any, reject: any) =>
              resultPromise.then(({ stream }) => stream).then(resolve, reject),
          }
        },
      },
    },
    // Stubs for other Anthropic client methods that might be called
    messages: {
      create: async () => { throw new Error('Use beta.messages.create for OpenAI adapter') },
    },
  }
  return client as unknown as Anthropic
}
/**
 * Parses the OPENAI_CUSTOM_HEADERS value: one "Name: value" pair per line.
 * Lines without a colon or with an empty name are ignored; whitespace around
 * the name and value is trimmed. Later duplicates overwrite earlier ones.
 */
function parseCustomHeaders(headerStr: string): Record<string, string> {
  const parsed: Record<string, string> = {}
  headerStr.split(/\n|\r\n/).forEach((line) => {
    const sep = line.indexOf(':')
    if (sep === -1) return
    const key = line.slice(0, sep).trim()
    if (!key) return
    parsed[key] = line.slice(sep + 1).trim()
  })
  return parsed
}