diff --git a/packages/types/src/provider-settings.ts b/packages/types/src/provider-settings.ts
index 2de2965d920..1d92b7c08ca 100644
--- a/packages/types/src/provider-settings.ts
+++ b/packages/types/src/provider-settings.ts
@@ -20,6 +20,7 @@ import {
 	xaiModels,
 	internationalZAiModels,
 	minimaxModels,
+	mimoModels,
 } from "./providers/index.js"
 
 /**
@@ -121,6 +122,7 @@ export const providerNames = [
 	"mistral",
 	"moonshot",
 	"minimax",
+	"mimo",
 	"openai-codex",
 	"openai-native",
 	"qwen-code",
@@ -334,6 +336,18 @@ const minimaxSchema = apiModelIdProviderModelSchema.extend({
 	minimaxApiKey: z.string().optional(),
 })
 
+const mimoSchema = apiModelIdProviderModelSchema.extend({
+	mimoBaseUrl: z
+		.union([
+			z.literal("https://api.xiaomimimo.com/v1"),
+			z.literal("https://token-plan-cn.xiaomimimo.com/v1"),
+			z.literal("https://token-plan-sgp.xiaomimimo.com/v1"),
+			z.literal("https://token-plan-ams.xiaomimimo.com/v1"),
+		])
+		.optional(),
+	mimoApiKey: z.string().optional(),
+})
+
 const requestySchema = baseProviderSettingsSchema.extend({
 	requestyBaseUrl: z.string().optional(),
 	requestyApiKey: z.string().optional(),
@@ -417,6 +431,7 @@ export const providerSettingsSchemaDiscriminated = z.discriminatedUnion("apiProv
 	poeSchema.merge(z.object({ apiProvider: z.literal("poe") })),
 	moonshotSchema.merge(z.object({ apiProvider: z.literal("moonshot") })),
 	minimaxSchema.merge(z.object({ apiProvider: z.literal("minimax") })),
+	mimoSchema.merge(z.object({ apiProvider: z.literal("mimo") })),
 	requestySchema.merge(z.object({ apiProvider: z.literal("requesty") })),
 	unboundSchema.merge(z.object({ apiProvider: z.literal("unbound") })),
 	fakeAiSchema.merge(z.object({ apiProvider: z.literal("fake-ai") })),
@@ -451,6 +466,7 @@ export const providerSettingsSchema = z.object({
 	...poeSchema.shape,
 	...moonshotSchema.shape,
 	...minimaxSchema.shape,
+	...mimoSchema.shape,
 	...requestySchema.shape,
 	...unboundSchema.shape,
 	...fakeAiSchema.shape,
@@ -525,6 +541,7 @@ export const modelIdKeysByProvider: Record
 	mistral: "apiModelId",
 	moonshot: "apiModelId",
 	minimax: "apiModelId",
+	mimo: "apiModelId",
 	deepseek: "apiModelId",
 	poe: "apiModelId",
 	"qwen-code": "apiModelId",
@@ -617,6 +634,11 @@ export const MODELS_BY_PROVIDER: Record<
 		label: "MiniMax",
 		models: Object.keys(minimaxModels),
 	},
+	mimo: {
+		id: "mimo",
+		label: "Xiaomi MiMo",
+		models: Object.keys(mimoModels),
+	},
 	"openai-codex": {
 		id: "openai-codex",
 		label: "OpenAI - ChatGPT Plus/Pro",
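Note on the hunk above: because `mimoBaseUrl` is a union of four literal URLs rather than a free-form string, an unrecognized endpoint fails settings validation at parse time. A standalone sketch of that behavior, with field names mirroring `mimoSchema` (illustrative only, not part of the patch):

```ts
import { z } from "zod"

// Mirrors mimoSchema above (simplified; apiProvider added for the discriminated union).
const mimoProfile = z.object({
	apiProvider: z.literal("mimo"),
	apiModelId: z.string().optional(),
	mimoBaseUrl: z
		.union([
			z.literal("https://api.xiaomimimo.com/v1"),
			z.literal("https://token-plan-cn.xiaomimimo.com/v1"),
			z.literal("https://token-plan-sgp.xiaomimimo.com/v1"),
			z.literal("https://token-plan-ams.xiaomimimo.com/v1"),
		])
		.optional(),
	mimoApiKey: z.string().optional(),
})

// Accepted: the base URL is one of the four allowed literals.
mimoProfile.parse({ apiProvider: "mimo", mimoBaseUrl: "https://token-plan-sgp.xiaomimimo.com/v1" })

// Rejected at parse time: arbitrary endpoints do not match any literal.
// mimoProfile.parse({ apiProvider: "mimo", mimoBaseUrl: "https://example.com/v1" })
```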
from "../provider-settings.js" @@ -85,6 +87,8 @@ export function getProviderDefaultModelId( return moonshotDefaultModelId case "minimax": return minimaxDefaultModelId + case "mimo": + return mimoDefaultModelId case "zai": return options?.isChina ? mainlandZAiDefaultModelId : internationalZAiDefaultModelId case "openai-native": diff --git a/packages/types/src/providers/mimo.ts b/packages/types/src/providers/mimo.ts new file mode 100644 index 00000000000..88a00c9dbe4 --- /dev/null +++ b/packages/types/src/providers/mimo.ts @@ -0,0 +1,49 @@ +import type { ModelInfo } from "../model.js" + +// https://platform.xiaomimimo.com/static/docs/pricing.md +export type MimoModelId = keyof typeof mimoModels + +export const mimoDefaultModelId: MimoModelId = "mimo-v2.5-pro" + +export const mimoModels = { + "mimo-v2.5-pro": { + maxTokens: 131_072, + contextWindow: 1_048_576, + supportsImages: false, + supportsPromptCache: true, + preserveReasoning: true, + inputPrice: 1.0, // $1.00 per million tokens (cache miss, ≤256K) + outputPrice: 3.0, // $3.00 per million tokens + cacheReadsPrice: 0.2, // $0.20 per million tokens (cache hit) + cacheWritesPrice: 0, // Free for limited time + description: + "MiMo V2.5 Pro - Xiaomi's flagship reasoning model with 1M context, interleaved thinking, tool calling, and structured output.", + }, + "mimo-v2.5": { + maxTokens: 131_072, + contextWindow: 1_048_576, + supportsImages: false, + supportsPromptCache: true, + preserveReasoning: true, + inputPrice: 0.4, // $0.40 per million tokens (cache miss, ≤256K) + outputPrice: 2.0, // $2.00 per million tokens + cacheReadsPrice: 0.08, // $0.08 per million tokens (cache hit) + cacheWritesPrice: 0, // Free for limited time + description: + "MiMo V2.5 - Full modal understanding model with 1M context, deep thinking, tool calling, and structured output.", + }, + "mimo-v2-flash": { + maxTokens: 65_536, + contextWindow: 262_144, + supportsImages: false, + supportsPromptCache: false, + preserveReasoning: true, + inputPrice: 0.1, // $0.10 per million tokens (cache miss) + outputPrice: 0.3, // $0.30 per million tokens + description: "MiMo V2 Flash - Fast and cost-effective reasoning model with tool calling support.", + }, +} as const satisfies Record + +export const mimoDefaultModelInfo: ModelInfo = mimoModels[mimoDefaultModelId] + +export const MIMO_DEFAULT_TEMPERATURE = 1.0 diff --git a/src/api/index.ts b/src/api/index.ts index 9e0c4078225..c9e5e7b1b94 100644 --- a/src/api/index.ts +++ b/src/api/index.ts @@ -33,6 +33,7 @@ import { FireworksHandler, VercelAiGatewayHandler, MiniMaxHandler, + MimoHandler, BasetenHandler, } from "./providers" import { NativeOllamaHandler } from "./providers/native-ollama" @@ -167,6 +168,8 @@ export function buildApiHandler(configuration: ProviderSettings): ApiHandler { return new LiteLLMHandler(options) case "sambanova": return new SambaNovaHandler(options) + case "mimo": + return new MimoHandler(options) case "zai": return new ZAiHandler(options) case "fireworks": diff --git a/src/api/providers/base-openai-compatible-provider.ts b/src/api/providers/base-openai-compatible-provider.ts index fc3d769ae2a..48cac9d9389 100644 --- a/src/api/providers/base-openai-compatible-provider.ts +++ b/src/api/providers/base-openai-compatible-provider.ts @@ -90,7 +90,10 @@ export abstract class BaseOpenAiCompatibleProvider model, max_tokens, temperature, - messages: [{ role: "system", content: systemPrompt }, ...convertToOpenAiMessages(messages)], + messages: [ + { role: "system", content: systemPrompt }, + 
diff --git a/src/api/index.ts b/src/api/index.ts
index 9e0c4078225..c9e5e7b1b94 100644
--- a/src/api/index.ts
+++ b/src/api/index.ts
@@ -33,6 +33,7 @@ import {
 	FireworksHandler,
 	VercelAiGatewayHandler,
 	MiniMaxHandler,
+	MimoHandler,
 	BasetenHandler,
 } from "./providers"
 import { NativeOllamaHandler } from "./providers/native-ollama"
@@ -167,6 +168,8 @@ export function buildApiHandler(configuration: ProviderSettings): ApiHandler {
 			return new LiteLLMHandler(options)
 		case "sambanova":
 			return new SambaNovaHandler(options)
+		case "mimo":
+			return new MimoHandler(options)
 		case "zai":
 			return new ZAiHandler(options)
 		case "fireworks":
diff --git a/src/api/providers/base-openai-compatible-provider.ts b/src/api/providers/base-openai-compatible-provider.ts
index fc3d769ae2a..48cac9d9389 100644
--- a/src/api/providers/base-openai-compatible-provider.ts
+++ b/src/api/providers/base-openai-compatible-provider.ts
@@ -90,7 +90,10 @@ export abstract class BaseOpenAiCompatibleProvider
 			model,
 			max_tokens,
 			temperature,
-			messages: [{ role: "system", content: systemPrompt }, ...convertToOpenAiMessages(messages)],
+			messages: [
+				{ role: "system", content: systemPrompt },
+				...convertToOpenAiMessages(messages, { modelId: model }),
+			],
 			stream: true,
 			stream_options: { include_usage: true },
 			tools: this.convertToolsForOpenAI(metadata?.tools),
@@ -224,7 +227,7 @@ export abstract class BaseOpenAiCompatibleProvider
 
 		const params: OpenAI.Chat.Completions.ChatCompletionCreateParams = {
 			model: modelId,
-			messages: [{ role: "user", content: prompt }],
+			messages: [{ role: "user", content: prompt }], // Single-turn prompt with no history, so no MiMo-specific stripping is needed
 		}
 
 		// Add thinking parameter if reasoning is enabled and model supports it
diff --git a/src/api/providers/index.ts b/src/api/providers/index.ts
index 41aff953d43..9712e79fabf 100644
--- a/src/api/providers/index.ts
+++ b/src/api/providers/index.ts
@@ -27,4 +27,5 @@ export { FireworksHandler } from "./fireworks"
 export { RooHandler } from "./roo"
 export { VercelAiGatewayHandler } from "./vercel-ai-gateway"
 export { MiniMaxHandler } from "./minimax"
+export { MimoHandler } from "./mimo"
 export { BasetenHandler } from "./baseten"
diff --git a/src/api/providers/lite-llm.ts b/src/api/providers/lite-llm.ts
index cf8d16a1129..b6a902ac108 100644
--- a/src/api/providers/lite-llm.ts
+++ b/src/api/providers/lite-llm.ts
@@ -117,6 +117,7 @@ export class LiteLLMHandler extends RouterProvider implements SingleCompletionHa
 		const { id: modelId, info } = await this.fetchModel()
 
 		const openAiMessages = convertToOpenAiMessages(messages, {
+			modelId,
 			normalizeToolCallId: sanitizeOpenAiCallId,
 		})
 
diff --git a/src/api/providers/lm-studio.ts b/src/api/providers/lm-studio.ts
index a771394c535..6370fdf40e2 100644
--- a/src/api/providers/lm-studio.ts
+++ b/src/api/providers/lm-studio.ts
@@ -44,7 +44,7 @@ export class LmStudioHandler extends BaseProvider implements SingleCompletionHan
 	): ApiStream {
 		const openAiMessages: OpenAI.Chat.ChatCompletionMessageParam[] = [
 			{ role: "system", content: systemPrompt },
-			...convertToOpenAiMessages(messages),
+			...convertToOpenAiMessages(messages, { modelId: this.getModel().id }),
 		]
 
 		// -------------------------
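The call sites above all begin threading `modelId` into `convertToOpenAiMessages`. A minimal sketch of the effect (model IDs are illustrative): with a MiMo id the converter omits `reasoning_details` (see the `openai-format.ts` hunk below), leaving `MimoHandler` free to re-send reasoning via MiMo's `reasoning_content` field instead; other ids are unaffected.

```ts
import type { Anthropic } from "@anthropic-ai/sdk"
import { convertToOpenAiMessages } from "../transform/openai-format" // repo-internal path, as used by the handlers

const history: Anthropic.Messages.MessageParam[] = [{ role: "user", content: "List the files in src/" }]

// With a MiMo id, prior-turn reasoning_details is dropped by the converter.
const forMimo = convertToOpenAiMessages(history, { modelId: "mimo-v2.5-pro" })

// Any other id keeps the converter's previous behavior.
const forOthers = convertToOpenAiMessages(history, { modelId: "gpt-4o" })
```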
diff --git a/src/api/providers/mimo.ts b/src/api/providers/mimo.ts
new file mode 100644
index 00000000000..855dbec9c56
--- /dev/null
+++ b/src/api/providers/mimo.ts
@@ -0,0 +1,313 @@
+import { Anthropic } from "@anthropic-ai/sdk"
+import OpenAI from "openai"
+
+import { mimoModels, mimoDefaultModelId, MIMO_DEFAULT_TEMPERATURE, type ModelInfo } from "@roo-code/types"
+
+import type { ApiHandlerOptions } from "../../shared/api"
+
+import { ApiStream } from "../transform/stream"
+import { getModelParams } from "../transform/model-params"
+import { calculateApiCostOpenAI } from "../../shared/cost"
+
+import { OpenAiHandler } from "./openai"
+import type { ApiHandlerCreateMessageMetadata } from "../index"
+
+/**
+ * MimoHandler extends OpenAiHandler with MiMo-specific adaptations.
+ *
+ * CRITICAL: Per MiMo's official docs, reasoning_content MUST be passed back
+ * in multi-turn conversations with tool calls. Without it, the API returns 400.
+ *
+ * Reference: https://platform.xiaomimimo.com/static/docs/usage-guide/passing-back-reasoning_content.md
+ */
+export class MimoHandler extends OpenAiHandler {
+	constructor(options: ApiHandlerOptions) {
+		super({
+			...options,
+			openAiApiKey: options.mimoApiKey ?? "not-provided",
+			openAiModelId: options.apiModelId ?? mimoDefaultModelId,
+			openAiBaseUrl: options.mimoBaseUrl || "https://token-plan-sgp.xiaomimimo.com/v1",
+			openAiStreamingEnabled: true,
+			includeMaxTokens: false,
+		})
+	}
+
+	override getModel() {
+		const id = this.options.apiModelId ?? mimoDefaultModelId
+		const info: ModelInfo = mimoModels[id as keyof typeof mimoModels] || mimoModels[mimoDefaultModelId]
+		const params = getModelParams({
+			format: "openai",
+			modelId: id,
+			model: info,
+			settings: this.options,
+			defaultTemperature: MIMO_DEFAULT_TEMPERATURE,
+		})
+		return { id, info, ...params }
+	}
+
+	/**
+	 * Strip OpenAI-specific extensions that MiMo's proxy rejects:
+	 * - strict: true on tools
+	 * - additionalProperties: false on schemas
+	 */
+	protected override convertToolsForOpenAI(tools: any[] | undefined): any[] | undefined {
+		if (!tools) {
+			return undefined
+		}
+
+		return tools.map((tool) => {
+			if (tool.type !== "function") {
+				return tool
+			}
+
+			return {
+				type: "function",
+				function: {
+					name: tool.function.name,
+					description: tool.function.description,
+					parameters: this.stripOpenAiExtensions(tool.function.parameters),
+				},
+			}
+		})
+	}
+
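+	/**
+	 * Recursively removes `additionalProperties` from a JSON schema, walking
+	 * nested `properties` and `items` so deeply nested object schemas are
+	 * cleaned as well. For example (illustrative input):
+	 *   { type: "object", additionalProperties: false, properties: { path: { type: "string" } } }
+	 * becomes:
+	 *   { type: "object", properties: { path: { type: "string" } } }
+	 */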
+	private stripOpenAiExtensions(schema: any): any {
+		if (!schema || typeof schema !== "object") {
+			return schema
+		}
+
+		const { additionalProperties, ...rest } = schema
+
+		if (rest.properties) {
+			const newProps: Record<string, any> = {}
+			for (const [key, prop] of Object.entries(rest.properties)) {
+				newProps[key] = this.stripOpenAiExtensions(prop)
+			}
+			rest.properties = newProps
+		}
+
+		if (rest.items && typeof rest.items === "object") {
+			rest.items = this.stripOpenAiExtensions(rest.items)
+		}
+
+		return rest
+	}
+
+	/**
+	 * Convert Anthropic messages to MiMo-compatible OpenAI format.
+	 *
+	 * CRITICAL: Extracts `type: "reasoning"` content blocks from Anthropic
+	 * messages and converts them to the `reasoning_content` field on OpenAI
+	 * assistant messages. MiMo requires this for multi-turn tool calling.
+	 */
+	private convertMessagesForMiMo(
+		anthropicMessages: Anthropic.Messages.MessageParam[],
+	): OpenAI.Chat.ChatCompletionMessageParam[] {
+		const converted: OpenAI.Chat.ChatCompletionMessageParam[] = []
+
+		for (const msg of anthropicMessages) {
+			if (msg.role === "assistant" && Array.isArray(msg.content)) {
+				// Extract reasoning content from Anthropic content blocks
+				const reasoningParts: string[] = []
+				const textParts: string[] = []
+				const toolUseParts: Anthropic.ToolUseBlockParam[] = []
+
+				for (const block of msg.content) {
+					if ((block as any).type === "reasoning") {
+						reasoningParts.push((block as any).text || "")
+					} else if (block.type === "text") {
+						textParts.push(block.text)
+					} else if (block.type === "tool_use") {
+						toolUseParts.push(block)
+					}
+				}
+
+				// Build OpenAI assistant message with reasoning_content
+				const assistantMsg: any = {
+					role: "assistant",
+					content: textParts.join("\n") || "",
+				}
+
+				// CRITICAL: Add reasoning_content if present
+				if (reasoningParts.length > 0) {
+					assistantMsg.reasoning_content = reasoningParts.join("\n")
+				}
+
+				// Add tool_calls if present
+				if (toolUseParts.length > 0) {
+					assistantMsg.tool_calls = toolUseParts.map((block) => ({
+						id: block.id,
+						type: "function" as const,
+						function: {
+							name: block.name,
+							arguments: typeof block.input === "string" ? block.input : JSON.stringify(block.input),
+						},
+					}))
+				}
+
+				converted.push(assistantMsg)
+			} else if (msg.role === "assistant" && typeof msg.content === "string") {
+				const assistantMsg: any = {
+					role: "assistant",
+					content: msg.content,
+				}
+				const reasoningContent = (msg as any).reasoning_content
+				if (typeof reasoningContent === "string" && reasoningContent.trim()) {
+					assistantMsg.reasoning_content = reasoningContent
+				}
+				converted.push(assistantMsg)
+			} else if (msg.role === "user" && Array.isArray(msg.content)) {
+				// Process user messages: separate tool_results from text
+				const toolResults: Anthropic.ToolResultBlockParam[] = []
+				const textBlocks: string[] = []
+
+				for (const block of msg.content) {
+					if (block.type === "tool_result") {
+						toolResults.push(block)
+					} else if (block.type === "text") {
+						textBlocks.push(block.text)
+					}
+				}
+
+				// Add tool results as role:"tool" messages (MiMo supports this)
+				for (const tr of toolResults) {
+					let content: string
+					if (typeof tr.content === "string") {
+						content = tr.content
+					} else if (Array.isArray(tr.content)) {
+						content = tr.content.map((p: any) => (p.type === "text" ? p.text : "")).join("\n")
+					} else {
+						content = ""
+					}
+
+					converted.push({
+						role: "tool",
+						tool_call_id: tr.tool_use_id,
+						content: content || "(empty)",
+					})
+				}
+
+				// Add text content as user message
+				if (textBlocks.length > 0) {
+					converted.push({
+						role: "user",
+						content: textBlocks.join("\n"),
+					})
+				}
+			} else if (msg.role === "user" && typeof msg.content === "string") {
+				converted.push({
+					role: "user",
+					content: msg.content,
+				})
+			}
+		}
+
+		return converted
+	}
+
+	override async *createMessage(
+		systemPrompt: string,
+		messages: Anthropic.Messages.MessageParam[],
+		metadata?: ApiHandlerCreateMessageMetadata,
+	): ApiStream {
+		const { id: modelId, info: modelInfo, temperature } = this.getModel()
+
+		// Use custom conversion that preserves reasoning_content
+		const convertedMessages = this.convertMessagesForMiMo(messages)
+
+		const tools = this.convertToolsForOpenAI(metadata?.tools)
+
+		// Build request per MiMo's OpenAI-compatible API
+		const params: Record<string, any> = {
+			model: modelId,
+			temperature,
+			messages: [{ role: "system", content: systemPrompt }, ...convertedMessages],
+			stream: true,
+			// MiMo requires thinking to be enabled via extra_body
+			extra_body: { thinking: { type: "enabled" } },
+		}
+
+		if (tools && tools.length > 0) {
+			params.tools = tools
+		}
+
+		let stream: AsyncIterable<OpenAI.Chat.Completions.ChatCompletionChunk>
+		try {
+			stream = (await this.client.chat.completions.create(params as any)) as any
+		} catch (error) {
+			const { handleOpenAIError } = await import("./utils/openai-error-handler")
+			throw handleOpenAIError(error, "MiMo")
+		}
+
+		let lastUsage: OpenAI.CompletionUsage | undefined
+
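+		// MiMo streams standard OpenAI deltas, except that reasoning tokens arrive
+		// in a nonstandard `reasoning_content` field alongside `content` and
+		// `tool_calls`; usage statistics, when present, arrive on the final chunk.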
+		for await (const chunk of stream) {
+			const delta = chunk.choices?.[0]?.delta ?? {}
+
+			if (delta.content) {
+				yield {
+					type: "text",
+					text: delta.content,
+				}
+			}
+
+			if ("reasoning_content" in delta && delta.reasoning_content) {
+				yield {
+					type: "reasoning",
+					text: (delta.reasoning_content as string) || "",
+				}
+			}
+
+			if (delta.tool_calls) {
+				for (const toolCall of delta.tool_calls) {
+					yield {
+						type: "tool_call_partial",
+						index: toolCall.index,
+						id: toolCall.id,
+						name: toolCall.function?.name,
+						arguments: toolCall.function?.arguments,
+					}
+				}
+			}
+
+			if (chunk.usage) {
+				lastUsage = chunk.usage
+			}
+		}
+
+		if (lastUsage) {
+			const inputTokens = lastUsage?.prompt_tokens || 0
+			const outputTokens = lastUsage?.completion_tokens || 0
+			const cacheWriteTokens = (lastUsage?.prompt_tokens_details as any)?.cache_write_tokens || 0
+			const cacheReadTokens = lastUsage?.prompt_tokens_details?.cached_tokens || 0
+
+			const { totalCost } = calculateApiCostOpenAI(
+				modelInfo,
+				inputTokens,
+				outputTokens,
+				cacheWriteTokens,
+				cacheReadTokens,
+			)
+
+			yield {
+				type: "usage",
+				inputTokens,
+				outputTokens,
+				cacheWriteTokens: cacheWriteTokens || undefined,
+				cacheReadTokens: cacheReadTokens || undefined,
+				totalCost,
+			}
+		}
+	}
+}
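For reference, a sketch of the wire-level message array `convertMessagesForMiMo` produces for one tool-call round trip; the `reasoning_content` echo is the part MiMo's docs require (all values are illustrative):

```ts
// Field names follow convertMessagesForMiMo above; values are made up for illustration.
const toolRoundTrip: Array<Record<string, unknown>> = [
	{ role: "user", content: "What's in package.json?" },
	{
		role: "assistant",
		content: "",
		// Echoed back from the previous response; per MiMo's docs, omitting this
		// on a tool-call turn makes the API reject the request with HTTP 400.
		reasoning_content: "The user wants the file contents; call read_file on package.json.",
		tool_calls: [
			{
				id: "call_1",
				type: "function",
				function: { name: "read_file", arguments: '{"path":"package.json"}' },
			},
		],
	},
	// Tool results are sent as role:"tool" messages keyed by tool_call_id.
	{ role: "tool", tool_call_id: "call_1", content: '{ "name": "roo-code" }' },
]
```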
diff --git a/src/api/providers/openai.ts b/src/api/providers/openai.ts
index 33b29abcafe..155e96b5fda 100644
--- a/src/api/providers/openai.ts
+++ b/src/api/providers/openai.ts
@@ -121,7 +121,7 @@ export class OpenAiHandler extends BaseProvider implements SingleCompletionHandl
 			}
 		}
 
-		convertedMessages = [systemMessage, ...convertToOpenAiMessages(messages)]
+		convertedMessages = [systemMessage, ...convertToOpenAiMessages(messages, { modelId })]
 
 		if (modelInfo.supportsPromptCache) {
 			// Note: the following logic is copied from openrouter:
@@ -225,7 +225,7 @@ export class OpenAiHandler extends BaseProvider implements SingleCompletionHandl
 				model: modelId,
 				messages: deepseekReasoner
 					? convertToR1Format([{ role: "user", content: systemPrompt }, ...messages])
-					: [systemMessage, ...convertToOpenAiMessages(messages)],
+					: [systemMessage, ...convertToOpenAiMessages(messages, { modelId })],
 				// Tools are always present (minimum ALWAYS_AVAILABLE_TOOLS)
 				tools: this.convertToolsForOpenAI(metadata?.tools),
 				tool_choice: metadata?.tool_choice,
diff --git a/src/api/providers/openrouter.ts b/src/api/providers/openrouter.ts
index 7fcc24b15f6..95a7287a523 100644
--- a/src/api/providers/openrouter.ts
+++ b/src/api/providers/openrouter.ts
@@ -238,7 +238,7 @@ export class OpenRouterHandler extends BaseProvider implements SingleCompletionH
 			{ role: "system", content: systemPrompt },
 			...convertToOpenAiMessages(
 				messages,
-				isMistral ? { normalizeToolCallId: normalizeMistralToolCallId } : undefined,
+				isMistral ? { normalizeToolCallId: normalizeMistralToolCallId, modelId } : { modelId },
 			),
 		]
 
diff --git a/src/api/providers/qwen-code.ts b/src/api/providers/qwen-code.ts
index f2a207051ef..22e32ba6af0 100644
--- a/src/api/providers/qwen-code.ts
+++ b/src/api/providers/qwen-code.ts
@@ -223,7 +223,7 @@ export class QwenCodeHandler extends BaseProvider implements SingleCompletionHan
 			content: systemPrompt,
 		}
 
-		const convertedMessages = [systemMessage, ...convertToOpenAiMessages(messages)]
+		const convertedMessages = [systemMessage, ...convertToOpenAiMessages(messages, { modelId: model.id })]
 
 		const requestOptions: OpenAI.Chat.Completions.ChatCompletionCreateParamsStreaming = {
 			model: model.id,
diff --git a/src/api/providers/requesty.ts b/src/api/providers/requesty.ts
index b241c347b08..e6c6e4a5b0b 100644
--- a/src/api/providers/requesty.ts
+++ b/src/api/providers/requesty.ts
@@ -131,7 +131,7 @@ export class RequestyHandler extends BaseProvider implements SingleCompletionHan
 
 		const openAiMessages: OpenAI.Chat.ChatCompletionMessageParam[] = [
 			{ role: "system", content: systemPrompt },
-			...convertToOpenAiMessages(messages),
+			...convertToOpenAiMessages(messages, { modelId: this.getModel().id }),
 		]
 
 		// Map extended efforts to OpenAI Chat Completions-accepted values (omit unsupported)
diff --git a/src/api/providers/roo.ts b/src/api/providers/roo.ts
index f9e4ccd129c..163991faec8 100644
--- a/src/api/providers/roo.ts
+++ b/src/api/providers/roo.ts
@@ -100,7 +100,10 @@ export class RooHandler extends BaseOpenAiCompatibleProvider {
 			model,
 			max_tokens,
 			temperature,
-			messages: [{ role: "system", content: systemPrompt }, ...convertToOpenAiMessages(messages)],
+			messages: [
+				{ role: "system", content: systemPrompt },
+				...convertToOpenAiMessages(messages, { modelId: model }),
+			],
 			stream: true,
 			stream_options: { include_usage: true },
 			...(reasoning && { reasoning }),
diff --git a/src/api/transform/openai-format.ts b/src/api/transform/openai-format.ts
index 8974dd599ba..49464d27654 100644
--- a/src/api/transform/openai-format.ts
+++ b/src/api/transform/openai-format.ts
@@ -256,7 +257,8 @@ export function sanitizeGeminiMessages(
 /**
  * Options for converting Anthropic messages to OpenAI format.
  */
-export interface ConvertToOpenAiMessagesOptions {
+interface ConvertToOpenAiMessagesOptions {
+	modelId?: string // Enables provider-specific handling, e.g. omitting reasoning_details for MiMo model IDs
 	/**
 	 * Optional function to normalize tool call IDs for providers with strict ID requirements.
 	 * When provided, this function will be applied to all tool_use IDs and tool_result tool_use_ids.
@@ -314,7 +315,7 @@ export function convertToOpenAiMessages(
 		if (anthropicMessage.role === "assistant") {
 			const mapped = mapReasoningDetails(messageWithDetails.reasoning_details)
-			if (mapped) {
+			if (mapped && !(options?.modelId && /mimo/i.test(options.modelId))) {
 				;(baseMessage as any).reasoning_details = mapped
 			}
@@ -490,7 +491,8 @@ export function convertToOpenAiMessages(
 			// Pass through reasoning_details to preserve the original shape from the API.
 			// The `id` field is stripped from openai-responses-v1 blocks (see mapReasoningDetails).
 			const mapped = mapReasoningDetails(messageWithDetails.reasoning_details)
-			if (mapped) {
+			const isMimoModel = !!(options?.modelId && /mimo/i.test(options.modelId))
+			if (mapped && !isMimoModel) {
 				baseMessage.reasoning_details = mapped
 			}
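The gate added above is a case-insensitive substring test on the model id, so it covers every current MiMo id without a hardcoded list (a standalone sketch):

```ts
// Mirrors the check in convertToOpenAiMessages above.
const isMimoModel = (modelId?: string): boolean => !!(modelId && /mimo/i.test(modelId))

isMimoModel("mimo-v2.5-pro") // true
isMimoModel("MiMo-V2-Flash") // true
isMimoModel("gpt-4o") // false -> reasoning_details passes through unchanged
```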
diff --git a/webview-ui/src/components/settings/ApiOptions.tsx b/webview-ui/src/components/settings/ApiOptions.tsx
index f598b707ed5..7f526d0616f 100644
--- a/webview-ui/src/components/settings/ApiOptions.tsx
+++ b/webview-ui/src/components/settings/ApiOptions.tsx
@@ -31,6 +31,7 @@ import {
 	fireworksDefaultModelId,
 	vercelAiGatewayDefaultModelId,
 	minimaxDefaultModelId,
+	mimoDefaultModelId,
 	unboundDefaultModelId,
 } from "@roo-code/types"
 
@@ -92,6 +93,7 @@ import {
 	Fireworks,
 	VercelAiGateway,
 	MiniMax,
+	Mimo,
 } from "./providers"
 
 import { MODELS_BY_PROVIDER, PROVIDERS } from "./constants"
@@ -345,6 +347,7 @@ const ApiOptions = ({
 		deepseek: { field: "apiModelId", default: deepSeekDefaultModelId },
 		moonshot: { field: "apiModelId", default: moonshotDefaultModelId },
 		minimax: { field: "apiModelId", default: minimaxDefaultModelId },
+		mimo: { field: "apiModelId", default: mimoDefaultModelId },
 		mistral: { field: "apiModelId", default: mistralDefaultModelId },
 		xai: { field: "apiModelId", default: xaiDefaultModelId },
 		baseten: { field: "apiModelId", default: basetenDefaultModelId },
@@ -639,6 +642,14 @@ const ApiOptions = ({
 				/>
 			)}
 
+			{selectedProvider === "mimo" && (
+				<Mimo
+					apiConfiguration={apiConfiguration}
+					setApiConfigurationField={setApiConfigurationField}
+				/>
+			)}
+
 			{selectedProvider === "vscode-lm" && (
diff --git a/webview-ui/src/components/settings/constants.ts b/webview-ui/src/components/settings/constants.ts
--- a/webview-ui/src/components/settings/constants.ts
+++ b/webview-ui/src/components/settings/constants.ts
@@ -35,6 +36,7 @@ export const MODELS_BY_PROVIDER: Partial<Record<ProviderName, Record<string, ModelInfo>>> = {
 	minimax: minimaxModels,
+	mimo: mimoModels,
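A sketch of how a defaults map like the one added to `ApiOptions` above can be consumed when the user switches providers (the actual wiring lives in `ApiOptions.tsx`; the helper and callback names here are illustrative):

```ts
// Illustrative: the mapped field is reset to the provider's default model.
const providerModelDefaults: Record<string, { field: "apiModelId"; default: string }> = {
	mimo: { field: "apiModelId", default: "mimo-v2.5-pro" }, // mimoDefaultModelId
}

function applyProviderDefault(provider: string, set: (field: string, value: string) => void) {
	const entry = providerModelDefaults[provider]
	if (entry) {
		set(entry.field, entry.default)
	}
}

applyProviderDefault("mimo", (field, value) => console.log(field, value)) // apiModelId mimo-v2.5-pro
```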
diff --git a/webview-ui/src/components/settings/providers/Mimo.tsx b/webview-ui/src/components/settings/providers/Mimo.tsx
new file mode 100644
--- /dev/null
+++ b/webview-ui/src/components/settings/providers/Mimo.tsx
@@ -0,0 +1,65 @@
+import { useCallback } from "react"
+import { VSCodeDropdown, VSCodeOption, VSCodeTextField } from "@vscode/webview-ui-toolkit/react"
+
+import type { ProviderSettings } from "@roo-code/types"
+
+import { useAppTranslation } from "@src/i18n/TranslationContext"
+import { VSCodeButtonLink } from "@src/components/common/VSCodeButtonLink"
+
+import { inputEventTransform } from "../transforms"
+
+type MimoProps = {
+	apiConfiguration: ProviderSettings
+	setApiConfigurationField: <K extends keyof ProviderSettings>(field: K, value: ProviderSettings[K]) => void
+	simplifySettings?: boolean
+}
+
+export const Mimo = ({ apiConfiguration, setApiConfigurationField }: MimoProps) => {
+	const { t } = useAppTranslation()
+
+	const handleInputChange = useCallback(
+		<K extends keyof ProviderSettings, E>(
+			field: K,
+			transform: (event: E) => ProviderSettings[K] = inputEventTransform,
+		) =>
+			(event: E | Event) => {
+				setApiConfigurationField(field, transform(event as E))
+			},
+		[setApiConfigurationField],
+	)
+
+	return (
+		<>
+			<div>
+				<label className="block font-medium mb-1">{t("settings:providers.mimoBaseUrl")}</label>
+				<VSCodeDropdown
+					value={apiConfiguration?.mimoBaseUrl || "https://token-plan-sgp.xiaomimimo.com/v1"}
+					onChange={handleInputChange("mimoBaseUrl")}
+					className="w-full">
+					<VSCodeOption value="https://token-plan-sgp.xiaomimimo.com/v1">Token Plan - Singapore (Default)</VSCodeOption>
+					<VSCodeOption value="https://token-plan-cn.xiaomimimo.com/v1">Token Plan - China</VSCodeOption>
+					<VSCodeOption value="https://token-plan-ams.xiaomimimo.com/v1">Token Plan - Europe (AMS)</VSCodeOption>
+					<VSCodeOption value="https://api.xiaomimimo.com/v1">Pay-as-you-go</VSCodeOption>
+				</VSCodeDropdown>
+			</div>
+
+			<VSCodeTextField
+				value={apiConfiguration?.mimoApiKey || ""}
+				type="password"
+				onInput={handleInputChange("mimoApiKey")}
+				placeholder={t("settings:placeholders.apiKey")}
+				className="w-full">
+				<label className="block font-medium mb-1">{t("settings:providers.mimoApiKey")}</label>
+			</VSCodeTextField>
+ {t("settings:providers.apiKeyStorageNotice")} +
+
+			{!apiConfiguration?.mimoApiKey && (
+				<VSCodeButtonLink href="https://platform.xiaomimimo.com" appearance="secondary">
+					{t("settings:providers.getMimoApiKey")}
+				</VSCodeButtonLink>
+			)}
+		</>
+	)
+}
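The component above relies on a curried `handleInputChange`: calling it with a settings key returns an event handler bound to that key. A self-contained sketch of the pattern (types simplified; names here are illustrative, not the component's exports):

```ts
type Settings = { mimoApiKey?: string; mimoBaseUrl?: string }

// Curried factory: bind a settings field once, reuse the resulting event handler.
const makeHandler =
	<K extends keyof Settings>(set: (field: K, value: Settings[K]) => void, field: K) =>
	(event: { target: { value: string } }) => {
		set(field, event.target.value as Settings[K])
	}

const onApiKeyInput = makeHandler((field, value) => console.log(field, value), "mimoApiKey")
onApiKeyInput({ target: { value: "sk-..." } }) // logs: mimoApiKey sk-...
```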
diff --git a/webview-ui/src/components/settings/providers/index.ts b/webview-ui/src/components/settings/providers/index.ts
index 7badb543115..02a928ffb54 100644
--- a/webview-ui/src/components/settings/providers/index.ts
+++ b/webview-ui/src/components/settings/providers/index.ts
@@ -23,4 +23,5 @@ export { LiteLLM } from "./LiteLLM"
 export { Fireworks } from "./Fireworks"
 export { VercelAiGateway } from "./VercelAiGateway"
 export { MiniMax } from "./MiniMax"
+export { Mimo } from "./Mimo"
 export { Baseten } from "./Baseten"
diff --git a/webview-ui/src/components/settings/utils/providerModelConfig.ts b/webview-ui/src/components/settings/utils/providerModelConfig.ts
index 59f76862b45..3b270d09522 100644
--- a/webview-ui/src/components/settings/utils/providerModelConfig.ts
+++ b/webview-ui/src/components/settings/utils/providerModelConfig.ts
@@ -16,6 +16,7 @@ import {
 	fireworksDefaultModelId,
 	minimaxDefaultModelId,
 	basetenDefaultModelId,
+	mimoDefaultModelId,
 } from "@roo-code/types"
 
 import { MODELS_BY_PROVIDER } from "../constants"
@@ -40,6 +41,7 @@ export const PROVIDER_SERVICE_CONFIG: Partial<
 	zai: internationalZAiDefaultModelId,
 	fireworks: fireworksDefaultModelId,
 	minimax: minimaxDefaultModelId,
+	mimo: mimoDefaultModelId,
 	baseten: basetenDefaultModelId,
 }
diff --git a/webview-ui/src/components/ui/hooks/useSelectedModel.ts b/webview-ui/src/components/ui/hooks/useSelectedModel.ts
index 4b01d55740e..6aca405a1d9 100644
--- a/webview-ui/src/components/ui/hooks/useSelectedModel.ts
+++ b/webview-ui/src/components/ui/hooks/useSelectedModel.ts
@@ -9,6 +9,7 @@ import {
 	deepSeekModels,
 	moonshotModels,
 	minimaxModels,
+	mimoModels,
 	geminiModels,
 	mistralModels,
 	openAiModelInfoSaneDefaults,
@@ -250,6 +251,11 @@ function getSelectedModel({
 		case "minimax": {
 			const id = apiConfiguration.apiModelId ?? defaultModelId
 			const info = minimaxModels[id as keyof typeof minimaxModels]
 			return { id, info }
 		}
+		case "mimo": {
+			const id = apiConfiguration.apiModelId ?? defaultModelId
+			const info = mimoModels[id as keyof typeof mimoModels]
+			return { id, info }
+		}
 		case "zai": {
 			const isChina = apiConfiguration.zaiApiLine === "china_coding"
 			const models = isChina ? mainlandZAiModels : internationalZAiModels
diff --git a/webview-ui/src/i18n/locales/en/settings.json b/webview-ui/src/i18n/locales/en/settings.json
index d6d0cdacac8..a94989ed6df 100644
--- a/webview-ui/src/i18n/locales/en/settings.json
+++ b/webview-ui/src/i18n/locales/en/settings.json
@@ -453,6 +453,9 @@
 		"minimaxApiKey": "MiniMax API Key",
 		"getMiniMaxApiKey": "Get MiniMax API Key",
 		"minimaxBaseUrl": "MiniMax Entrypoint",
+		"mimoApiKey": "MiMo API Key",
+		"getMimoApiKey": "Get MiMo API Key",
+		"mimoBaseUrl": "MiMo Entrypoint",
 		"zaiApiKey": "Z AI API Key",
 		"getZaiApiKey": "Get Z AI API Key",
 		"zaiEntrypoint": "Z AI Entrypoint",