Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
3 changes: 3 additions & 0 deletions dev/null/post-checkout
Original file line number Diff line number Diff line change
@@ -0,0 +1,3 @@
#!/bin/sh
# Git LFS post-checkout hook (auto-generated by `git lfs install`).
# Fails fast with exit code 2 if git-lfs is missing from PATH, otherwise
# delegates to `git lfs post-checkout` so LFS pointer files are smudged
# into their real content after a checkout.
command -v git-lfs >/dev/null 2>&1 || { printf >&2 "\n%s\n\n" "This repository is configured for Git LFS but 'git-lfs' was not found on your path. If you no longer wish to use Git LFS, remove this hook by deleting the 'post-checkout' file in the hooks directory (set by 'core.hookspath'; usually '.git/hooks')."; exit 2; }
git lfs post-checkout "$@"
3 changes: 3 additions & 0 deletions dev/null/post-commit
Original file line number Diff line number Diff line change
@@ -0,0 +1,3 @@
#!/bin/sh
# Git LFS post-commit hook (auto-generated by `git lfs install`).
# Fails fast with exit code 2 if git-lfs is missing from PATH, otherwise
# delegates to `git lfs post-commit` with the hook's original arguments.
command -v git-lfs >/dev/null 2>&1 || { printf >&2 "\n%s\n\n" "This repository is configured for Git LFS but 'git-lfs' was not found on your path. If you no longer wish to use Git LFS, remove this hook by deleting the 'post-commit' file in the hooks directory (set by 'core.hookspath'; usually '.git/hooks')."; exit 2; }
git lfs post-commit "$@"
3 changes: 3 additions & 0 deletions dev/null/post-merge
Original file line number Diff line number Diff line change
@@ -0,0 +1,3 @@
#!/bin/sh
# Git LFS post-merge hook (auto-generated by `git lfs install`).
# Fails fast with exit code 2 if git-lfs is missing from PATH, otherwise
# delegates to `git lfs post-merge` with the hook's original arguments.
command -v git-lfs >/dev/null 2>&1 || { printf >&2 "\n%s\n\n" "This repository is configured for Git LFS but 'git-lfs' was not found on your path. If you no longer wish to use Git LFS, remove this hook by deleting the 'post-merge' file in the hooks directory (set by 'core.hookspath'; usually '.git/hooks')."; exit 2; }
git lfs post-merge "$@"
3 changes: 3 additions & 0 deletions dev/null/pre-push
Original file line number Diff line number Diff line change
@@ -0,0 +1,3 @@
#!/bin/sh
# Git LFS pre-push hook (auto-generated by `git lfs install`).
# Fails fast with exit code 2 if git-lfs is missing from PATH, otherwise
# delegates to `git lfs pre-push` so LFS objects are uploaded before
# the refs are pushed.
command -v git-lfs >/dev/null 2>&1 || { printf >&2 "\n%s\n\n" "This repository is configured for Git LFS but 'git-lfs' was not found on your path. If you no longer wish to use Git LFS, remove this hook by deleting the 'pre-push' file in the hooks directory (set by 'core.hookspath'; usually '.git/hooks')."; exit 2; }
git lfs pre-push "$@"
22 changes: 22 additions & 0 deletions packages/types/src/provider-settings.ts
Original file line number Diff line number Diff line change
Expand Up @@ -20,6 +20,7 @@ import {
xaiModels,
internationalZAiModels,
minimaxModels,
mimoModels,
} from "./providers/index.js"

/**
Expand Down Expand Up @@ -121,6 +122,7 @@ export const providerNames = [
"mistral",
"moonshot",
"minimax",
"mimo",
"openai-codex",
"openai-native",
"qwen-code",
Expand Down Expand Up @@ -334,6 +336,18 @@ const minimaxSchema = apiModelIdProviderModelSchema.extend({
minimaxApiKey: z.string().optional(),
})

// Settings schema for the Xiaomi MiMo provider.
// The base URL is constrained to the four literal endpoints below
// (presumably the global API host plus cn/sgp/ams regional token-plan
// gateways — TODO confirm against MiMo platform docs); when omitted,
// the handler's default endpoint applies.
const mimoSchema = apiModelIdProviderModelSchema.extend({
	mimoBaseUrl: z
		.union([
			z.literal("https://api.xiaomimimo.com/v1"),
			z.literal("https://token-plan-cn.xiaomimimo.com/v1"),
			z.literal("https://token-plan-sgp.xiaomimimo.com/v1"),
			z.literal("https://token-plan-ams.xiaomimimo.com/v1"),
		])
		.optional(),
	// API key is optional at the schema level; presence is presumably
	// enforced at request time by the handler.
	mimoApiKey: z.string().optional(),
})

const requestySchema = baseProviderSettingsSchema.extend({
requestyBaseUrl: z.string().optional(),
requestyApiKey: z.string().optional(),
Expand Down Expand Up @@ -417,6 +431,7 @@ export const providerSettingsSchemaDiscriminated = z.discriminatedUnion("apiProv
poeSchema.merge(z.object({ apiProvider: z.literal("poe") })),
moonshotSchema.merge(z.object({ apiProvider: z.literal("moonshot") })),
minimaxSchema.merge(z.object({ apiProvider: z.literal("minimax") })),
mimoSchema.merge(z.object({ apiProvider: z.literal("mimo") })),
requestySchema.merge(z.object({ apiProvider: z.literal("requesty") })),
unboundSchema.merge(z.object({ apiProvider: z.literal("unbound") })),
fakeAiSchema.merge(z.object({ apiProvider: z.literal("fake-ai") })),
Expand Down Expand Up @@ -451,6 +466,7 @@ export const providerSettingsSchema = z.object({
...poeSchema.shape,
...moonshotSchema.shape,
...minimaxSchema.shape,
...mimoSchema.shape,
...requestySchema.shape,
...unboundSchema.shape,
...fakeAiSchema.shape,
Expand Down Expand Up @@ -525,6 +541,7 @@ export const modelIdKeysByProvider: Record<TypicalProvider, ModelIdKey> = {
mistral: "apiModelId",
moonshot: "apiModelId",
minimax: "apiModelId",
mimo: "apiModelId",
deepseek: "apiModelId",
poe: "apiModelId",
"qwen-code": "apiModelId",
Expand Down Expand Up @@ -617,6 +634,11 @@ export const MODELS_BY_PROVIDER: Record<
label: "MiniMax",
models: Object.keys(minimaxModels),
},
mimo: {
id: "mimo",
label: "Xiaomi MiMo",
models: Object.keys(mimoModels),
},
"openai-codex": {
id: "openai-codex",
label: "OpenAI - ChatGPT Plus/Pro",
Expand Down
4 changes: 4 additions & 0 deletions packages/types/src/providers/index.ts
Original file line number Diff line number Diff line change
Expand Up @@ -25,6 +25,7 @@ export * from "./xai.js"
export * from "./vercel-ai-gateway.js"
export * from "./zai.js"
export * from "./minimax.js"
export * from "./mimo.js"

import { anthropicDefaultModelId } from "./anthropic.js"
import { basetenDefaultModelId } from "./baseten.js"
Expand All @@ -49,6 +50,7 @@ import { xaiDefaultModelId } from "./xai.js"
import { vercelAiGatewayDefaultModelId } from "./vercel-ai-gateway.js"
import { internationalZAiDefaultModelId, mainlandZAiDefaultModelId } from "./zai.js"
import { minimaxDefaultModelId } from "./minimax.js"
import { mimoDefaultModelId } from "./mimo.js"

// Import the ProviderName type from provider-settings to avoid duplication
import type { ProviderName } from "../provider-settings.js"
Expand Down Expand Up @@ -85,6 +87,8 @@ export function getProviderDefaultModelId(
return moonshotDefaultModelId
case "minimax":
return minimaxDefaultModelId
case "mimo":
return mimoDefaultModelId
case "zai":
return options?.isChina ? mainlandZAiDefaultModelId : internationalZAiDefaultModelId
case "openai-native":
Expand Down
49 changes: 49 additions & 0 deletions packages/types/src/providers/mimo.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,49 @@
import type { ModelInfo } from "../model.js"

// Xiaomi MiMo model catalog and defaults.
// Pricing/spec source: https://platform.xiaomimimo.com/static/docs/pricing.md
export type MimoModelId = keyof typeof mimoModels

// Model selected when the user has not explicitly chosen one.
export const mimoDefaultModelId: MimoModelId = "mimo-v2.5-pro"

// All prices are USD per million tokens. `as const satisfies` keeps the
// literal key union for MimoModelId while validating each entry against
// ModelInfo.
export const mimoModels = {
	"mimo-v2.5-pro": {
		maxTokens: 131_072,
		contextWindow: 1_048_576,
		supportsImages: false,
		supportsPromptCache: true,
		preserveReasoning: true,
		inputPrice: 1.0, // $1.00 per million tokens (cache miss, ≤256K)
		outputPrice: 3.0, // $3.00 per million tokens
		cacheReadsPrice: 0.2, // $0.20 per million tokens (cache hit)
		cacheWritesPrice: 0, // Free for limited time
		description:
			"MiMo V2.5 Pro - Xiaomi's flagship reasoning model with 1M context, interleaved thinking, tool calling, and structured output.",
	},
	"mimo-v2.5": {
		maxTokens: 131_072,
		contextWindow: 1_048_576,
		supportsImages: false,
		supportsPromptCache: true,
		preserveReasoning: true,
		inputPrice: 0.4, // $0.40 per million tokens (cache miss, ≤256K)
		outputPrice: 2.0, // $2.00 per million tokens
		cacheReadsPrice: 0.08, // $0.08 per million tokens (cache hit)
		cacheWritesPrice: 0, // Free for limited time
		description:
			"MiMo V2.5 - Full modal understanding model with 1M context, deep thinking, tool calling, and structured output.",
	},
	// No cache pricing fields here: supportsPromptCache is false for Flash.
	"mimo-v2-flash": {
		maxTokens: 65_536,
		contextWindow: 262_144,
		supportsImages: false,
		supportsPromptCache: false,
		preserveReasoning: true,
		inputPrice: 0.1, // $0.10 per million tokens (cache miss)
		outputPrice: 0.3, // $0.30 per million tokens
		description: "MiMo V2 Flash - Fast and cost-effective reasoning model with tool calling support.",
	},
} as const satisfies Record<string, ModelInfo>

// ModelInfo for the default model, exported for callers that need the
// widened (non-literal) type.
export const mimoDefaultModelInfo: ModelInfo = mimoModels[mimoDefaultModelId]

// Default sampling temperature for MiMo requests — presumably the
// provider-recommended value; TODO confirm against MiMo API docs.
export const MIMO_DEFAULT_TEMPERATURE = 1.0
3 changes: 3 additions & 0 deletions src/api/index.ts
Original file line number Diff line number Diff line change
Expand Up @@ -33,6 +33,7 @@ import {
FireworksHandler,
VercelAiGatewayHandler,
MiniMaxHandler,
MimoHandler,
BasetenHandler,
} from "./providers"
import { NativeOllamaHandler } from "./providers/native-ollama"
Expand Down Expand Up @@ -167,6 +168,8 @@ export function buildApiHandler(configuration: ProviderSettings): ApiHandler {
return new LiteLLMHandler(options)
case "sambanova":
return new SambaNovaHandler(options)
case "mimo":
return new MimoHandler(options)
case "zai":
return new ZAiHandler(options)
case "fireworks":
Expand Down
7 changes: 5 additions & 2 deletions src/api/providers/base-openai-compatible-provider.ts
Original file line number Diff line number Diff line change
Expand Up @@ -90,7 +90,10 @@ export abstract class BaseOpenAiCompatibleProvider<ModelName extends string>
model,
max_tokens,
temperature,
messages: [{ role: "system", content: systemPrompt }, ...convertToOpenAiMessages(messages)],
messages: [
{ role: "system", content: systemPrompt },
...convertToOpenAiMessages(messages, { modelId: model }),
],
stream: true,
stream_options: { include_usage: true },
tools: this.convertToolsForOpenAI(metadata?.tools),
Expand Down Expand Up @@ -224,7 +227,7 @@ export abstract class BaseOpenAiCompatibleProvider<ModelName extends string>

const params: OpenAI.Chat.Completions.ChatCompletionCreateParams = {
model: modelId,
messages: [{ role: "user", content: prompt }],
messages: [{ role: "user", content: prompt }], // No history, no MiMo strip needed
}

// Add thinking parameter if reasoning is enabled and model supports it
Expand Down
1 change: 1 addition & 0 deletions src/api/providers/index.ts
Original file line number Diff line number Diff line change
Expand Up @@ -27,4 +27,5 @@ export { FireworksHandler } from "./fireworks"
export { RooHandler } from "./roo"
export { VercelAiGatewayHandler } from "./vercel-ai-gateway"
export { MiniMaxHandler } from "./minimax"
export { MimoHandler } from "./mimo"
export { BasetenHandler } from "./baseten"
1 change: 1 addition & 0 deletions src/api/providers/lite-llm.ts
Original file line number Diff line number Diff line change
Expand Up @@ -117,6 +117,7 @@ export class LiteLLMHandler extends RouterProvider implements SingleCompletionHa
const { id: modelId, info } = await this.fetchModel()

const openAiMessages = convertToOpenAiMessages(messages, {
modelId,
normalizeToolCallId: sanitizeOpenAiCallId,
})

Expand Down
2 changes: 1 addition & 1 deletion src/api/providers/lm-studio.ts
Original file line number Diff line number Diff line change
Expand Up @@ -44,7 +44,7 @@ export class LmStudioHandler extends BaseProvider implements SingleCompletionHan
): ApiStream {
const openAiMessages: OpenAI.Chat.ChatCompletionMessageParam[] = [
{ role: "system", content: systemPrompt },
...convertToOpenAiMessages(messages),
...convertToOpenAiMessages(messages, { modelId: this.getModel().id }),
]

// -------------------------
Expand Down
Loading