Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
24 changes: 15 additions & 9 deletions package.json
Original file line number Diff line number Diff line change
Expand Up @@ -45,14 +45,20 @@
"pnpm": ">=10.0.0"
},
"pnpm": {
"overrides": {
"@types/react": "19.1.10",
"@types/react-dom": "19.1.7",
"@ai-sdk/openai": "2.0.32",
"@ai-sdk/anthropic": "2.0.17",
"@ai-sdk/google": "2.0.14",
"@openrouter/ai-sdk-provider": "1.2.0"
},
"engine-strict": true
}
},
"overrides": {
"@types/react": "19.1.10",
"@types/react-dom": "19.1.7",
"@ai-sdk/openai": "2.0.32",
"@ai-sdk/anthropic": "2.0.17",
"@ai-sdk/google": "2.0.14",
"@openrouter/ai-sdk-provider": "1.2.0"
},
"workspaces": [
"packages/app/*",
"packages/sdk/*",
"packages/sdk/examples/*",
"packages/tests/*"
]
}
2 changes: 2 additions & 0 deletions packages/app/server/src/services/AccountingService.ts
Original file line number Diff line number Diff line change
Expand Up @@ -10,6 +10,7 @@ import {
SupportedImageModel,
SupportedVideoModel,
XAIModels,
VercelAIGatewayModels,
} from '@merit-systems/echo-typescript-sdk';

import { Decimal } from '@prisma/client/runtime/library';
Expand All @@ -30,6 +31,7 @@ export const ALL_SUPPORTED_MODELS: SupportedModel[] = [
...OpenRouterModels,
...GroqModels,
...XAIModels,
...VercelAIGatewayModels,
];

// Handle image models separately since they have different pricing structure
Expand Down
4 changes: 3 additions & 1 deletion packages/sdk/ts/package.json
Original file line number Diff line number Diff line change
Expand Up @@ -29,7 +29,8 @@
"update-models:gemini": "tsx scripts/update-gemini-models.ts",
"update-models:openrouter": "tsx scripts/update-openrouter-models.ts",
"update-models:groq": "tsx scripts/update-groq-models.ts",
"update-all-models": "pnpm run update-models:openai && pnpm run update-models:anthropic && pnpm run update-models:gemini && pnpm run update-models:openrouter && pnpm run update-models:groq",
"update-models:vercel-ai-gateway": "tsx scripts/update-vercel-ai-gateway-models.ts",
"update-all-models": "pnpm run update-models:openai && pnpm run update-models:anthropic && pnpm run update-models:gemini && pnpm run update-models:openrouter && pnpm run update-models:groq && pnpm run update-models:vercel-ai-gateway",
"prepublishOnly": "pnpm run build"
},
"keywords": [
Expand Down Expand Up @@ -61,6 +62,7 @@
"@ai-sdk/google": "2.0.14",
"@ai-sdk/groq": "2.0.17",
"@ai-sdk/openai": "2.0.32",
"@ai-sdk/openai-compatible": "2.0.35",
"@ai-sdk/xai": "2.0.16",
"@openrouter/ai-sdk-provider": "1.2.0",
"ai": "5.0.47"
Expand Down
24 changes: 24 additions & 0 deletions packages/sdk/ts/scripts/update-vercel-ai-gateway-models.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,24 @@
// Script to update Vercel AI Gateway models
// Usage: pnpm run update-models:vercel-ai-gateway

import { SupportedModel } from '../src/supported-models/types';

// TODO: Fetch models from the Vercel AI Gateway API when one becomes available.
// Until then, the canonical hand-maintained list lives in
// src/supported-models/chat/vercel-ai-gateway.ts, sourced from:
//   https://vercel.com/ai-gateway/models
//   https://vercel.com/docs/ai-gateway/pricing

// Sample/seed entries only — NOT the canonical list (see note above).
const models: SupportedModel[] = [
  // OpenAI models via Vercel AI Gateway
  {
    model_id: 'gpt-4o',
    input_cost_per_token: 0.0000025, // $2.50 per 1M tokens
    output_cost_per_token: 0.00001, // $10.00 per 1M tokens
    provider: 'Vercel AI Gateway',
  },
  // ... add more models as needed
];

console.log(`Found ${models.length} Vercel AI Gateway models`);

// WARNING: do not write `models` over the canonical file as-is — this stub has
// a single sample entry while the real list has many more. When the fetch step
// is implemented, generate the full file and then write it with:
//   import { writeFileSync } from 'node:fs';
//   writeFileSync('src/supported-models/chat/vercel-ai-gateway.ts', rendered);
2 changes: 2 additions & 0 deletions packages/sdk/ts/src/providers/index.ts
Original file line number Diff line number Diff line change
Expand Up @@ -4,6 +4,7 @@ export * from './groq';
export * from './xai';
export * from './openai';
export * from './openrouter';
export * from './vercel-ai-gateway';

export function echoFetch(
originalFetch: typeof fetch,
Expand Down Expand Up @@ -63,3 +64,4 @@ export { type GroqProvider } from '@ai-sdk/groq';
export { type OpenAIProvider } from '@ai-sdk/openai';
export { type OpenRouterProvider } from '@openrouter/ai-sdk-provider';
export { type XaiProvider } from '@ai-sdk/xai';
export { type OpenAICompatibleProvider } from '@ai-sdk/openai-compatible';
44 changes: 44 additions & 0 deletions packages/sdk/ts/src/providers/vercel-ai-gateway.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,44 @@
import {
createOpenAICompatible,
OpenAICompatibleProvider,
} from '@ai-sdk/openai-compatible';

import { ROUTER_BASE_URL } from 'config';
import { EchoConfig } from '../types';
import { validateAppId } from '../utils/validation';
import { echoFetch } from './index';

/**
* Creates a Vercel AI Gateway provider for the AI SDK.
*
* Vercel AI Gateway provides a unified API that proxies requests to
* underlying AI providers (OpenAI, Anthropic, Google, etc.) with
* built-in observability, rate limiting, and caching.
*
* @see https://vercel.com/docs/ai-gateway
* @see https://sdk.vercel.ai/providers/ai-sdk-providers/vercel
*/
export function createEchoVercelAIGateway(
{ appId, baseRouterUrl = ROUTER_BASE_URL }: EchoConfig,
getTokenFn: (appId: string) => Promise<string | null>,
onInsufficientFunds?: () => void,
options?: {
/** Custom gateway base URL. Defaults to the Echo router. */
gatewayBaseURL?: string;
}
): OpenAICompatibleProvider {
validateAppId(appId, 'createEchoVercelAIGateway');

return createOpenAICompatible({
name: 'vercel-ai-gateway',
baseURL: options?.gatewayBaseURL ?? baseRouterUrl,
headers: {
'x-echo-app-id': appId,
},
fetch: echoFetch(
fetch,
async () => await getTokenFn(appId),
onInsufficientFunds
),
});
}
166 changes: 166 additions & 0 deletions packages/sdk/ts/src/supported-models/chat/vercel-ai-gateway.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,166 @@
import { SupportedModel } from '../types';

// Vercel AI Gateway model IDs
// Vercel AI Gateway acts as a proxy - it routes requests to underlying providers
// (OpenAI, Anthropic, Google, Groq, etc.) so pricing matches the upstream provider.
// Pricing sourced from: https://vercel.com/docs/ai-gateway/pricing
// Model list sourced from: https://vercel.com/ai-gateway/models
// Last updated: 2026-03-20
// NOTE(review): the "last updated" date above is in the future — confirm;
// likely meant 2025-03-20.
//
// NOTE(review): Vercel AI Gateway canonically namespaces model ids as
// `creator/model-name` (e.g. `openai/gpt-4o`); only the qwen entry below uses
// that form — confirm the un-namespaced ids resolve at the gateway.

export type VercelAIGatewayModel =
  | 'gpt-4o'
  | 'gpt-4o-mini'
  | 'gpt-4.1'
  | 'gpt-4.1-mini'
  | 'gpt-4.1-nano'
  | 'gpt-5'
  | 'gpt-5-mini'
  | 'gpt-5-nano'
  | 'o3-mini'
  | 'o4-mini'
  | 'claude-sonnet-4-20250514'
  | 'claude-haiku-4-20250414'
  | 'gemini-2.5-pro'
  | 'gemini-2.5-flash'
  | 'deepseek-r1'
  | 'deepseek-chat'
  | 'llama-3.1-8b-instant'
  | 'llama-3.3-70b-versatile'
  | 'mistral-large-latest'
  | 'qwen/qwen3-32b';

// Compile-time check: `satisfies` ties every entry's model_id to the
// VercelAIGatewayModel union (a typo or an entry missing from the union fails
// to compile), while the exported constant below keeps the original
// SupportedModel[] type for callers.
const entries = [
  // === OpenAI models via Vercel AI Gateway ===
  {
    model_id: 'gpt-4o',
    input_cost_per_token: 0.0000025, // $2.50 per 1M tokens
    output_cost_per_token: 0.00001, // $10.00 per 1M tokens
    provider: 'Vercel AI Gateway',
  },
  {
    model_id: 'gpt-4o-mini',
    input_cost_per_token: 0.00000015, // $0.15 per 1M tokens
    output_cost_per_token: 0.0000006, // $0.60 per 1M tokens
    provider: 'Vercel AI Gateway',
  },
  {
    model_id: 'gpt-4.1',
    input_cost_per_token: 0.000002, // $2.00 per 1M tokens
    output_cost_per_token: 0.000008, // $8.00 per 1M tokens
    provider: 'Vercel AI Gateway',
  },
  {
    model_id: 'gpt-4.1-mini',
    input_cost_per_token: 0.0000004, // $0.40 per 1M tokens
    output_cost_per_token: 0.0000016, // $1.60 per 1M tokens
    provider: 'Vercel AI Gateway',
  },
  {
    model_id: 'gpt-4.1-nano',
    input_cost_per_token: 0.0000001, // $0.10 per 1M tokens
    output_cost_per_token: 0.0000004, // $0.40 per 1M tokens
    provider: 'Vercel AI Gateway',
  },
  {
    model_id: 'gpt-5',
    input_cost_per_token: 0.0000025, // $2.50 per 1M tokens
    output_cost_per_token: 0.000015, // $15.00 per 1M tokens
    provider: 'Vercel AI Gateway',
  },
  {
    model_id: 'gpt-5-mini',
    input_cost_per_token: 0.0000005, // $0.50 per 1M tokens
    output_cost_per_token: 0.000003, // $3.00 per 1M tokens
    provider: 'Vercel AI Gateway',
  },
  {
    model_id: 'gpt-5-nano',
    input_cost_per_token: 0.00000005, // $0.05 per 1M tokens
    output_cost_per_token: 0.00000025, // $0.25 per 1M tokens
    provider: 'Vercel AI Gateway',
  },
  {
    model_id: 'o3-mini',
    input_cost_per_token: 0.0000011, // $1.10 per 1M tokens
    output_cost_per_token: 0.0000044, // $4.40 per 1M tokens
    provider: 'Vercel AI Gateway',
  },
  {
    model_id: 'o4-mini',
    input_cost_per_token: 0.0000011, // $1.10 per 1M tokens
    output_cost_per_token: 0.0000044, // $4.40 per 1M tokens
    provider: 'Vercel AI Gateway',
  },

  // === Anthropic models via Vercel AI Gateway ===
  {
    model_id: 'claude-sonnet-4-20250514',
    input_cost_per_token: 0.000003, // $3.00 per 1M tokens
    output_cost_per_token: 0.000015, // $15.00 per 1M tokens
    provider: 'Vercel AI Gateway',
  },
  {
    model_id: 'claude-haiku-4-20250414',
    input_cost_per_token: 0.0000008, // $0.80 per 1M tokens
    output_cost_per_token: 0.000004, // $4.00 per 1M tokens
    provider: 'Vercel AI Gateway',
  },

  // === Google models via Vercel AI Gateway ===
  {
    model_id: 'gemini-2.5-pro',
    input_cost_per_token: 0.00000125, // $1.25 per 1M tokens
    output_cost_per_token: 0.00001, // $10.00 per 1M tokens
    provider: 'Vercel AI Gateway',
  },
  {
    model_id: 'gemini-2.5-flash',
    input_cost_per_token: 0.00000015, // $0.15 per 1M tokens
    output_cost_per_token: 0.0000006, // $0.60 per 1M tokens
    provider: 'Vercel AI Gateway',
  },

  // === DeepSeek models via Vercel AI Gateway ===
  {
    model_id: 'deepseek-r1',
    input_cost_per_token: 0.00000055, // $0.55 per 1M tokens
    output_cost_per_token: 0.00000219, // $2.19 per 1M tokens
    provider: 'Vercel AI Gateway',
  },
  {
    model_id: 'deepseek-chat',
    input_cost_per_token: 0.00000014, // $0.14 per 1M tokens
    output_cost_per_token: 0.00000028, // $0.28 per 1M tokens
    provider: 'Vercel AI Gateway',
  },

  // === Groq models via Vercel AI Gateway ===
  {
    model_id: 'llama-3.1-8b-instant',
    input_cost_per_token: 0.00000005, // $0.05 per 1M tokens
    output_cost_per_token: 0.00000008, // $0.08 per 1M tokens
    provider: 'Vercel AI Gateway',
  },
  {
    model_id: 'llama-3.3-70b-versatile',
    input_cost_per_token: 0.00000059, // $0.59 per 1M tokens
    output_cost_per_token: 0.00000079, // $0.79 per 1M tokens
    provider: 'Vercel AI Gateway',
  },

  // === Mistral models via Vercel AI Gateway ===
  {
    model_id: 'mistral-large-latest',
    input_cost_per_token: 0.000002, // $2.00 per 1M tokens
    output_cost_per_token: 0.000006, // $6.00 per 1M tokens
    provider: 'Vercel AI Gateway',
  },

  // === Qwen models via Vercel AI Gateway ===
  {
    model_id: 'qwen/qwen3-32b',
    input_cost_per_token: 0.00000029, // $0.29 per 1M tokens
    output_cost_per_token: 0.00000059, // $0.59 per 1M tokens
    provider: 'Vercel AI Gateway',
  },
] satisfies ReadonlyArray<SupportedModel & { model_id: VercelAIGatewayModel }>;

export const VercelAIGatewayModels: SupportedModel[] = [...entries];
1 change: 1 addition & 0 deletions packages/sdk/ts/src/supported-models/index.ts
Original file line number Diff line number Diff line change
Expand Up @@ -8,3 +8,4 @@ export * from './image/openai';
export * from './responses/openai';
export * from './video/gemini';
export * from './video/vertex-ai';
export * from './chat/vercel-ai-gateway';