Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions packages/app/server/src/env.ts
Original file line number Diff line number Diff line change
Expand Up @@ -45,6 +45,7 @@ export const env = createEnv({
GROQ_API_KEY: z.string().optional(),
XAI_API_KEY: z.string().optional(),
OPENROUTER_API_KEY: z.string().optional(),
VERCEL_AI_GATEWAY_API_KEY: z.string().optional(),
TAVILY_API_KEY: z.string().optional(),
E2B_API_KEY: z.string().optional(),
GOOGLE_SERVICE_ACCOUNT_KEY_ENCODED: z.string().optional(),
Expand Down
7 changes: 7 additions & 0 deletions packages/app/server/src/providers/ProviderFactory.ts
Original file line number Diff line number Diff line change
Expand Up @@ -21,6 +21,7 @@ import { OpenAIImageProvider } from './OpenAIImageProvider';
import { OpenAIResponsesProvider } from './OpenAIResponsesProvider';
import { OpenRouterProvider } from './OpenRouterProvider';
import { ProviderType } from './ProviderType';
import { VercelAIGatewayProvider } from './VercelAIGatewayProvider';
import { XAIProvider } from './XAIProvider';
import {
VertexAIProvider,
Expand Down Expand Up @@ -58,6 +59,10 @@ const createChatModelToProviderMapping = (): Record<string, ProviderType> => {
case 'Xai':
mapping[modelConfig.model_id] = ProviderType.XAI;
break;
case 'Vercel':
case 'VercelAIGateway':
mapping[modelConfig.model_id] = ProviderType.VERCEL_AI_GATEWAY;
break;
// Add other providers as needed
default:
// Skip models with unsupported providers
Expand Down Expand Up @@ -192,6 +197,8 @@ export const getProvider = (
return new GroqProvider(stream, model);
case ProviderType.XAI:
return new XAIProvider(stream, model);
case ProviderType.VERCEL_AI_GATEWAY:
return new VercelAIGatewayProvider(stream, model);
default:
throw new Error(`Unknown provider type: ${type}`);
}
Expand Down
1 change: 1 addition & 0 deletions packages/app/server/src/providers/ProviderType.ts
Original file line number Diff line number Diff line change
Expand Up @@ -12,4 +12,5 @@ export enum ProviderType {
OPENAI_VIDEOS = 'OPENAI_VIDEOS',
GROQ = 'GROQ',
XAI = 'XAI',
VERCEL_AI_GATEWAY = 'VERCEL_AI_GATEWAY',
}
82 changes: 82 additions & 0 deletions packages/app/server/src/providers/VercelAIGatewayProvider.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,82 @@
import { LlmTransactionMetadata, Transaction } from '../types';
import { getCostPerToken } from '../services/AccountingService';
import { BaseProvider } from './BaseProvider';
import { ProviderType } from './ProviderType';
import { CompletionStateBody, parseSSEGPTFormat } from './GPTProvider';
import logger from '../logger';
import { env } from '../env';

/**
 * Provider adapter for the Vercel AI Gateway (OpenAI-compatible API).
 *
 * Parses both streaming (SSE) and non-streaming completion bodies to
 * extract token usage, then converts usage into a billable Transaction.
 */
export class VercelAIGatewayProvider extends BaseProvider {
  // OpenAI-compatible endpoint exposed by the Vercel AI Gateway.
  private readonly VERCEL_AI_GATEWAY_BASE_URL =
    'https://ai-gateway.vercel.sh/v1';

  getType(): ProviderType {
    return ProviderType.VERCEL_AI_GATEWAY;
  }

  getBaseUrl(): string {
    return this.VERCEL_AI_GATEWAY_BASE_URL;
  }

  getApiKey(): string | undefined {
    return env.VERCEL_AI_GATEWAY_API_KEY;
  }

  override supportsStream(): boolean {
    return true;
  }

  /**
   * Turn a completed response body into a Transaction with token usage
   * and cost.
   *
   * @param data - Raw response body: concatenated SSE chunks when
   *   streaming, otherwise a single JSON completion object.
   * @returns A success Transaction carrying cost and usage metadata.
   * @throws Re-throws any parse error after logging it.
   */
  async handleBody(data: string): Promise<Transaction> {
    try {
      let prompt_tokens = 0;
      let completion_tokens = 0;
      let total_tokens = 0;
      let providerId = 'null';

      if (this.getIsStream()) {
        const chunks = parseSSEGPTFormat(data);

        for (const chunk of chunks) {
          // `!= null` (not `!== null`): SSE chunks commonly carry no
          // usage field at all, so `usage` may be undefined. The strict
          // check let undefined through and poisoned the totals with NaN.
          if (chunk.usage != null) {
            prompt_tokens += chunk.usage.prompt_tokens;
            completion_tokens += chunk.usage.completion_tokens;
            total_tokens += chunk.usage.total_tokens;
          }
          // Keep the last non-empty id; previously a trailing id-less
          // chunk would reset providerId back to 'null'.
          if (chunk.id) {
            providerId = chunk.id;
          }
        }
      } else {
        const parsed = JSON.parse(data) as CompletionStateBody;
        prompt_tokens += parsed.usage.prompt_tokens;
        completion_tokens += parsed.usage.completion_tokens;
        total_tokens += parsed.usage.total_tokens;
        providerId = parsed.id || 'null';
      }

      const cost = getCostPerToken(
        this.getModel(),
        prompt_tokens,
        completion_tokens
      );

      const metadata: LlmTransactionMetadata = {
        providerId,
        provider: this.getType(),
        model: this.getModel(),
        inputTokens: prompt_tokens,
        outputTokens: completion_tokens,
        totalTokens: total_tokens,
      };

      const transaction: Transaction = {
        rawTransactionCost: cost,
        metadata,
        status: 'success',
      };

      return transaction;
    } catch (error) {
      logger.error(`Error processing data: ${error}`);
      throw error;
    }
  }
}
2 changes: 2 additions & 0 deletions packages/app/server/src/services/AccountingService.ts
Original file line number Diff line number Diff line change
Expand Up @@ -10,6 +10,7 @@ import {
SupportedImageModel,
SupportedVideoModel,
XAIModels,
VercelModels,
} from '@merit-systems/echo-typescript-sdk';

import { Decimal } from '@prisma/client/runtime/library';
Expand All @@ -30,6 +31,7 @@ export const ALL_SUPPORTED_MODELS: SupportedModel[] = [
...OpenRouterModels,
...GroqModels,
...XAIModels,
...VercelModels,
];

// Handle image models separately since they have different pricing structure
Expand Down
4 changes: 3 additions & 1 deletion packages/sdk/ts/package.json
Original file line number Diff line number Diff line change
Expand Up @@ -29,7 +29,8 @@
"update-models:gemini": "tsx scripts/update-gemini-models.ts",
"update-models:openrouter": "tsx scripts/update-openrouter-models.ts",
"update-models:groq": "tsx scripts/update-groq-models.ts",
"update-all-models": "pnpm run update-models:openai && pnpm run update-models:anthropic && pnpm run update-models:gemini && pnpm run update-models:openrouter && pnpm run update-models:groq",
"update-models:vercel": "tsx scripts/update-vercel-models.ts",
"update-all-models": "pnpm run update-models:openai && pnpm run update-models:anthropic && pnpm run update-models:gemini && pnpm run update-models:openrouter && pnpm run update-models:groq && pnpm run update-models:vercel",
"prepublishOnly": "pnpm run build"
},
"keywords": [
Expand Down Expand Up @@ -58,6 +59,7 @@
],
"dependencies": {
"@ai-sdk/anthropic": "2.0.17",
"@ai-sdk/gateway": "^1.0.12",
"@ai-sdk/google": "2.0.14",
"@ai-sdk/groq": "2.0.17",
"@ai-sdk/openai": "2.0.32",
Expand Down
86 changes: 86 additions & 0 deletions packages/sdk/ts/scripts/update-vercel-models.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,86 @@
#!/usr/bin/env node

// -> Fetch all available models from the Vercel AI Gateway
// Uses the public models endpoint to get model IDs and pricing
// Writes to src/supported-models/chat/vercel.ts

import { generateModelFile, type SupportedModel } from './update-models';
import { writeFileSync } from 'fs';
import { join } from 'path';

/**
 * One entry from the gateway's public /v1/models listing.
 * Only the fields this script reads are modeled.
 */
interface VercelGatewayModel {
  // Gateway model identifier, e.g. "openai/gpt-4o" (format assumed — TODO confirm).
  id: string;
  // Modality tag; this script keeps only entries where type === 'language'.
  type: string;
  // Per-token prices as decimal strings; may be absent or null for
  // models the gateway does not price.
  pricing?: {
    input: string;
    output: string;
  } | null;
}

/** Envelope returned by GET https://ai-gateway.vercel.sh/v1/models. */
interface VercelGatewayResponse {
  // OpenAI-style envelope tag (not inspected by this script).
  object: string;
  // The model listing itself.
  data: VercelGatewayModel[];
}

/**
 * Fetch the model catalog from the Vercel AI Gateway's public models
 * endpoint, keep priced language models, and regenerate
 * src/supported-models/chat/vercel.ts.
 *
 * Exits the process with code 1 on any failure (network, HTTP, or write).
 */
async function updateVercelModels(): Promise<void> {
  try {
    console.log('🔄 Starting Vercel AI Gateway model update process...\n');

    // Fetch available models from the public gateway endpoint
    console.log('📡 Fetching available models from Vercel AI Gateway...');
    const response = await fetch('https://ai-gateway.vercel.sh/v1/models');

    if (!response.ok) {
      throw new Error(
        `Failed to fetch models: ${response.status} ${response.statusText}`
      );
    }

    const data: VercelGatewayResponse = await response.json();
    console.log(`🔍 Found ${data.data.length} total models from gateway`);

    // Keep only priced language models. flatMap lets the type checker
    // see the narrowing directly, so no non-null assertions are needed
    // (the previous filter/map pair required `model.pricing!`).
    const models: SupportedModel[] = data.data.flatMap(model => {
      const pricing = model.pricing;
      if (
        model.type !== 'language' ||
        pricing?.input == null ||
        pricing?.output == null
      ) {
        return [];
      }
      const input_cost_per_token = Number(pricing.input);
      const output_cost_per_token = Number(pricing.output);
      // Defend against malformed pricing strings: a NaN here would
      // silently break cost accounting downstream.
      if (
        !Number.isFinite(input_cost_per_token) ||
        !Number.isFinite(output_cost_per_token)
      ) {
        console.warn(`⚠️  Skipping ${model.id}: non-numeric pricing`);
        return [];
      }
      return [
        {
          model_id: model.id,
          input_cost_per_token,
          output_cost_per_token,
          provider: 'Vercel',
        },
      ];
    });

    console.log(`📝 Filtered to ${models.length} language models with pricing`);

    // Generate the file content
    const fileContent = generateModelFile(models, 'Vercel', 'Vercel');

    // Write the updated file
    const outputPath = join(
      process.cwd(),
      'src/supported-models/chat/vercel.ts'
    );
    writeFileSync(outputPath, fileContent, 'utf8');

    console.log(
      `\n✅ Successfully updated vercel.ts with ${models.length} models`
    );
    console.log('📊 Models included:');
    models.forEach(model => {
      console.log(`  - ${model.model_id}`);
    });
  } catch (error) {
    console.error('❌ Error updating Vercel AI Gateway models:', error);
    process.exit(1);
  }
}

// Entry point: start the update; any rejection that escapes the
// function's own try/catch still fails the process with a non-zero code.
void updateVercelModels().catch((error: unknown) => {
  console.error('❌ Unexpected error:', error);
  process.exit(1);
});
2 changes: 2 additions & 0 deletions packages/sdk/ts/src/index.ts
Original file line number Diff line number Diff line change
Expand Up @@ -49,6 +49,8 @@ export { GroqModels } from './supported-models/chat/groq';
export type { GroqModel } from './supported-models/chat/groq';
export { XAIModels } from './supported-models/chat/xai';
export type { XAIModel } from './supported-models/chat/xai';
export { VercelModels } from './supported-models/chat/vercel';
export type { VercelModel } from './supported-models/chat/vercel';
export { OpenAIImageModels } from './supported-models/image/openai';
export type { OpenAIImageModel } from './supported-models/image/openai';
export { GeminiVideoModels } from './supported-models/video/gemini';
Expand Down
2 changes: 2 additions & 0 deletions packages/sdk/ts/src/providers/index.ts
Original file line number Diff line number Diff line change
Expand Up @@ -4,6 +4,7 @@ export * from './groq';
export * from './xai';
export * from './openai';
export * from './openrouter';
export * from './vercel';

export function echoFetch(
originalFetch: typeof fetch,
Expand Down Expand Up @@ -63,3 +64,4 @@ export { type GroqProvider } from '@ai-sdk/groq';
export { type OpenAIProvider } from '@ai-sdk/openai';
export { type OpenRouterProvider } from '@openrouter/ai-sdk-provider';
export { type XaiProvider } from '@ai-sdk/xai';
export { type GatewayProvider } from '@ai-sdk/gateway';
23 changes: 23 additions & 0 deletions packages/sdk/ts/src/providers/vercel.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,23 @@
import { createGateway as createGatewayBase } from '@ai-sdk/gateway';
import { ROUTER_BASE_URL } from 'config';
import { EchoConfig } from '../types';
import { validateAppId } from '../utils/validation';
import { echoFetch } from './index';

/**
 * Build an Echo-routed Vercel AI Gateway provider.
 *
 * Authentication is injected per-request by the echoFetch wrapper, which
 * resolves a fresh token via getTokenFn; the static apiKey passed to the
 * SDK is only a placeholder it requires.
 */
export function createEchoVercelAIGateway(
  { appId, baseRouterUrl = ROUTER_BASE_URL }: EchoConfig,
  getTokenFn: (appId: string) => Promise<string | null>,
  onInsufficientFunds?: () => void
) {
  validateAppId(appId, 'createEchoVercelAIGateway');

  // Wrap the global fetch so each request carries the caller's token
  // (getTokenFn already returns a promise; no extra async wrapper needed).
  const authenticatedFetch = echoFetch(
    fetch,
    () => getTokenFn(appId),
    onInsufficientFunds
  );

  return createGatewayBase({
    baseURL: baseRouterUrl,
    apiKey: 'placeholder_replaced_by_echoFetch',
    fetch: authenticatedFetch,
  });
}
Loading