update free logic

This commit is contained in:
Thomas G. Lopes 2025-06-19 13:37:30 +01:00
parent ee2d8bcb6c
commit 30312e6f55
3 changed files with 50 additions and 32 deletions

View file

@@ -8,3 +8,9 @@ BETTER_AUTH_SECRET=
GITHUB_CLIENT_ID= GITHUB_CLIENT_ID=
GITHUB_CLIENT_SECRET= GITHUB_CLIENT_SECRET=
GOOGLE_CLIENT_ID=
GOOGLE_CLIENT_SECRET=
OPENROUTER_FREE_KEY=

View file

@@ -138,7 +138,11 @@ export const enable_initial = mutation({
return; return;
} }
const initialModels = ['google/gemini-2.5-flash', 'deepseek/deepseek-chat-v3-0324:free']; const initialModels = [
'google/gemini-2.5-flash',
'deepseek/deepseek-chat-v3-0324:free',
'microsoft/phi-4:free',
];
await Promise.all( await Promise.all(
initialModels.map((model) => initialModels.map((model) =>

View file

@@ -301,42 +301,50 @@ async function generateAIResponse({
actualKey = userKey; actualKey = userKey;
log('Background: Using user API key', startTime); log('Background: Using user API key', startTime);
} else { } else {
// User doesn't have API key, check free tier limit // User doesn't have API key, check if using a free model
const freeMessagesUsed = userSettings?.free_messages_used || 0; const isFreeModel = model.model_id.endsWith(':free');
if (freeMessagesUsed >= 10) { if (!isFreeModel) {
handleGenerationError({ // For non-free models, check the 10 message limit
error: 'Free message limit reached (10/10). Please add your own OpenRouter API key to continue chatting.', const freeMessagesUsed = userSettings?.free_messages_used || 0;
conversationId,
messageId: mid,
sessionToken,
startTime,
});
return;
}
// Increment free message count before generating if (freeMessagesUsed >= 10) {
const incrementResult = await ResultAsync.fromPromise( handleGenerationError({
client.mutation(api.user_settings.incrementFreeMessageCount, { error: 'Free message limit reached (10/10). Please add your own OpenRouter API key to continue chatting, or use a free model ending in ":free".',
session_token: sessionToken, conversationId,
}), messageId: mid,
(e) => `Failed to increment free message count: ${e}` sessionToken,
); startTime,
});
return;
}
if (incrementResult.isErr()) { // Increment free message count before generating (only for non-free models)
handleGenerationError({ const incrementResult = await ResultAsync.fromPromise(
error: `Failed to track free message usage: ${incrementResult.error}`, client.mutation(api.user_settings.incrementFreeMessageCount, {
conversationId, session_token: sessionToken,
messageId: mid, }),
sessionToken, (e) => `Failed to increment free message count: ${e}`
startTime, );
});
return; if (incrementResult.isErr()) {
handleGenerationError({
error: `Failed to track free message usage: ${incrementResult.error}`,
conversationId,
messageId: mid,
sessionToken,
startTime,
});
return;
}
log(`Background: Using free tier (${freeMessagesUsed + 1}/10 messages)`, startTime);
} else {
log(`Background: Using free model (${model.model_id}) - no message count`, startTime);
} }
// Use environment OpenRouter key // Use environment OpenRouter key
actualKey = OPENROUTER_FREE_KEY; actualKey = OPENROUTER_FREE_KEY;
log(`Background: Using free tier (${freeMessagesUsed + 1}/10 messages)`, startTime);
} }
if (rulesResult.isErr()) { if (rulesResult.isErr()) {