From 30312e6f55645f89f6e4b89a5bc0c1f4a7a0a744 Mon Sep 17 00:00:00 2001
From: "Thomas G. Lopes" <26071571+TGlide@users.noreply.github.com>
Date: Thu, 19 Jun 2025 13:37:30 +0100
Subject: [PATCH] update free logic

---
 .env.example                                  |  6 ++
 src/lib/backend/convex/user_enabled_models.ts |  6 +-
 src/routes/api/generate-message/+server.ts    | 70 +++++++++++--------
 3 files changed, 50 insertions(+), 32 deletions(-)

diff --git a/.env.example b/.env.example
index 9dbfe6b..1d75b31 100644
--- a/.env.example
+++ b/.env.example
@@ -8,3 +8,9 @@
 BETTER_AUTH_SECRET=
 GITHUB_CLIENT_ID=
 GITHUB_CLIENT_SECRET=
+
+
+GOOGLE_CLIENT_ID=
+GOOGLE_CLIENT_SECRET=
+
+OPENROUTER_FREE_KEY=
diff --git a/src/lib/backend/convex/user_enabled_models.ts b/src/lib/backend/convex/user_enabled_models.ts
index 60d6c72..346a00b 100644
--- a/src/lib/backend/convex/user_enabled_models.ts
+++ b/src/lib/backend/convex/user_enabled_models.ts
@@ -138,7 +138,11 @@ export const enable_initial = mutation({
 		return;
 	}
 
-	const initialModels = ['google/gemini-2.5-flash', 'deepseek/deepseek-chat-v3-0324:free'];
+	const initialModels = [
+		'google/gemini-2.5-flash',
+		'deepseek/deepseek-chat-v3-0324:free',
+		'microsoft/phi-4:free',
+	];
 
 	await Promise.all(
 		initialModels.map((model) =>
diff --git a/src/routes/api/generate-message/+server.ts b/src/routes/api/generate-message/+server.ts
index 660f760..ed08ca8 100644
--- a/src/routes/api/generate-message/+server.ts
+++ b/src/routes/api/generate-message/+server.ts
@@ -301,42 +301,50 @@ async function generateAIResponse({
 		actualKey = userKey;
 		log('Background: Using user API key', startTime);
 	} else {
-		// User doesn't have API key, check free tier limit
-		const freeMessagesUsed = userSettings?.free_messages_used || 0;
+		// User doesn't have API key, check if using a free model
+		const isFreeModel = model.model_id.endsWith(':free');
 
-		if (freeMessagesUsed >= 10) {
-			handleGenerationError({
-				error: 'Free message limit reached (10/10). Please add your own OpenRouter API key to continue chatting.',
-				conversationId,
-				messageId: mid,
-				sessionToken,
-				startTime,
-			});
-			return;
-		}
-
-		// Increment free message count before generating
-		const incrementResult = await ResultAsync.fromPromise(
-			client.mutation(api.user_settings.incrementFreeMessageCount, {
-				session_token: sessionToken,
-			}),
-			(e) => `Failed to increment free message count: ${e}`
-		);
-
-		if (incrementResult.isErr()) {
-			handleGenerationError({
-				error: `Failed to track free message usage: ${incrementResult.error}`,
-				conversationId,
-				messageId: mid,
-				sessionToken,
-				startTime,
-			});
-			return;
+		if (!isFreeModel) {
+			// For non-free models, check the 10 message limit
+			const freeMessagesUsed = userSettings?.free_messages_used || 0;
+
+			if (freeMessagesUsed >= 10) {
+				handleGenerationError({
+					error: 'Free message limit reached (10/10). Please add your own OpenRouter API key to continue chatting, or use a free model ending in ":free".',
+					conversationId,
+					messageId: mid,
+					sessionToken,
+					startTime,
+				});
+				return;
+			}
+
+			// Increment free message count before generating (only for non-free models)
+			const incrementResult = await ResultAsync.fromPromise(
+				client.mutation(api.user_settings.incrementFreeMessageCount, {
+					session_token: sessionToken,
+				}),
+				(e) => `Failed to increment free message count: ${e}`
+			);
+
+			if (incrementResult.isErr()) {
+				handleGenerationError({
+					error: `Failed to track free message usage: ${incrementResult.error}`,
+					conversationId,
+					messageId: mid,
+					sessionToken,
+					startTime,
+				});
+				return;
+			}
+
+			log(`Background: Using free tier (${freeMessagesUsed + 1}/10 messages)`, startTime);
+		} else {
+			log(`Background: Using free model (${model.model_id}) - no message count`, startTime);
 		}
 
 		// Use environment OpenRouter key
 		actualKey = OPENROUTER_FREE_KEY;
-		log(`Background: Using free tier (${freeMessagesUsed + 1}/10 messages)`, startTime);
 	}
 
 	if (rulesResult.isErr()) {