update free logic
This commit is contained in:
parent ee2d8bcb6c
commit 30312e6f55

3 changed files with 50 additions and 32 deletions
@@ -8,3 +8,9 @@ BETTER_AUTH_SECRET=
 GITHUB_CLIENT_ID=
 GITHUB_CLIENT_SECRET=
 
+GOOGLE_CLIENT_ID=
+GOOGLE_CLIENT_SECRET=
+
+OPENROUTER_FREE_KEY=
+
+
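The new OPENROUTER_FREE_KEY entry is the shared server-side key that the generation code below falls back to when a user has not supplied their own OpenRouter key. A minimal sketch of how it might be read at startup, assuming a Node-style environment (the guard and error text are illustrative, not part of this commit):

// Sketch: loading the shared fallback key. The variable name matches the
// diff; the startup guard and error message are assumptions.
const OPENROUTER_FREE_KEY = process.env.OPENROUTER_FREE_KEY;
if (!OPENROUTER_FREE_KEY) {
  throw new Error('OPENROUTER_FREE_KEY is not configured');
}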
@@ -138,7 +138,11 @@ export const enable_initial = mutation({
       return;
     }
 
-    const initialModels = ['google/gemini-2.5-flash', 'deepseek/deepseek-chat-v3-0324:free'];
+    const initialModels = [
+      'google/gemini-2.5-flash',
+      'deepseek/deepseek-chat-v3-0324:free',
+      'microsoft/phi-4:free',
+    ];
 
     await Promise.all(
       initialModels.map((model) =>
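The added entries rely on OpenRouter's convention that free-tier model IDs end in ':free', which the generation path below uses to decide whether to skip the message counter. A small illustrative helper, assuming only that suffix convention (the helper name is hypothetical; the diff inlines the check):

// Sketch: the ':free' suffix check that the code below inlines as
// model.model_id.endsWith(':free'). The helper name is hypothetical.
const isFreeModel = (modelId: string): boolean => modelId.endsWith(':free');

isFreeModel('deepseek/deepseek-chat-v3-0324:free'); // true
isFreeModel('microsoft/phi-4:free');                // true
isFreeModel('google/gemini-2.5-flash');             // false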
@@ -301,12 +301,16 @@ async function generateAIResponse({
       actualKey = userKey;
       log('Background: Using user API key', startTime);
     } else {
-      // User doesn't have API key, check free tier limit
+      // User doesn't have API key, check if using a free model
+      const isFreeModel = model.model_id.endsWith(':free');
+
+      if (!isFreeModel) {
+        // For non-free models, check the 10 message limit
         const freeMessagesUsed = userSettings?.free_messages_used || 0;
 
         if (freeMessagesUsed >= 10) {
           handleGenerationError({
-            error: 'Free message limit reached (10/10). Please add your own OpenRouter API key to continue chatting.',
+            error: 'Free message limit reached (10/10). Please add your own OpenRouter API key to continue chatting, or use a free model ending in ":free".',
             conversationId,
             messageId: mid,
             sessionToken,
@@ -315,7 +319,7 @@ async function generateAIResponse({
           return;
         }
 
-        // Increment free message count before generating
+        // Increment free message count before generating (only for non-free models)
         const incrementResult = await ResultAsync.fromPromise(
           client.mutation(api.user_settings.incrementFreeMessageCount, {
             session_token: sessionToken,
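The hunk above keeps calling api.user_settings.incrementFreeMessageCount, now only on the non-free branch. The mutation itself is not part of this commit; a plausible Convex-style shape, assuming a user_settings table looked up by session token (the table lookup and index name are assumptions, free_messages_used comes from the diff):

// Sketch only: a plausible implementation of incrementFreeMessageCount.
// The index 'by_session_token' and the lookup strategy are assumptions.
import { mutation } from './_generated/server';
import { v } from 'convex/values';

export const incrementFreeMessageCount = mutation({
  args: { session_token: v.string() },
  handler: async (ctx, { session_token }) => {
    const settings = await ctx.db
      .query('user_settings')
      .withIndex('by_session_token', (q) => q.eq('session_token', session_token))
      .unique();
    if (!settings) return;
    // Bump the counter read by the 10-message free-tier gate.
    await ctx.db.patch(settings._id, {
      free_messages_used: (settings.free_messages_used ?? 0) + 1,
    });
  },
});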
@@ -334,9 +338,13 @@ async function generateAIResponse({
             return;
           }
 
+          log(`Background: Using free tier (${freeMessagesUsed + 1}/10 messages)`, startTime);
+        } else {
+          log(`Background: Using free model (${model.model_id}) - no message count`, startTime);
+        }
+
         // Use environment OpenRouter key
         actualKey = OPENROUTER_FREE_KEY;
-        log(`Background: Using free tier (${freeMessagesUsed + 1}/10 messages)`, startTime);
       }
 
       if (rulesResult.isErr()) {
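Taken together, the changes in generateAIResponse amount to a three-way key selection: a user-supplied key always wins, models ending in ':free' use the shared OPENROUTER_FREE_KEY without touching the counter, and everything else consumes one of ten free messages before a user key becomes mandatory. A condensed sketch of that flow, with hypothetical names (pickApiKey, the thrown Error) standing in for the inline logic and the handleGenerationError call:

// Condensed sketch of the selection logic after this commit. pickApiKey is
// hypothetical; the real code inlines this inside generateAIResponse and
// reports failures via handleGenerationError instead of throwing.
function pickApiKey(opts: {
  userKey?: string;
  modelId: string;
  freeMessagesUsed: number;
  envFreeKey: string;
}): { key: string; countsAgainstFreeTier: boolean } {
  if (opts.userKey) {
    // User's own OpenRouter key: no limits apply.
    return { key: opts.userKey, countsAgainstFreeTier: false };
  }
  if (opts.modelId.endsWith(':free')) {
    // Free models ride on the shared key and never increment the counter.
    return { key: opts.envFreeKey, countsAgainstFreeTier: false };
  }
  if (opts.freeMessagesUsed >= 10) {
    throw new Error('Free message limit reached (10/10).');
  }
  // Non-free model within the free tier: shared key, counter incremented by the caller.
  return { key: opts.envFreeKey, countsAgainstFreeTier: true };
}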