better title gen & conv ordering

Thomas G. Lopes 2025-06-19 14:02:27 +01:00
parent 7cd104b27b
commit d0ebe4924d
3 changed files with 44 additions and 35 deletions


@@ -28,6 +28,7 @@ export const get = query({
 		.collect();
 	return conversations.sort((a, b) => {
+		if (a.generating && b.generating) return 0;
 		const aTime = a.updated_at ?? 0;
 		const bTime = b.updated_at ?? 0;
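The functional change here is the new early return: while two conversations are both mid-generation, the sort leaves their relative order alone instead of reshuffling them on every update. The rest of the comparator sits outside the hunk, so the sketch below is an assumption about how it plausibly reads as a whole; the generating-first branches and the newest-first ordering are not shown in the diff.

// Sketch only: everything except the `a.generating && b.generating` guard and the
// two `?? 0` fallbacks is an assumption about the surrounding comparator.
type ConversationDoc = { generating?: boolean; updated_at?: number };

function compareConversations(a: ConversationDoc, b: ConversationDoc): number {
	if (a.generating && b.generating) return 0; // both streaming: keep relative order stable
	if (a.generating) return -1; // assumed: a generating conversation sorts first
	if (b.generating) return 1;
	const aTime = a.updated_at ?? 0;
	const bTime = b.updated_at ?? 0;
	return bTime - aTime; // assumed: most recently updated first
}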


@@ -78,7 +78,7 @@ async function generateConversationTitle({
 	const userKey = keyResult.value;
 	const actualKey = userKey || OPENROUTER_FREE_KEY;
 	log(`Title generation: Using ${userKey ? 'user' : 'free tier'} API key`, startTime);
 	// Only generate title if conversation currently has default title
@@ -108,18 +108,23 @@ async function generateConversationTitle({
 	});
 	// Create a prompt for title generation using only the first user message
-	const titlePrompt = `Based on this user request, generate a concise, specific title (max 4-5 words):
-${userMessage}
-Generate only the title based on what the user is asking for, nothing else:`;
+	const titlePrompt = `Based on this message:
+"""${userMessage}"""
+Generate a concise, specific title (max 4-5 words).
+Generate only the title based on the message, nothing else. Don't name the title 'Generate Title' or anything stupid like that, otherwise its obvious we're generating a title with AI.
+Also, do not interact with the message directly or answer it. Just generate the title based on the message.
+If its a simple hi, just name it "Greeting" or something like that.
+`;
 	const titleResult = await ResultAsync.fromPromise(
 		openai.chat.completions.create({
 			model: 'mistralai/ministral-8b',
 			messages: [{ role: 'user', content: titlePrompt }],
 			max_tokens: 20,
-			temperature: 0.3,
+			temperature: 0.5,
 		}),
 		(e) => `Title generation API call failed: ${e}`
 	);
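For readers without the rest of the file: the request above is an ordinary OpenAI-SDK chat completion, presumably pointed at OpenRouter. A self-contained sketch under that assumption follows; the base URL, environment variable, and sample message are illustrative, not taken from the repo.

import OpenAI from 'openai';

// Assumption: the file's `openai` client targets OpenRouter's OpenAI-compatible endpoint.
const openai = new OpenAI({
	baseURL: 'https://openrouter.ai/api/v1',
	apiKey: process.env.OPENROUTER_FREE_KEY, // or the user's own key, as resolved earlier
});

const userMessage = 'How do I sort conversations by updated_at?'; // sample input
const titlePrompt = `Based on this message:
"""${userMessage}"""
Generate a concise, specific title (max 4-5 words).`;

const completion = await openai.chat.completions.create({
	model: 'mistralai/ministral-8b',
	messages: [{ role: 'user', content: titlePrompt }],
	max_tokens: 20,
	temperature: 0.5, // raised from 0.3 for slightly more varied titles
});
console.log(completion.choices[0]?.message?.content?.trim());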
@@ -149,6 +154,7 @@ Generate only the title based on what the user is asking for, nothing else:`;
 		}),
 		(e) => `Failed to update conversation title: ${e}`
 	);
+	t;
 	if (updateResult.isErr()) {
 		log(`Title generation: Failed to update title: ${updateResult.error}`, startTime);
@@ -184,19 +190,20 @@ async function generateAIResponse({
 		return;
 	}
-	const [modelResult, keyResult, messagesQueryResult, rulesResult, userSettingsResult] = await Promise.all([
-		modelResultPromise,
-		keyResultPromise,
-		ResultAsync.fromPromise(
-			client.query(api.messages.getAllFromConversation, {
-				conversation_id: conversationId as Id<'conversations'>,
-				session_token: sessionToken,
-			}),
-			(e) => `Failed to get messages: ${e}`
-		),
-		rulesResultPromise,
-		userSettingsPromise,
-	]);
+	const [modelResult, keyResult, messagesQueryResult, rulesResult, userSettingsResult] =
+		await Promise.all([
+			modelResultPromise,
+			keyResultPromise,
+			ResultAsync.fromPromise(
+				client.query(api.messages.getAllFromConversation, {
+					conversation_id: conversationId as Id<'conversations'>,
+					session_token: sessionToken,
+				}),
+				(e) => `Failed to get messages: ${e}`
+			),
+			rulesResultPromise,
+			userSettingsPromise,
+		]);
 	if (modelResult.isErr()) {
 		handleGenerationError({
@@ -296,7 +303,7 @@ async function generateAIResponse({
 	const userKey = keyResult.value;
 	const userSettings = userSettingsResult.value;
 	let actualKey: string;
 	if (userKey) {
 		// User has their own API key
 		actualKey = userKey;
@@ -304,14 +311,15 @@ async function generateAIResponse({
 	} else {
 		// User doesn't have API key, check if using a free model
 		const isFreeModel = model.model_id.endsWith(':free');
 		if (!isFreeModel) {
 			// For non-free models, check the 10 message limit
 			const freeMessagesUsed = userSettings?.free_messages_used || 0;
 			if (freeMessagesUsed >= 10) {
 				handleGenerationError({
-					error: 'Free message limit reached (10/10). Please add your own OpenRouter API key to continue chatting, or use a free model ending in ":free".',
+					error:
+						'Free message limit reached (10/10). Please add your own OpenRouter API key to continue chatting, or use a free model ending in ":free".',
 					conversationId,
 					messageId: mid,
 					sessionToken,
@@ -319,7 +327,7 @@ async function generateAIResponse({
 				});
 				return;
 			}
 			// Increment free message count before generating (only for non-free models)
 			const incrementResult = await ResultAsync.fromPromise(
 				client.mutation(api.user_settings.incrementFreeMessageCount, {
@@ -327,7 +335,7 @@ async function generateAIResponse({
 				}),
 				(e) => `Failed to increment free message count: ${e}`
 			);
 			if (incrementResult.isErr()) {
 				handleGenerationError({
 					error: `Failed to track free message usage: ${incrementResult.error}`,
@@ -338,12 +346,12 @@ async function generateAIResponse({
 				});
 				return;
 			}
 			log(`Background: Using free tier (${freeMessagesUsed + 1}/10 messages)`, startTime);
 		} else {
 			log(`Background: Using free model (${model.model_id}) - no message count`, startTime);
 		}
 		// Use environment OpenRouter key
 		actualKey = OPENROUTER_FREE_KEY;
 	}
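Taken together, the hunks above implement a small gating decision: a user-supplied key always wins; without one, models ending in ':free' bypass metering, and anything else draws down a 10-message allowance that is incremented before generation. A condensed, purely illustrative sketch of that decision follows; the function and type names are not the file's own.

// Illustrative only: condenses the branching above into one pure function.
type KeyDecision =
	| { kind: 'user-key'; key: string }
	| { kind: 'free-key'; countsAgainstLimit: boolean }
	| { kind: 'rejected'; reason: string };

function decideApiKey(
	userKey: string | null,
	modelId: string,
	freeMessagesUsed: number
): KeyDecision {
	if (userKey) return { kind: 'user-key', key: userKey };
	if (modelId.endsWith(':free')) {
		// free models bypass the counter entirely
		return { kind: 'free-key', countsAgainstLimit: false };
	}
	if (freeMessagesUsed >= 10) {
		return {
			kind: 'rejected',
			reason: 'Free message limit reached (10/10). Add an OpenRouter API key or use a ":free" model.',
		};
	}
	// caller increments free_messages_used before generating
	return { kind: 'free-key', countsAgainstLimit: true };
}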
@@ -518,12 +526,15 @@ ${attachedRules.map((r) => `- ${r.name}: ${r.rule}`).join('\n')}`,
 		(e) => `Failed to render HTML: ${e}`
 	);
-	const generationStatsResult = await retryResult(() => getGenerationStats(generationId!, actualKey), {
-		delay: 500,
-		retries: 2,
-		startTime,
-		fnName: 'getGenerationStats',
-	});
+	const generationStatsResult = await retryResult(
+		() => getGenerationStats(generationId!, actualKey),
+		{
+			delay: 500,
+			retries: 2,
+			startTime,
+			fnName: 'getGenerationStats',
+		}
+	);
 	if (generationStatsResult.isErr()) {
 		log(`Background: Failed to get generation stats: ${generationStatsResult.error}`, startTime);
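retryResult is a project helper whose implementation is not part of this diff; the call site suggests it re-invokes a Result-producing function up to `retries` times with a fixed delay. One plausible shape, built on neverthrow like the rest of the file, is sketched below; it is a hypothetical reconstruction, and the real helper presumably also logs via startTime and fnName.

import { Result, ResultAsync } from 'neverthrow';

// Hypothetical reconstruction from the call site above; the real helper may differ.
async function retryResult<T, E>(
	fn: () => ResultAsync<T, E> | Promise<Result<T, E>>,
	opts: { delay: number; retries: number; startTime: number; fnName: string }
): Promise<Result<T, E>> {
	let result = await fn();
	for (let attempt = 1; attempt <= opts.retries && result.isErr(); attempt++) {
		// wait before the next attempt, then retry the wrapped call
		await new Promise((resolve) => setTimeout(resolve, opts.delay));
		result = await fn();
	}
	return result;
}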


@@ -148,9 +148,6 @@
 		<div class={['truncate font-medium', titleMatch && 'text-heading']}>
 			{conversation.title}
 		</div>
-		<div class="text-muted-foreground bg-muted rounded px-1.5 py-0.5 text-xs">
-			{Math.round(score * 100)}%
-		</div>
 	</div>
 	<div class="text-muted-foreground text-xs">
 		{messages.length} matching message{messages.length !== 1 ? 's' : ''}