better title gen & conv ordering

Thomas G. Lopes 2025-06-19 14:02:27 +01:00
parent 7cd104b27b
commit d0ebe4924d
3 changed files with 44 additions and 35 deletions

View file

@@ -28,6 +28,7 @@ export const get = query({
 			.collect();
 		return conversations.sort((a, b) => {
+			if (a.generating && b.generating) return 0;
 			const aTime = a.updated_at ?? 0;
 			const bTime = b.updated_at ?? 0;

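The hunk shows only the top of the comparator, so the rest of the ordering is implied rather than visible. A minimal sketch of how the comparator plausibly continues, assuming generating conversations are pinned above the rest and everything else sorts newest-first (the standalone `compareConversations` name and the `Conversation` shape are assumptions for illustration):

type Conversation = { generating?: boolean; updated_at?: number };

function compareConversations(a: Conversation, b: Conversation): number {
	// Two in-flight conversations keep their relative order.
	if (a.generating && b.generating) return 0;
	// A generating conversation sorts above a finished one (assumed).
	if (a.generating) return -1;
	if (b.generating) return 1;
	// Otherwise most-recently-updated first; missing timestamps sort last.
	const aTime = a.updated_at ?? 0;
	const bTime = b.updated_at ?? 0;
	return bTime - aTime;
}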
View file

@@ -108,18 +108,23 @@ async function generateConversationTitle({
 	});
 	// Create a prompt for title generation using only the first user message
-	const titlePrompt = `Based on this user request, generate a concise, specific title (max 4-5 words):
-
-${userMessage}
-
-Generate only the title based on what the user is asking for, nothing else:`;
+	const titlePrompt = `Based on this message:
+
+"""${userMessage}"""
+
+Generate a concise, specific title (max 4-5 words).
+Generate only the title based on the message, nothing else. Don't name the title 'Generate Title' or anything stupid like that, otherwise it's obvious we're generating a title with AI.
+Also, do not interact with the message directly or answer it. Just generate the title based on the message.
+If it's a simple hi, just name it "Greeting" or something like that.
+`;
 	const titleResult = await ResultAsync.fromPromise(
 		openai.chat.completions.create({
 			model: 'mistralai/ministral-8b',
 			messages: [{ role: 'user', content: titlePrompt }],
 			max_tokens: 20,
-			temperature: 0.3,
+			temperature: 0.5,
 		}),
 		(e) => `Title generation API call failed: ${e}`
 	);
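Awaiting `ResultAsync.fromPromise` yields a neverthrow `Result` instead of throwing, so the caller branches on it explicitly. A hedged sketch of how the completion might then be unwrapped into a title (this part is outside the hunk; the fallback string is an assumption):

declare function log(message: string, startTime: number): void; // logger used elsewhere in this file

// `titleResult` is the awaited Result from the call above.
if (titleResult.isErr()) {
	log(`Title generation failed: ${titleResult.error}`, startTime);
	return;
}
// Take the model's first choice; fall back to a generic label if empty.
const title = titleResult.value.choices[0]?.message?.content?.trim() || 'New Conversation';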
@@ -149,6 +154,7 @@ Generate only the title based on what the user is asking for, nothing else:`;
 		}),
 		(e) => `Failed to update conversation title: ${e}`
 	);

 	if (updateResult.isErr()) {
 		log(`Title generation: Failed to update title: ${updateResult.error}`, startTime);
@@ -184,19 +190,20 @@ async function generateAIResponse({
 		return;
 	}
-	const [modelResult, keyResult, messagesQueryResult, rulesResult, userSettingsResult] = await Promise.all([
-		modelResultPromise,
-		keyResultPromise,
-		ResultAsync.fromPromise(
-			client.query(api.messages.getAllFromConversation, {
-				conversation_id: conversationId as Id<'conversations'>,
-				session_token: sessionToken,
-			}),
-			(e) => `Failed to get messages: ${e}`
-		),
-		rulesResultPromise,
-		userSettingsPromise,
-	]);
+	const [modelResult, keyResult, messagesQueryResult, rulesResult, userSettingsResult] =
+		await Promise.all([
+			modelResultPromise,
+			keyResultPromise,
+			ResultAsync.fromPromise(
+				client.query(api.messages.getAllFromConversation, {
+					conversation_id: conversationId as Id<'conversations'>,
+					session_token: sessionToken,
+				}),
+				(e) => `Failed to get messages: ${e}`
+			),
+			rulesResultPromise,
+			userSettingsPromise,
+		]);

 	if (modelResult.isErr()) {
 		handleGenerationError({
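The reflow works because neverthrow's `ResultAsync` implements `PromiseLike<Result<T, E>>`, so it can sit inside `Promise.all` next to already-pending result promises and be awaited into plain `Result` values. A minimal self-contained sketch:

import { ResultAsync } from 'neverthrow';

const first = ResultAsync.fromPromise(Promise.resolve(1), (e) => `failed: ${e}`);
const second = ResultAsync.fromPromise(Promise.resolve('two'), (e) => `failed: ${e}`);

// Each element settles to a Result, so errors arrive as values, not exceptions.
const [firstResult, secondResult] = await Promise.all([first, second]);
if (firstResult.isOk() && secondResult.isOk()) {
	console.log(firstResult.value, secondResult.value); // 1 'two'
}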
@@ -311,7 +318,8 @@ async function generateAIResponse({
 		if (freeMessagesUsed >= 10) {
 			handleGenerationError({
-				error: 'Free message limit reached (10/10). Please add your own OpenRouter API key to continue chatting, or use a free model ending in ":free".',
+				error:
+					'Free message limit reached (10/10). Please add your own OpenRouter API key to continue chatting, or use a free model ending in ":free".',
 				conversationId,
 				messageId: mid,
 				sessionToken,
@@ -518,12 +526,15 @@ ${attachedRules.map((r) => `- ${r.name}: ${r.rule}`).join('\n')}`,
 		(e) => `Failed to render HTML: ${e}`
 	);
-	const generationStatsResult = await retryResult(() => getGenerationStats(generationId!, actualKey), {
-		delay: 500,
-		retries: 2,
-		startTime,
-		fnName: 'getGenerationStats',
-	});
+	const generationStatsResult = await retryResult(
+		() => getGenerationStats(generationId!, actualKey),
+		{
+			delay: 500,
+			retries: 2,
+			startTime,
+			fnName: 'getGenerationStats',
+		}
+	);

 	if (generationStatsResult.isErr()) {
 		log(`Background: Failed to get generation stats: ${generationStatsResult.error}`, startTime);
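`retryResult` is a project helper whose implementation is not part of this diff. A hypothetical sketch of what it might look like, inferred only from the options it accepts here (`delay`, `retries`, `startTime`, `fnName`):

import type { Result } from 'neverthrow';

declare function log(message: string, startTime: number): void; // assumed, matches usage above

async function retryResult<T, E>(
	fn: () => PromiseLike<Result<T, E>>,
	opts: { delay: number; retries: number; startTime: number; fnName: string }
): Promise<Result<T, E>> {
	let result = await fn();
	// Retry on Err up to `retries` extra attempts, waiting `delay` ms between them.
	for (let attempt = 1; attempt <= opts.retries && result.isErr(); attempt++) {
		log(`${opts.fnName}: attempt ${attempt} failed, retrying in ${opts.delay}ms`, opts.startTime);
		await new Promise((resolve) => setTimeout(resolve, opts.delay));
		result = await fn();
	}
	return result;
}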

View file

@@ -148,9 +148,6 @@
 					<div class={['truncate font-medium', titleMatch && 'text-heading']}>
 						{conversation.title}
 					</div>
-					<div class="text-muted-foreground bg-muted rounded px-1.5 py-0.5 text-xs">
-						{Math.round(score * 100)}%
-					</div>
 				</div>
 				<div class="text-muted-foreground text-xs">
 					{messages.length} matching message{messages.length !== 1 ? 's' : ''}