better title gen & conv ordering
parent 7cd104b27b
commit d0ebe4924d
3 changed files with 44 additions and 35 deletions
@@ -28,6 +28,7 @@ export const get = query({
 		.collect();
 
 	return conversations.sort((a, b) => {
+		if (a.generating && b.generating) return 0;
 		const aTime = a.updated_at ?? 0;
 		const bTime = b.updated_at ?? 0;
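For context on the new ordering: a minimal sketch of the full comparator this hunk implies, assuming the unshown surrounding lines pin actively generating conversations to the top and otherwise sort by updated_at descending — the -1/1 branches and the final return are assumptions, not part of the diff.

// Sketch of the comparator around the added line; only the
// `a.generating && b.generating` check and the timestamp fallbacks
// are confirmed by the hunk above — the rest is assumed.
return conversations.sort((a, b) => {
	if (a.generating && b.generating) return 0; // both still streaming: keep relative order
	if (a.generating) return -1; // assumed: generating conversations sort first
	if (b.generating) return 1;
	const aTime = a.updated_at ?? 0;
	const bTime = b.updated_at ?? 0;
	return bTime - aTime; // assumed: most recently updated first
});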
@@ -108,18 +108,23 @@ async function generateConversationTitle({
 	});
 
-	// Create a prompt for title generation using only the first user message
-	const titlePrompt = `Based on this user request, generate a concise, specific title (max 4-5 words):
-"""${userMessage}"""
-
-Generate only the title based on what the user is asking for, nothing else:`;
+	const titlePrompt = `Based on this message:
+
+${userMessage}
+
+Generate a concise, specific title (max 4-5 words).
+
+Generate only the title based on the message, nothing else. Don't name the title 'Generate Title' or anything stupid like that, otherwise it's obvious we're generating a title with AI.
+
+Also, do not interact with the message directly or answer it. Just generate the title based on the message.
+
+If it's a simple hi, just name it "Greeting" or something like that.
+`;
 
 	const titleResult = await ResultAsync.fromPromise(
 		openai.chat.completions.create({
			model: 'mistralai/ministral-8b',
 			messages: [{ role: 'user', content: titlePrompt }],
 			max_tokens: 20,
-			temperature: 0.3,
+			temperature: 0.5,
 		}),
 		(e) => `Title generation API call failed: ${e}`
 	);
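The ResultAsync.fromPromise wrapper is neverthrow's; a sketch of how titleResult would then be unwrapped before the title-update mutation seen in the next hunk — the choices[0] access follows the OpenAI chat-completions response shape, and the fallback string is an assumption:

// Assumed unwrapping of titleResult; not shown in the diff.
if (titleResult.isErr()) {
	log(`Title generation: API call failed: ${titleResult.error}`, startTime);
	return;
}
// Chat-completions responses put the text at choices[0].message.content.
const title = titleResult.value.choices[0]?.message?.content?.trim() || 'New Conversation';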
@@ -149,6 +154,7 @@ Generate only the title based on what the user is asking for, nothing else:`;
 		}),
 		(e) => `Failed to update conversation title: ${e}`
 	);
+	t;
 
 	if (updateResult.isErr()) {
 		log(`Title generation: Failed to update title: ${updateResult.error}`, startTime);
@@ -184,7 +190,8 @@ async function generateAIResponse({
 		return;
 	}
 
-	const [modelResult, keyResult, messagesQueryResult, rulesResult, userSettingsResult] = await Promise.all([
+	const [modelResult, keyResult, messagesQueryResult, rulesResult, userSettingsResult] =
+		await Promise.all([
 			modelResultPromise,
 			keyResultPromise,
 			ResultAsync.fromPromise(
@@ -311,7 +318,8 @@ async function generateAIResponse({
 
 	if (freeMessagesUsed >= 10) {
 		handleGenerationError({
-			error: 'Free message limit reached (10/10). Please add your own OpenRouter API key to continue chatting, or use a free model ending in ":free".',
+			error:
+				'Free message limit reached (10/10). Please add your own OpenRouter API key to continue chatting, or use a free model ending in ":free".',
 			conversationId,
 			messageId: mid,
 			sessionToken,
@@ -518,12 +526,15 @@ ${attachedRules.map((r) => `- ${r.name}: ${r.rule}`).join('\n')}`,
 		(e) => `Failed to render HTML: ${e}`
 	);
 
-	const generationStatsResult = await retryResult(() => getGenerationStats(generationId!, actualKey), {
+	const generationStatsResult = await retryResult(
+		() => getGenerationStats(generationId!, actualKey),
+		{
 			delay: 500,
 			retries: 2,
 			startTime,
 			fnName: 'getGenerationStats',
-	});
+		}
+	);
 
 	if (generationStatsResult.isErr()) {
 		log(`Background: Failed to get generation stats: ${generationStatsResult.error}`, startTime);
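retryResult itself isn't shown in this diff; a minimal sketch of a helper compatible with the call site above, assuming neverthrow Results and the file's log(message, startTime) helper — the retry semantics (fixed delay, retry only on Err) are assumptions:

import type { Result } from 'neverthrow';

// `log(message, startTime)` is assumed from the surrounding file.
declare function log(message: string, startTime: number): void;

// Hypothetical retryResult matching the options object used above: retries a
// Result-returning async function a fixed number of times, waiting `delay` ms
// between attempts, and returns the last Result.
async function retryResult<T, E>(
	fn: () => Promise<Result<T, E>>,
	opts: { delay: number; retries: number; startTime: number; fnName: string }
): Promise<Result<T, E>> {
	let result = await fn();
	for (let attempt = 1; attempt <= opts.retries && result.isErr(); attempt++) {
		log(`${opts.fnName}: attempt ${attempt} failed: ${result.error}`, opts.startTime);
		await new Promise((resolve) => setTimeout(resolve, opts.delay));
		result = await fn();
	}
	return result;
}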
@@ -148,9 +148,6 @@
 					<div class={['truncate font-medium', titleMatch && 'text-heading']}>
 						{conversation.title}
 					</div>
-					<div class="text-muted-foreground bg-muted rounded px-1.5 py-0.5 text-xs">
-						{Math.round(score * 100)}%
-					</div>
 				</div>
 				<div class="text-muted-foreground text-xs">
 					{messages.length} matching message{messages.length !== 1 ? 's' : ''}