waitUntil and parallelize (#10)

Aidan Bleser 2025-06-16 15:04:43 -05:00 committed by GitHub
parent 923935f310
commit 743f300e04
4 changed files with 46 additions and 27 deletions

package.json

@@ -28,6 +28,7 @@
 		"@tailwindcss/vite": "^4.0.0",
 		"@testing-library/jest-dom": "^6.6.3",
 		"@testing-library/svelte": "^5.2.4",
+		"@vercel/functions": "^2.2.0",
 		"clsx": "^2.1.1",
 		"concurrently": "^9.1.2",
 		"convex": "^1.24.8",

pnpm-lock.yaml (generated, 14 additions)

@@ -63,6 +63,9 @@ importers:
       '@testing-library/svelte':
         specifier: ^5.2.4
         version: 5.2.8(svelte@5.34.1)(vite@6.3.5(@types/node@24.0.1)(jiti@2.4.2)(lightningcss@1.30.1))(vitest@3.2.3(@types/node@24.0.1)(jiti@2.4.2)(jsdom@26.1.0)(lightningcss@1.30.1))
+      '@vercel/functions':
+        specifier: ^2.2.0
+        version: 2.2.0
       clsx:
         specifier: ^2.1.1
         version: 2.1.1
@@ -1026,6 +1029,15 @@ packages:
     resolution: {integrity: sha512-qHV7pW7E85A0x6qyrFn+O+q1k1p3tQCsqIZ1KZ5ESLXY57aTvUd3/a4rdPTeXisvhXn2VQG0VSKUqs8KHF2zcA==}
     engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0}
 
+  '@vercel/functions@2.2.0':
+    resolution: {integrity: sha512-x1Zrc2jOclTSB9+Ic/XNMDinO0SG4ZS5YeV2Xz1m/tuJOM7QtPVU3Epw2czBao0dukefmC8HCNpyUL8ZchJ/Tg==}
+    engines: {node: '>= 18'}
+    peerDependencies:
+      '@aws-sdk/credential-provider-web-identity': '*'
+    peerDependenciesMeta:
+      '@aws-sdk/credential-provider-web-identity':
+        optional: true
+
   '@vitest/expect@3.2.3':
     resolution: {integrity: sha512-W2RH2TPWVHA1o7UmaFKISPvdicFJH+mjykctJFoAkUw+SPTJTGjUNdKscFBrqM7IPnCVu6zihtKYa7TkZS1dkQ==}
@@ -3224,6 +3236,8 @@ snapshots:
       '@typescript-eslint/types': 8.34.0
       eslint-visitor-keys: 4.2.1
 
+  '@vercel/functions@2.2.0': {}
+
   '@vitest/expect@3.2.3':
     dependencies:
       '@types/chai': 5.2.2


@@ -39,7 +39,6 @@
 	async function toggleEnabled(v: boolean) {
 		enabled = v; // Optimistic!
-		console.log('hi');
 
 		if (!session.current?.user.id) return;
 		const res = await ResultAsync.fromPromise(


@@ -7,6 +7,7 @@
 import { error, json, type RequestHandler } from '@sveltejs/kit';
 import { ConvexHttpClient } from 'convex/browser';
 import { ResultAsync } from 'neverthrow';
 import OpenAI from 'openai';
+import { waitUntil } from '@vercel/functions';
 import { z } from 'zod/v4';
@@ -48,14 +49,30 @@ async function generateAIResponse(
 ) {
 	log('Starting AI response generation in background', startTime);
 
-	const modelResult = await ResultAsync.fromPromise(
-		client.query(api.user_enabled_models.get, {
-			provider: Provider.OpenRouter,
-			model_id: modelId,
-			session_token: session.token,
-		}),
-		(e) => `Failed to get model: ${e}`
-	);
+	const [modelResult, keyResult, messagesQueryResult] = await Promise.all([
+		ResultAsync.fromPromise(
+			client.query(api.user_enabled_models.get, {
+				provider: Provider.OpenRouter,
+				model_id: modelId,
+				session_token: session.token,
+			}),
+			(e) => `Failed to get model: ${e}`
+		),
+		ResultAsync.fromPromise(
+			client.query(api.user_keys.get, {
+				provider: Provider.OpenRouter,
+				session_token: session.token,
+			}),
+			(e) => `Failed to get API key: ${e}`
+		),
+		ResultAsync.fromPromise(
+			client.query(api.messages.getAllFromConversation, {
+				conversation_id: conversationId as Id<'conversations'>,
+				session_token: session.token,
+			}),
+			(e) => `Failed to get messages: ${e}`
+		),
+	]);
 
 	if (modelResult.isErr()) {
 		log(`Background model query failed: ${modelResult.error}`, startTime);
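The hunk above works because neverthrow's ResultAsync is thenable: handing several of them to Promise.all resolves each entry to a plain Result, and since ResultAsync.fromPromise turns rejections into Err values, one failed query cannot reject the whole Promise.all. A minimal sketch of that pattern, with hypothetical fetch calls standing in for the Convex queries:

```ts
import { ResultAsync } from 'neverthrow';

// Wrap a request so a rejection becomes an Err value instead of a thrown error.
const getJson = (url: string) =>
	ResultAsync.fromPromise(
		fetch(url).then((r) => r.json()),
		(e) => `request to ${url} failed: ${e}`
	);

async function loadInParallel() {
	// ResultAsync is a thenable, so Promise.all resolves each entry to a Result.
	const [model, key] = await Promise.all([getJson('/api/model'), getJson('/api/key')]);

	if (model.isErr()) return console.error(model.error);
	if (key.isErr()) return console.error(key.error);

	// Both requests succeeded; use the payloads.
	return { model: model.value, key: key.value };
}
```

Each result is still checked individually afterwards, which is exactly what the isErr() blocks around the Promise.all continue to do.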
@@ -70,13 +87,7 @@ async function generateAIResponse(
 	log('Background: Model found and enabled', startTime);
 
-	const messagesQuery = await ResultAsync.fromPromise(
-		client.query(api.messages.getAllFromConversation, {
-			conversation_id: conversationId as Id<'conversations'>,
-			session_token: session.token,
-		}),
-		(e) => `Failed to get messages: ${e}`
-	);
+	const messagesQuery = await messagesQueryResult;
 
 	if (messagesQuery.isErr()) {
 		log(`Background messages query failed: ${messagesQuery.error}`, startTime);
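One detail worth noting in the replacement line: by the time Promise.all has settled, messagesQueryResult is already a plain Result, and awaiting a non-thenable value simply yields the value itself, so the extra await is harmless. A tiny sketch of that equivalence (the ok value here is illustrative):

```ts
import { ok } from 'neverthrow';

// A Result that has already been produced (e.g. by the Promise.all above).
const settled = ok(['hello']);

async function demo() {
	// Awaiting a value that is not a thenable resolves to that same value.
	const same = await settled;
	console.log(same === settled); // true
}
```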
@@ -86,14 +97,6 @@ async function generateAIResponse(
 	const messages = messagesQuery.value;
 	log(`Background: Retrieved ${messages.length} messages from conversation`, startTime);
 
-	const keyResult = await ResultAsync.fromPromise(
-		client.query(api.user_keys.get, {
-			provider: Provider.OpenRouter,
-			session_token: session.token,
-		}),
-		(e) => `Failed to get API key: ${e}`
-	);
-
 	if (keyResult.isErr()) {
 		log(`Background API key query failed: ${keyResult.error}`, startTime);
 		return;
@@ -271,9 +274,11 @@ export const POST: RequestHandler = async ({ request }) => {
 	}
 
 	// Start AI response generation in background - don't await
-	generateAIResponse(conversationId, session, args.model_id, startTime).catch((error) => {
-		log(`Background AI response generation error: ${error}`, startTime);
-	});
+	waitUntil(
+		generateAIResponse(conversationId, session, args.model_id, startTime).catch((error) => {
+			log(`Background AI response generation error: ${error}`, startTime);
+		})
+	);
 
 	log('Response sent, AI generation started in background', startTime);
 	return response({ ok: true, conversation_id: conversationId });
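The switch to waitUntil is the reason @vercel/functions was added: on Vercel, a serverless invocation can be suspended or torn down as soon as the response is returned, so a plain fire-and-forget promise may never finish. waitUntil tells the platform to keep the invocation alive until the given promise settles. A minimal sketch of the same shape, assuming a SvelteKit endpoint deployed to Vercel, with doBackgroundWork as a hypothetical stand-in for generateAIResponse:

```ts
import { json, type RequestHandler } from '@sveltejs/kit';
import { waitUntil } from '@vercel/functions';

// Hypothetical slow task, e.g. calling a model provider and persisting the result.
async function doBackgroundWork(conversationId: string): Promise<void> {
	// ...long-running work...
}

export const POST: RequestHandler = async ({ request }) => {
	const { conversation_id } = await request.json();

	// Not awaited: the response goes out immediately, while waitUntil keeps the
	// invocation alive until the background promise settles.
	waitUntil(
		doBackgroundWork(conversation_id).catch((e) => {
			console.error(`Background work failed: ${e}`);
		})
	);

	return json({ ok: true, conversation_id });
};
```

Keeping the .catch inside waitUntil, as the commit does, means the promise handed to the platform never rejects and failures still end up in the logs.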