more parallelization

This commit is contained in:
Aidan Bleser 2025-06-16 15:23:54 -05:00
parent 743f300e04
commit 8926a0b608

View file

@@ -1,6 +1,6 @@
import { PUBLIC_CONVEX_URL } from '$env/static/public'; import { PUBLIC_CONVEX_URL } from '$env/static/public';
import { api } from '$lib/backend/convex/_generated/api'; import { api } from '$lib/backend/convex/_generated/api';
import type { Id } from '$lib/backend/convex/_generated/dataModel'; import type { Doc, Id } from '$lib/backend/convex/_generated/dataModel';
import type { SessionObj } from '$lib/backend/convex/betterAuth'; import type { SessionObj } from '$lib/backend/convex/betterAuth';
import { Provider } from '$lib/types'; import { Provider } from '$lib/types';
import { error, json, type RequestHandler } from '@sveltejs/kit'; import { error, json, type RequestHandler } from '@sveltejs/kit';
@@ -41,30 +41,24 @@ function log(message: string, startTime: number): void {
const client = new ConvexHttpClient(PUBLIC_CONVEX_URL); const client = new ConvexHttpClient(PUBLIC_CONVEX_URL);
async function generateAIResponse( async function generateAIResponse({
conversationId: string, conversationId,
session: SessionObj, session,
modelId: string, startTime,
startTime: number modelResultPromise,
) { keyResultPromise,
}: {
conversationId: string;
session: SessionObj;
startTime: number;
keyResultPromise: ResultAsync<string | null, string>;
modelResultPromise: ResultAsync<Doc<'user_enabled_models'> | null, string>;
}) {
log('Starting AI response generation in background', startTime); log('Starting AI response generation in background', startTime);
const [modelResult, keyResult, messagesQueryResult] = await Promise.all([ const [modelResult, keyResult, messagesQueryResult] = await Promise.all([
ResultAsync.fromPromise( modelResultPromise,
client.query(api.user_enabled_models.get, { keyResultPromise,
provider: Provider.OpenRouter,
model_id: modelId,
session_token: session.token,
}),
(e) => `Failed to get model: ${e}`
),
ResultAsync.fromPromise(
client.query(api.user_keys.get, {
provider: Provider.OpenRouter,
session_token: session.token,
}),
(e) => `Failed to get API key: ${e}`
),
ResultAsync.fromPromise( ResultAsync.fromPromise(
client.query(api.messages.getAllFromConversation, { client.query(api.messages.getAllFromConversation, {
conversation_id: conversationId as Id<'conversations'>, conversation_id: conversationId as Id<'conversations'>,
@@ -231,6 +225,23 @@ export const POST: RequestHandler = async ({ request }) => {
return error(401, 'Unauthorized'); return error(401, 'Unauthorized');
} }
const modelResultPromise = ResultAsync.fromPromise(
client.query(api.user_enabled_models.get, {
provider: Provider.OpenRouter,
model_id: args.model_id,
session_token: session.token,
}),
(e) => `Failed to get model: ${e}`
);
const keyResultPromise = ResultAsync.fromPromise(
client.query(api.user_keys.get, {
provider: Provider.OpenRouter,
session_token: session.token,
}),
(e) => `Failed to get API key: ${e}`
);
log('Session authenticated successfully', startTime); log('Session authenticated successfully', startTime);
let conversationId = args.conversation_id; let conversationId = args.conversation_id;
@@ -275,7 +286,13 @@ export const POST: RequestHandler = async ({ request }) => {
// Start AI response generation in background - don't await // Start AI response generation in background - don't await
waitUntil( waitUntil(
generateAIResponse(conversationId, session, args.model_id, startTime).catch((error) => { generateAIResponse({
conversationId,
session,
startTime,
modelResultPromise,
keyResultPromise,
}).catch((error) => {
log(`Background AI response generation error: ${error}`, startTime); log(`Background AI response generation error: ${error}`, startTime);
}) })
); );
@@ -283,3 +300,16 @@ export const POST: RequestHandler = async ({ request }) => {
log('Response sent, AI generation started in background', startTime); log('Response sent, AI generation started in background', startTime);
return response({ ok: true, conversation_id: conversationId }); return response({ ok: true, conversation_id: conversationId });
}; };
// function parseMessageForRules(message: string, rules: Doc<'user_rules'>[]): Doc<'user_rules'>[] {
// const matchedRules: Doc<'user_rules'>[] = [];
// for (const rule of rules) {
// const match = message.indexOf(`@${rule.name}`);
// if (match === -1) continue;
// matchedRules.push(rule);
// }
// return matchedRules;
// }