You cannot select more than 25 topics
Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.
135 lines
4.3 KiB
TypeScript
135 lines
4.3 KiB
TypeScript
import {
|
|
router,
|
|
publicProcedure,
|
|
createCallerFactory,
|
|
} from "../../trpc/server.js";
|
|
import { db, type Fact } from "../../database/lowdb.js";
|
|
import type { DraftMessage } from "../../types.js";
|
|
import { openrouter } from "./provider.js";
|
|
import { generateObject, generateText, jsonSchema } from "ai";
|
|
|
|
const factsFromNewMessagesSystemPrompt = ({
|
|
previousRunningSummary,
|
|
messagesSincePreviousRunningSummary,
|
|
}: {
|
|
previousRunningSummary: string;
|
|
messagesSincePreviousRunningSummary: Array<DraftMessage>;
|
|
}) => `You are an expert at extracting facts from conversations.
|
|
|
|
An AI assistant is in the middle of a conversation whose data is given below. The data consists of a summary of a conversation, and optionally some messages exchanged since that summary was produced. The user will provide you with *new* messages.
|
|
|
|
Your task is to extract *new* facts that can be gleaned from the *new* messages that the user sends.
|
|
|
|
* You should not extract any facts that are already in the summary.
|
|
* The user should be referred to as "the user" in the fact text.
|
|
* The user's pronouns should be either he or she, NOT "they" or "them", because this summary will be read by an AI assistant to give it context; and excessive use of "they" or "them" will make what they refer to unclear or ambiguous.
|
|
* The assistant should be referred to as "I" or "me", because these facts will be read by an AI assistant to give it context.
|
|
|
|
<running_summary>
|
|
${previousRunningSummary}
|
|
</running_summary>
|
|
|
|
${messagesSincePreviousRunningSummary.map(
|
|
(message) =>
|
|
`<${message.role}_message>${message.content}</${message.role}_message>`,
|
|
)}
|
|
`;
|
|
|
|
const factsFromNewMessagesUserPrompt = ({
|
|
newMessages,
|
|
}: {
|
|
newMessages: Array<DraftMessage>;
|
|
}) =>
|
|
`${newMessages.map(
|
|
(message) =>
|
|
`<${message.role}_message>${message.content}</${message.role}_message>`,
|
|
)}
|
|
|
|
Extract new facts from these messages.`;
|
|
|
|
export const facts = router({
|
|
fetchByConversationId: publicProcedure
|
|
.input((x) => x as { conversationId: string })
|
|
.query(async ({ input: { conversationId } }) => {
|
|
const conversationMessageIds = db.data.messages
|
|
.filter((m) => m.conversationId === conversationId)
|
|
.map((m) => m.id);
|
|
const rows = await db.data.facts.filter((f) =>
|
|
conversationMessageIds.includes(f.sourceMessageId),
|
|
);
|
|
return rows as Array<Fact>;
|
|
}),
|
|
deleteOne: publicProcedure
|
|
.input(
|
|
(x) =>
|
|
x as {
|
|
factId: string;
|
|
},
|
|
)
|
|
.mutation(async ({ input: { factId } }) => {
|
|
const deletedFactId = db.data.facts.findIndex(
|
|
(fact) => fact.id === factId,
|
|
);
|
|
if (deletedFactId === -1) throw new Error("Fact not found");
|
|
db.data.facts.splice(deletedFactId, 1);
|
|
db.write();
|
|
return { ok: true };
|
|
}),
|
|
extractFromNewMessages: publicProcedure
|
|
.input(
|
|
(x) =>
|
|
x as {
|
|
previousRunningSummary: string;
|
|
/** will *not* have facts extracted */
|
|
messagesSincePreviousRunningSummary: Array<DraftMessage>;
|
|
/** *will* have facts extracted */
|
|
newMessages: Array<DraftMessage>;
|
|
},
|
|
)
|
|
.query(
|
|
async ({
|
|
input: {
|
|
previousRunningSummary,
|
|
messagesSincePreviousRunningSummary,
|
|
newMessages,
|
|
},
|
|
}) => {
|
|
const factsFromUserMessageResponse = await generateObject<{
|
|
facts: Array<string>;
|
|
}>({
|
|
model: openrouter("mistralai/mistral-nemo"),
|
|
messages: [
|
|
{
|
|
role: "system" as const,
|
|
content: factsFromNewMessagesSystemPrompt({
|
|
previousRunningSummary,
|
|
messagesSincePreviousRunningSummary,
|
|
}),
|
|
},
|
|
{
|
|
role: "user" as const,
|
|
content: factsFromNewMessagesUserPrompt({
|
|
newMessages,
|
|
}),
|
|
},
|
|
],
|
|
schema: jsonSchema({
|
|
type: "object",
|
|
properties: {
|
|
facts: {
|
|
type: "array",
|
|
items: {
|
|
type: "string",
|
|
},
|
|
},
|
|
},
|
|
}),
|
|
temperature: 0.4,
|
|
});
|
|
return factsFromUserMessageResponse;
|
|
},
|
|
),
|
|
});
|
|
|
|
// Factory for a direct server-side caller of the facts router (no HTTP hop).
export const createCaller = createCallerFactory(facts);
|