[WIP] chore: bump vercel ai #512

Draft · wants to merge 1 commit into main
16 changes: 8 additions & 8 deletions templates/components/llamaindex/typescript/streaming/events.ts

@@ -1,4 +1,4 @@
-import { StreamData } from "ai";
+import { DataStreamWriter } from "ai";
 import {
   CallbackManager,
   LLamaCloudFileService,
@@ -15,7 +15,7 @@ import { downloadFile } from "./file";
 const LLAMA_CLOUD_DOWNLOAD_FOLDER = "output/llamacloud";

 export function appendSourceData(
-  data: StreamData,
+  data: DataStreamWriter,
   sourceNodes?: NodeWithScore<Metadata>[],
 ) {
   if (!sourceNodes?.length) return;
@@ -27,7 +27,7 @@ export function appendSourceData(
     url: getNodeUrl(node.node.metadata),
     text: node.node.getContent(MetadataMode.NONE),
   }));
-  data.appendMessageAnnotation({
+  data.writeMessageAnnotation({
     type: "sources",
     data: {
       nodes,
@@ -38,9 +38,9 @@
   }
 }

-export function appendEventData(data: StreamData, title?: string) {
+export function appendEventData(data: DataStreamWriter, title?: string) {
   if (!title) return;
-  data.appendMessageAnnotation({
+  data.writeMessageAnnotation({
     type: "events",
     data: {
       title,
@@ -49,11 +49,11 @@ export function appendEventData(data: StreamData, title?: string) {
 }

 export function appendToolData(
-  data: StreamData,
+  data: DataStreamWriter,
   toolCall: ToolCall,
   toolOutput: ToolOutput,
 ) {
-  data.appendMessageAnnotation({
+  data.writeMessageAnnotation({
     type: "tools",
     data: {
       toolCall: {
@@ -69,7 +69,7 @@
   });
 }

-export function createCallbackManager(stream: StreamData) {
+export function createCallbackManager(stream: DataStreamWriter) {
   const callbackManager = new CallbackManager();

   callbackManager.on("retrieve-end", (data) => {
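The change in this file is mechanical: the deprecated StreamData class is replaced by the DataStreamWriter interface, and each appendMessageAnnotation call becomes writeMessageAnnotation. A minimal sketch of the new shape (the sendEventAnnotation helper name is illustrative, not part of the PR):

import type { DataStreamWriter } from "ai";

// Illustrative helper mirroring appendEventData above: annotations are now
// written through the DataStreamWriter that createDataStreamResponse hands
// to its execute() callback, rather than appended to a StreamData instance.
function sendEventAnnotation(writer: DataStreamWriter, title?: string) {
  if (!title) return;
  // writeMessageAnnotation replaces StreamData.appendMessageAnnotation
  writer.writeMessageAnnotation({
    type: "events",
    data: { title },
  });
}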
64 changes: 33 additions & 31 deletions templates/types/streaming/nextjs/app/api/chat/route.ts

@@ -1,5 +1,5 @@
 import { initObservability } from "@/app/observability";
-import { LlamaIndexAdapter, Message, StreamData } from "ai";
+import { createDataStreamResponse, LlamaIndexAdapter, Message } from "ai";
 import { ChatMessage, Settings } from "llamaindex";
 import { NextRequest, NextResponse } from "next/server";
 import { createChatEngine } from "./engine/chat";
@@ -20,7 +20,6 @@ export const dynamic = "force-dynamic";

 export async function POST(request: NextRequest) {
   // Init Vercel AI StreamData and timeout
-  const vercelStreamData = new StreamData();

   try {
     const body = await request.json();
@@ -42,39 +41,42 @@ export async function POST(request: NextRequest) {

     // retrieve user message content from Vercel/AI format
     const userMessageContent = retrieveMessageContent(messages);
-    const chatHistory = messages.slice(0, -1) as ChatMessage[];
-
-    // Setup callbacks
-    const callbackManager = createCallbackManager(vercelStreamData);
-
-    // Calling LlamaIndex's ChatEngine to get a streamed response
-    const response = await Settings.withCallbackManager(callbackManager, () => {
-      return chatEngine.chat({
-        message: userMessageContent,
-        chatHistory,
-        stream: true,
-      });
-    });
-
-    const onCompletion = (content: string) => {
-      chatHistory.push({ role: "assistant", content: content });
-      generateNextQuestions(chatHistory)
-        .then((questions: string[]) => {
-          if (questions.length > 0) {
-            vercelStreamData.appendMessageAnnotation({
-              type: "suggested_questions",
-              data: questions,
-            });
-          }
-        })
-        .finally(() => {
-          vercelStreamData.close();
-        });
-    };
-
-    return LlamaIndexAdapter.toDataStreamResponse(response, {
-      data: vercelStreamData,
-      callbacks: { onCompletion },
-    });
+    const chatHistory: ChatMessage[] = messages.slice(0, -1) as ChatMessage[];
+    return createDataStreamResponse({
+      async execute(vercelStreamData) {
+        // Setup callbacks
+        const callbackManager = createCallbackManager(vercelStreamData);
+
+        // Calling LlamaIndex's ChatEngine to get a streamed response
+        const response = await Settings.withCallbackManager(
+          callbackManager,
+          () => {
+            return chatEngine.chat({
+              message: userMessageContent,
+              chatHistory,
+              stream: true,
+            });
+          },
+        );
+
+        const onFinal = (content: string) => {
+          chatHistory.push({ role: "assistant", content: content });
+          generateNextQuestions(chatHistory).then((questions: string[]) => {
+            if (questions.length > 0) {
+              vercelStreamData.writeMessageAnnotation({
+                type: "suggested_questions",
+                data: questions,
+              });
+            }
+          });
+        };
+
+        LlamaIndexAdapter.mergeIntoDataStream(response, {
+          dataStream: vercelStreamData,
+          callbacks: { onFinal },
+        });
Review comment on lines +75 to +78 (Collaborator, Author): Looks like this merge function doesn't work as expected. The markdown content still arrives fully generated rather than streaming incrementally.
+      },
+    });
   } catch (error) {
     console.error("[LlamaIndex]", error);
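For readers following the migration, here is a minimal sketch of the ai@4.1 pattern the new route uses, with the LlamaIndex-specific pieces stubbed out (fakeLlamaIndexStream is a hypothetical stand-in for chatEngine.chat({ ..., stream: true }), not part of the PR):

import { createDataStreamResponse, LlamaIndexAdapter } from "ai";

// Hypothetical stand-in for a LlamaIndex streamed chat response; the
// adapter only needs an async iterable of chunks exposing a `delta` string.
async function* fakeLlamaIndexStream() {
  yield { delta: "Hello, " };
  yield { delta: "world!" };
}

// Sketch only: the writer is created by createDataStreamResponse and passed
// to execute(), so the manual vercelStreamData.close() required by the old
// StreamData API is no longer needed.
export async function POST() {
  return createDataStreamResponse({
    async execute(writer) {
      // Pipe the LlamaIndex stream into the shared writer instead of
      // building a standalone response as toDataStreamResponse did in ai@4.0.
      LlamaIndexAdapter.mergeIntoDataStream(fakeLlamaIndexStream(), {
        dataStream: writer,
        callbacks: {
          // onFinal fires once the full assistant message has been streamed
          onFinal: (content) => {
            writer.writeMessageAnnotation({
              type: "final_content", // illustrative annotation type
              data: content,
            });
          },
        },
      });
    },
  });
}

If the merged output arrives all at once, as the review comment above observes, one thing worth checking is whether anything upstream buffers the chat stream before execute() begins writing; this sketch only shows the intended wiring, not a fix.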
2 changes: 1 addition & 1 deletion templates/types/streaming/nextjs/package.json

@@ -18,7 +18,7 @@
   "@radix-ui/react-slot": "^1.0.2",
   "@radix-ui/react-tabs": "^1.1.0",
   "@llamaindex/chat-ui": "^0.2.0",
-  "ai": "^4.0.3",
+  "ai": "^4.1.16",
   "ajv": "^8.12.0",
   "class-variance-authority": "^0.7.1",
   "clsx": "^2.1.1",