Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
9 changes: 5 additions & 4 deletions src/agent/pizza-orderer/index.ts
Original file line number Diff line number Diff line change
Expand Up @@ -4,6 +4,7 @@ import { GenerativeUIAnnotation } from "../types";
import { z } from "zod";
import { AIMessage, ToolMessage } from "@langchain/langgraph-sdk";
import { v4 as uuidv4 } from "uuid";
import { ChatOpenAI } from "@langchain/openai";

const PizzaOrdererAnnotation = Annotation.Root({
messages: GenerativeUIAnnotation.spec.messages,
Expand All @@ -30,8 +31,8 @@ const workflow = new StateGraph(PizzaOrdererAnnotation)
),
})
.describe("The schema for finding a pizza shop for the user");
const model = new ChatAnthropic({
model: "claude-3-5-sonnet-latest",
const model = new ChatOpenAI({
model: "gpt-4o",
temperature: 0,
}).withStructuredOutput(findShopSchema, {
name: "find_pizza_shop",
Expand Down Expand Up @@ -76,8 +77,8 @@ const workflow = new StateGraph(PizzaOrdererAnnotation)
order: z.string().describe("The full pizza order for the user"),
})
.describe("The schema for ordering a pizza for the user");
const model = new ChatAnthropic({
model: "claude-3-5-sonnet-latest",
const model = new ChatOpenAI({
model: "gpt-4o",
temperature: 0,
}).withStructuredOutput(placeOrderSchema, {
name: "place_pizza_order",
Expand Down
28 changes: 27 additions & 1 deletion src/agent/supervisor/nodes/general-input.ts
Original file line number Diff line number Diff line change
Expand Up @@ -2,13 +2,39 @@ import { SupervisorState, SupervisorUpdate } from "../types";
import { ALL_TOOL_DESCRIPTIONS } from "../index";
import { ChatOpenAI } from "@langchain/openai";

// Hard-coded demo user profile injected into the system prompt below.
// NOTE(review): this constant is interpolated directly into a template
// literal as ${USER_PROFILE} (see the prompt below); a plain object in a
// template literal stringifies to "[object Object]" — the model never sees
// this data. Consider JSON.stringify(USER_PROFILE) at the interpolation
// site. Also consider loading per-user profiles from state/config instead
// of a module-level constant shared by all users.
const USER_PROFILE = {
  "id": "user123",
  "name": "Alice",
  "profile": {
    "personalHistory": "Alice was born in Munich and moved to Berlin for her studies. She holds a Master’s degree in Computer Science from TU Berlin. From a young age, she was fascinated by how machines learn and evolve, which led her into the field of artificial intelligence. She started her career as a backend developer and gradually transitioned into AI-focused roles. Outside of work, Alice enjoys hiking in the Alps, painting, and participating in tech meetups.",
    "education": "M.Sc. in Computer Science, Technische Universität Berlin",
    "expertise": "Machine learning, deep learning, distributed systems, transformer models, and open-source AI contributions.",
    "projectA": {
      "title": "Multilingual Generative Language Model",
      "description": "Led the development of a multilingual transformer-based model capable of generating coherent text in over 15 languages. The project aimed to enhance NLP capabilities in underrepresented languages.",
      "technologies": ["PyTorch", "Transformers", "HuggingFace", "TensorBoard"]
    },
    "projectB": {
      "title": "Federated Learning System",
      "description": "Built a federated learning pipeline enabling training on decentralized datasets across devices to maintain data privacy while improving model generalizability.",
      "technologies": ["Python", "gRPC", "TensorFlow Federated", "Docker", "Kubernetes"]
    },
    "ongoingProjects": [
      "Experimenting with sparse attention mechanisms for large language models.",
      "Mentoring junior developers in the open-source AI community.",
      "Collaborating with a university lab on adversarial robustness in deep models."
    ]
  }
}

export async function generalInput(
state: SupervisorState,
): Promise<SupervisorUpdate> {
const GENERAL_INPUT_SYSTEM_PROMPT = `You are an AI assistant.
const GENERAL_INPUT_SYSTEM_PROMPT = `You are a friendly and cheerful assistant, here to help out to your friend .Here is the background of your friend from ${USER_PROFILE} give some personal touch in language to show friendliness.
If the user asks what you can do, describe these tools.
${ALL_TOOL_DESCRIPTIONS}


If the last message is a tool result, describe what the action was, congratulate the user, or send a friendly followup in response to the tool action. Ensure this is a clear and concise message.

Otherwise, just answer as normal.`;
Expand Down
5 changes: 3 additions & 2 deletions src/agent/supervisor/nodes/router.ts
Original file line number Diff line number Diff line change
Expand Up @@ -3,6 +3,7 @@ import { ChatGoogleGenerativeAI } from "@langchain/google-genai";
import { ALL_TOOL_DESCRIPTIONS } from "../index";
import { SupervisorState, SupervisorUpdate } from "../types";
import { formatMessages } from "@/agent/utils/format-messages";
import { ChatOpenAI } from "@langchain/openai";

export async function router(
state: SupervisorState,
Expand All @@ -29,8 +30,8 @@ ${ALL_TOOL_DESCRIPTIONS}
schema: routerSchema,
};

const llm = new ChatGoogleGenerativeAI({
model: "gemini-2.0-flash",
const llm = new ChatOpenAI({
model: "gpt-4o",
temperature: 0,
})
.bindTools([routerTool], { tool_choice: "router" })
Expand Down
81 changes: 70 additions & 11 deletions src/agent/trip-planner/nodes/extraction.ts
Original file line number Diff line number Diff line change
Expand Up @@ -76,20 +76,79 @@ export async function extraction(
},
]);

const prompt = `You're an AI assistant for planning trips. The user has requested information about a trip they want to go on.
Before you can help them, you need to extract the following information from their request:
- location - The location to plan the trip for. Can be a city, state, or country.
- startDate - The start date of the trip. Should be in YYYY-MM-DD format. Optional
- endDate - The end date of the trip. Should be in YYYY-MM-DD format. Optional
- numberOfGuests - The number of guests for the trip. Optional
// Hard-coded demo user profile (duplicated from supervisor/nodes/general-input.ts,
// with an extra "currentLocation" field) — consider extracting to a shared module.
// NOTE(review): interpolated into the prompt below as ${USER_PROFILE}; a plain
// object inside a template literal stringifies to "[object Object]", so the
// model never receives this data. Use JSON.stringify(USER_PROFILE) at the
// interpolation site.
const USER_PROFILE = {
  "id": "user123",
  "name": "Alice",
  "profile": {
    "personalHistory": "Alice was born in Munich and moved to Berlin for her studies. She holds a Master’s degree in Computer Science from TU Berlin. From a young age, she was fascinated by how machines learn and evolve, which led her into the field of artificial intelligence. She started her career as a backend developer and gradually transitioned into AI-focused roles. Outside of work, Alice enjoys hiking in the Alps, painting, and participating in tech meetups.",
    "education": "M.Sc. in Computer Science, Technische Universität Berlin",
    "expertise": "Machine learning, deep learning, distributed systems, transformer models, and open-source AI contributions.",
    "projectA": {
      "title": "Multilingual Generative Language Model",
      "description": "Led the development of a multilingual transformer-based model capable of generating coherent text in over 15 languages. The project aimed to enhance NLP capabilities in underrepresented languages.",
      "technologies": ["PyTorch", "Transformers", "HuggingFace", "TensorBoard"]
    },
    "projectB": {
      "title": "Federated Learning System",
      "description": "Built a federated learning pipeline enabling training on decentralized datasets across devices to maintain data privacy while improving model generalizability.",
      "technologies": ["Python", "gRPC", "TensorFlow Federated", "Docker", "Kubernetes"]
    },
    "currentLocation":"Delhi",
    "ongoingProjects": [
      "Experimenting with sparse attention mechanisms for large language models.",
      "Mentoring junior developers in the open-source AI community.",
      "Collaborating with a university lab on adversarial robustness in deep models."
    ]
  }
}

You are provided with the ENTIRE conversation history between you, and the user. Use these messages to extract the necessary information.
const prompt = `You’re an enthusiastic travel buddy named "Sunny" who chats with the user like a friend. You’ve been given the user’s profile (${USER_PROFILE}) so you can sprinkle in personal touches and even guess why they’re planning this trip.

Do NOT guess, or make up any information. If the user did NOT specify a location, please respond with a request for them to specify the location.
You should ONLY send a clarification message if the user did not provide the location. You do NOT need any of the other fields, so if they're missing, proceed without them.
It should be a single sentence, along the lines of "Please specify the location for the trip you want to go on".
Conversation Starter:

Extract only what is specified by the user. It is okay to leave fields blank if the user did not specify them.
Greet warmly and mention something from their profile:

“Hey there! I remember you love street photography—ready to plan a trip full of colorful corners and candid moments?”

Spark curiosity about their dream destination:

“If you could teleport anywhere this instant, where would you land? A bustling city, a quiet beach, or maybe a mountain retreat?”

Uncover their why:

“I’m guessing you’re after this getaway to recharge after that big project at work—or is it to celebrate something special?”

Explore travel style and companions:

“Are you flying solo, road-tripping with friends, or bringing the whole family along?”

“Do you lean more toward adventurous hikes, lazy beach days, foodie tours, or cultural explorations?”

Nail down the basics (location, dates, guests):

“Which place are we looking at?” (city/state/country)

“Do you have dates in mind, or an ideal season?” (ask for YYYY‑MM‑DD if they know)

“How many people should I plan for?”

Bucket-list moments:

“Any must-do experiences on your list? Hot-air balloon ride, local cooking class, or maybe dancing under the northern lights?”

Extraction Rules:

location: The trip destination (city/state/country).

startDate/endDate: YYYY‑MM‑DD (optional; if unknown, ask for a season or month).

numberOfGuests: Integer (optional; if unknown, ask for a headcount).

Use the full conversation history to reuse provided details. Don’t guess or invent anything—if some info is missing, ask the user in your next message with a friendly follow-up.

Once all details are collected, reply with a warm confirmation like:

"Awesome! Planning a trip to [location] from [startDate] to [endDate] for [numberOfGuests] people. Let’s make it unforgettable! 🎉"
`;

const humanMessage = `Here is the entire conversation so far:\n${formatMessages(state.messages)}`;
Expand Down
112 changes: 75 additions & 37 deletions src/agent/trip-planner/nodes/tools.ts
Original file line number Diff line number Diff line change
Expand Up @@ -41,47 +41,85 @@ export async function callTools(
ACCOMMODATIONS_TOOLS,
);

const response = await llm.invoke([
{
role: "system",
content:
"You are an AI assistant who helps users book trips. Use the user's most recent message(s) to contextually generate a response.",
},
...state.messages,
]);
const systemPrompt = {
role: "system",
content: `You are an AI assistant who helps users book trips. When a user asks about a trip or destination, you should:
1. Use the list-accommodations tool to show available accommodations.
2. Use the list-restaurants tool to show local dining options.

const listAccommodationsToolCall = response.tool_calls?.find(
findToolCall("list-accommodations")<typeof listAccommodationsSchema>,
);
const listRestaurantsToolCall = response.tool_calls?.find(
findToolCall("list-restaurants")<typeof listRestaurantsSchema>,
);
After using these tools, always summarize the results in a friendly, readable text reply for the user.
- List the top accommodations and restaurants you found, including their names, prices, and ratings if available.
- If no results are found, say so.
- Always include this summary in your reply, even if the user did not specifically ask for it.

if (!listAccommodationsToolCall && !listRestaurantsToolCall) {
throw new Error("No tool calls found");
}
These tools should be used for ANY trip-related query, even if the user hasn't specifically asked about accommodations or restaurants yet.

if (listAccommodationsToolCall) {
ui.push(
{
name: "accommodations-list",
props: {
toolCallId: listAccommodationsToolCall.id ?? "",
...getAccommodationsListProps(state.tripDetails),
},
},
{ message: response },
);
}
Current trip details:
- Location: ${state.tripDetails.location}
- Start Date: ${state.tripDetails.startDate}
- End Date: ${state.tripDetails.endDate}
- Number of Guests: ${state.tripDetails.numberOfGuests}`,
};

let messages: any[] = [systemPrompt, ...state.messages];
let response = await llm.invoke(messages);

// Tool call loop
while (response.tool_calls && response.tool_calls.length > 0) {
// 1. Generate tool messages for each tool call
const toolMessages = response.tool_calls
.map((toolCall: any) => {
if (toolCall.name === "list-accommodations") {
const accommodationsData = getAccommodationsListProps(
state.tripDetails as import("../types").TripDetails,
);
ui.push(
{
name: "accommodations-list",
props: {
toolCallId: toolCall.id ?? "",
...accommodationsData,
},
},
{ message: response },
);
return {
role: "tool",
tool_call_id: toolCall.id,
name: toolCall.name,
content: JSON.stringify(accommodationsData.accommodations),
};
} else if (toolCall.name === "list-restaurants") {
ui.push(
{
name: "restaurants-list",
props: {
tripDetails:
state.tripDetails as import("../types").TripDetails,
},
},
{ message: response },
);
return {
role: "tool",
tool_call_id: toolCall.id,
name: toolCall.name,
content: "(Restaurant data coming soon!)",
};
}
return null;
})
.filter(Boolean);

if (listRestaurantsToolCall) {
ui.push(
{
name: "restaurants-list",
props: { tripDetails: state.tripDetails },
},
{ message: response },
);
// 2. Only send: systemPrompt, userMessages, last ai message, tool messages
messages = [systemPrompt, ...state.messages, response, ...toolMessages];
response = await llm.invoke(messages);
if (
typeof response.content === "string" &&
response.content.trim() !== ""
) {
break;
}
}

return {
Expand Down
5 changes: 3 additions & 2 deletions src/agent/writer-agent/index.ts
Original file line number Diff line number Diff line change
Expand Up @@ -19,8 +19,9 @@ import { findToolCall } from "../find-tool-call";
import { GenerativeUIAnnotation } from "../types";

import type ComponentMap from "../../agent-uis/index";
import { ChatOpenAI } from "@langchain/openai";

const MODEL_NAME = "claude-3-5-sonnet-latest";
const MODEL_NAME = "gpt-4o";

const WriterAnnotation = Annotation.Root({
messages: GenerativeUIAnnotation.spec.messages,
Expand Down Expand Up @@ -150,7 +151,7 @@ async function suggestions(state: WriterState): WriterUpdate {
messages.push({ type: "tool", content: "Finished", tool_call_id: tool.id });
}

const model = new ChatAnthropic({ model: MODEL_NAME });
const model = new ChatOpenAI({ model: MODEL_NAME });
const finish = await model.invoke(messages);
messages.push(finish);

Expand Down
Empty file added src/v2v_realtime/.dockerignore
Empty file.
13 changes: 13 additions & 0 deletions src/v2v_realtime/.env.example
Original file line number Diff line number Diff line change
@@ -0,0 +1,13 @@
AZURE_OPENAI_ENDPOINT=
AZURE_OPENAI_REALTIME_DEPLOYMENT=
AZURE_OPENAI_REALTIME_VOICE_CHOICE=
AZURE_OPENAI_API_KEY=
AZURE_SEARCH_ENDPOINT=
AZURE_SEARCH_INDEX=<......>
AZURE_SEARCH_API_KEY=<.....>

OPENAI_API_KEY=""


MONGO_URI=

25 changes: 25 additions & 0 deletions src/v2v_realtime/Dockerfile
Original file line number Diff line number Diff line change
@@ -0,0 +1,25 @@
# Container image for the v2v_realtime service.
# Base: official slim Python 3.10 image (Debian bookworm).
FROM python:3.10-slim-bookworm
# Bring in the prebuilt `uv`/`uvx` binaries from the official astral-sh image
# so we can use uv for fast venv creation and dependency installation.
COPY --from=ghcr.io/astral-sh/uv:latest /uv /uvx /bin/

# Set the working directory in the container
WORKDIR /app

# Copy ONLY the requirements file first so the dependency-install layer is
# cached independently of source-code changes.
COPY requirements.txt .

# Create a Python 3.10 virtual environment and put it first on PATH so all
# later python/pip invocations resolve to the venv.
RUN uv venv --python 3.10
ENV PATH="/app/.venv/bin:$PATH"

# Install the dependencies (--no-cache-dir keeps the image layer small)
RUN uv pip install --no-cache-dir -r requirements.txt

# Copy the application code into the container
COPY . .

# Expose the port that the app listens on
EXPOSE 8765

# Start the app via its own entry point. NOTE(review): the original comment
# said "using uvicorn", but this runs `python app.py` directly — confirm that
# app.py launches the server itself (e.g. uvicorn.run(...)).
CMD ["python", "app.py"]
Empty file added src/v2v_realtime/__init__.py
Empty file.
Loading