-
Notifications
You must be signed in to change notification settings - Fork 27
/
Copy path: langgraph_complex_config_example.ts
215 lines (181 loc) · 6.51 KB
/
langgraph_complex_config_example.ts
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
/**
* Configuration Test with Math Server using LangGraph
*
* This example demonstrates using configuration files (auth_mcp.json and complex_mcp.json)
* and directly connecting to the local math_server.py script using LangGraph.
*/
/* eslint-disable no-console */
import { ChatOpenAI } from "@langchain/openai";
import path from "path";
import fs from "fs";
import dotenv from "dotenv";
import {
StateGraph,
END,
START,
MessagesAnnotation,
} from "@langchain/langgraph";
import { ToolNode } from "@langchain/langgraph/prebuilt";
import {
HumanMessage,
AIMessage,
SystemMessage,
} from "@langchain/core/messages";
// MCP client imports
import { MultiServerMCPClient } from "../src/index.js";
// Load environment variables from .env file
dotenv.config();
/**
 * This example demonstrates using multiple configuration files to
 * connect to different MCP servers and use their tools with LangGraph.
 *
 * Steps:
 *   1. Parse examples/auth_mcp.json and log its server names
 *      (auth header values are redacted — only key names are printed).
 *   2. Parse examples/complex_mcp.json and log its server names.
 *   3. Connect to a math MCP server over stdio, wrap its tools in a
 *      LangGraph agent, and run a few test queries against it.
 *
 * The MCP client connection is always closed in the `finally` block,
 * and any error is re-thrown after logging so the process-level
 * handler can exit with a non-zero status.
 */
async function runConfigTest() {
  // Declared outside the try so the finally block can always close it,
  // even when an error occurs after the connection is established.
  let client: MultiServerMCPClient | undefined;
  try {
    console.log("Starting test with configuration files...");

    // Step 1: Load and verify auth_mcp.json configuration (just testing parsing)
    console.log("Parsing auth_mcp.json configuration...");
    const authConfigPath = path.join(
      process.cwd(),
      "examples",
      "auth_mcp.json"
    );
    if (!fs.existsSync(authConfigPath)) {
      throw new Error(`Configuration file not found: ${authConfigPath}`);
    }
    // Load the auth configuration to verify it parses correctly
    const authConfig = JSON.parse(fs.readFileSync(authConfigPath, "utf-8"));
    console.log(
      "Successfully parsed auth_mcp.json with the following servers:"
    );
    console.log("Servers:", Object.keys(authConfig.servers));

    // Print auth header *names* only (values redacted for security)
    // to verify the headers are present in the parsed config.
    Object.entries(authConfig.servers).forEach(([serverName, serverConfig]) => {
      if (
        serverConfig &&
        typeof serverConfig === "object" &&
        "headers" in serverConfig &&
        serverConfig.headers
      ) {
        console.log(
          `Server ${serverName} has headers:`,
          Object.keys(serverConfig.headers).map((key) => `${key}: ***`)
        );
      }
    });

    // Step 2: Load and verify complex_mcp.json configuration
    console.log("Parsing complex_mcp.json configuration...");
    const complexConfigPath = path.join(
      process.cwd(),
      "examples",
      "complex_mcp.json"
    );
    if (!fs.existsSync(complexConfigPath)) {
      throw new Error(`Configuration file not found: ${complexConfigPath}`);
    }
    const complexConfig = JSON.parse(
      fs.readFileSync(complexConfigPath, "utf-8")
    );
    console.log(
      "Successfully parsed complex_mcp.json with the following servers:"
    );
    console.log("Servers:", Object.keys(complexConfig.servers));

    // Step 3: Connect directly to the math server over stdio
    console.log("Connecting to math server directly...");
    client = new MultiServerMCPClient({
      math: {
        transport: "stdio",
        command: "npx",
        args: ["-y", "@modelcontextprotocol/server-math"],
      },
    });

    // Discover the tools the math server exposes
    const mcpTools = await client.getTools();
    console.log(`Loaded ${mcpTools.length} tools from math server`);
    const toolNames = mcpTools.map((tool) => tool.name);
    console.log("Available tools:", toolNames.join(", "));

    // Create an OpenAI model with the MCP tools bound so the LLM can call them
    const model = new ChatOpenAI({
      modelName: process.env.OPENAI_MODEL_NAME || "gpt-4o",
      temperature: 0,
    }).bindTools(mcpTools);

    // Node that executes any tool calls produced by the LLM
    const toolNode = new ToolNode(mcpTools);

    // Node that invokes the model with the accumulated message history
    const llmNode = async (state: typeof MessagesAnnotation.State) => {
      console.log("Calling LLM with messages:", state.messages.length);
      const response = await model.invoke(state.messages);
      return { messages: [response] };
    };

    // Build the agent graph: START -> llm; llm -> tools (when the LLM
    // requests tool calls) -> llm again; otherwise llm -> END.
    const workflow = new StateGraph(MessagesAnnotation)
      .addNode("llm", llmNode)
      .addNode("tools", toolNode)
      .addEdge(START, "llm")
      .addEdge("tools", "llm")
      .addConditionalEdges("llm", (state) => {
        const lastMessage = state.messages[state.messages.length - 1];
        // If the last message has tool calls, we need to execute the tools
        const aiMessage = lastMessage as AIMessage;
        if (aiMessage.tool_calls && aiMessage.tool_calls.length > 0) {
          console.log("Tool calls detected, routing to tools node");
          return "tools";
        }
        // If there are no tool calls, we're done
        console.log("No tool calls, ending the workflow");
        return END;
      });

    // Compile the graph into a runnable app
    const app = workflow.compile();

    // Test queries that exercise the math tools
    const testQueries = [
      "What is 5 + 3?",
      "What is 7 * 9?",
      "If I have 10 and add 15 to it, then multiply the result by 2, what do I get?",
    ];

    // Run each query through the agent; a failure on one query is
    // logged but does not abort the remaining queries.
    for (const query of testQueries) {
      console.log(`\n=== Running query: "${query}" ===`);
      try {
        const messages = [
          new SystemMessage(
            "You are a helpful assistant that can use tools to solve math problems."
          ),
          new HumanMessage(query),
        ];
        const result = await app.invoke({ messages });
        // The final answer is the last AI message in the result
        const lastMessage = result.messages
          .filter((message) => message._getType() === "ai")
          .pop();
        console.log(`\nFinal Answer: ${lastMessage?.content}`);
      } catch (error) {
        console.error(`Error processing query "${query}":`, error);
      }
    }

    console.log("Test completed successfully");
  } catch (error) {
    console.error("Error running test:", error);
    // Re-throw so the caller's .catch() can exit with a failure status.
    // (Previously the error was swallowed here, making the process
    // report success and exit 0 even when the test failed.)
    throw error;
  } finally {
    // Always release the MCP connections, even on error.
    if (client) {
      console.log("\nClosing connections...");
      await client.close();
    }
  }
}
// Entry point: run the test and map its outcome to the process exit code
// (0 on success, 1 on any error).
void (async () => {
  try {
    await runConfigTest();
    console.log("Configuration test completed successfully");
    process.exit(0);
  } catch (error) {
    console.error("Error running configuration test:", error);
    process.exit(1);
  }
})();