filesystem_langgraph_example.ts

/**
 * Filesystem MCP Server with LangGraph Example
 *
 * This example demonstrates how to use the Filesystem MCP server with LangGraph
 * to create a structured workflow for complex file operations.
 *
 * The graph-based approach allows:
 * 1. Clear separation of responsibilities (reasoning vs execution)
 * 2. Conditional routing based on file operation types
 * 3. Structured handling of complex multi-file operations
 */
/* eslint-disable no-console */
import { ChatOpenAI } from "@langchain/openai";
import {
  StateGraph,
  END,
  START,
  MessagesAnnotation,
} from "@langchain/langgraph";
import { ToolNode } from "@langchain/langgraph/prebuilt";
import {
  HumanMessage,
  AIMessage,
  SystemMessage,
  isHumanMessage,
} from "@langchain/core/messages";
import dotenv from "dotenv";
import fs from "fs";
import path from "path";

// MCP client imports
import { MultiServerMCPClient } from "../src/index.js";

// Load environment variables from .env file
dotenv.config();

/**
 * Example demonstrating how to use MCP filesystem tools with LangGraph agent flows
 * This example focuses on file operations like reading multiple files and writing files
 */
export async function runExample(client?: MultiServerMCPClient) {
  try {
    console.log("Initializing MCP client...");

    // Create a client with configurations for the filesystem server
    // eslint-disable-next-line no-param-reassign
    client =
      client ??
      new MultiServerMCPClient({
        filesystem: {
          transport: "stdio",
          command: "npx",
          args: [
            "-y",
            "@modelcontextprotocol/server-filesystem",
            "./examples/filesystem_test", // This directory needs to exist
          ],
        },
      });

    console.log("Connected to server");

    // Get all tools (flattened array is the default now)
    const mcpTools = await client.getTools();

    if (mcpTools.length === 0) {
      throw new Error("No tools found");
    }

    console.log(
      `Loaded ${mcpTools.length} MCP tools: ${mcpTools
        .map((tool) => tool.name)
        .join(", ")}`
    );

    // Create an OpenAI model with tools attached
    const systemMessage = `You are an assistant that helps users with file operations.
You have access to tools that can read and write files, create directories,
and perform other filesystem operations. Be careful with file operations,
especially writing and editing files. Always confirm the content and path before
making changes.
For file writing operations, format the content properly based on the file type.
For reading multiple files, you can use the read_multiple_files tool.`;
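
    // bindTools attaches the MCP tool schemas to the model so it can emit structured tool_calls.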
    const model = new ChatOpenAI({
      modelName: process.env.OPENAI_MODEL_NAME || "gpt-4o-mini",
      temperature: 0,
    }).bindTools(mcpTools);

    // Create a tool node for the LangGraph
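    // ToolNode executes any tool_calls found on the latest AI message and returns the results as ToolMessages.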
    const toolNode = new ToolNode(mcpTools);

    // ================================================
    // Create a LangGraph agent flow
    // ================================================
    console.log("\n=== CREATING LANGGRAPH AGENT FLOW ===");

    // Define the function that calls the model
    const llmNode = async (state: typeof MessagesAnnotation.State) => {
      console.log(`Calling LLM with ${state.messages.length} messages`);

      // Add system message if it's the first call
      let { messages } = state;
      if (messages.length === 1 && isHumanMessage(messages[0])) {
        messages = [new SystemMessage(systemMessage), ...messages];
      }

      const response = await model.invoke(messages);
      return { messages: [response] };
    };

    // Create a new graph with MessagesAnnotation
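    // MessagesAnnotation provides a `messages` channel whose reducer appends each node's returned messages to the history.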
    const workflow = new StateGraph(MessagesAnnotation)
      // Add the nodes to the graph
      .addNode("llm", llmNode)
      .addNode("tools", toolNode)
      // Add edges - these define how nodes are connected
      .addEdge(START, "llm")
      .addEdge("tools", "llm")
      // Conditional routing to end or continue the tool loop
      .addConditionalEdges("llm", (state) => {
        const lastMessage = state.messages[state.messages.length - 1];

        // Cast to AIMessage to access tool_calls property
        const aiMessage = lastMessage as AIMessage;
        if (aiMessage.tool_calls && aiMessage.tool_calls.length > 0) {
          console.log("Tool calls detected, routing to tools node");

          // Log what tools are being called
          const toolNames = aiMessage.tool_calls
            .map((tc) => tc.name)
            .join(", ");
          console.log(`Tools being called: ${toolNames}`);

          return "tools";
        }

        // If there are no tool calls, we're done
        console.log("No tool calls, ending the workflow");
        return END;
      });

    // Compile the graph
    const app = workflow.compile();

    // Define examples to run
    const examples = [
      {
        name: "Write multiple files",
        query:
          "Create two files: 'notes.txt' with content 'Important meeting on Thursday' and 'reminder.txt' with content 'Call John about the project'.",
      },
      {
        name: "Read multiple files",
        query:
          "Read both notes.txt and reminder.txt files and create a summary file called 'summary.txt' that contains information from both files.",
      },
      {
        name: "Create directory structure",
        query:
          "Create a directory structure for a simple web project. Make a 'project' directory with subdirectories for 'css', 'js', and 'images'. Add an index.html file in the main project directory with a basic HTML5 template.",
      },
      {
        name: "Search and organize",
        query:
          "Search for all .txt files and create a new directory called 'text_files', then list the names of all found text files in a new file called 'text_files/index.txt'.",
      },
    ];

    // Run the examples
    console.log("\n=== RUNNING LANGGRAPH AGENT ===");

    for (const example of examples) {
      console.log(`\n--- Example: ${example.name} ---`);
      console.log(`Query: ${example.query}`);

      // Run the LangGraph agent
      const result = await app.invoke({
        messages: [new HumanMessage(example.query)],
      });

      // Display the final answer
      const finalMessage = result.messages[result.messages.length - 1];
      console.log(`\nResult: ${finalMessage.content}`);

      // Let's list the directory to see the changes
      console.log("\nDirectory listing after operations:");
      try {
        const listResult = await app.invoke({
          messages: [
            new HumanMessage(
              "List all files and directories in the current directory and show their structure."
            ),
          ],
        });
        const listMessage =
          listResult.messages[listResult.messages.length - 1];
        console.log(listMessage.content);
      } catch (error) {
        console.error("Error listing directory:", error);
      }
    }
  } catch (error) {
    console.error("Error:", error);
    process.exit(1); // Exit with error code
  } finally {
    if (client) {
      await client.close();
      console.log("Closed all MCP connections");
    }

    // Exit process after a short delay to allow for cleanup
    setTimeout(() => {
      console.log("Example completed, exiting process.");
      process.exit(0);
    }, 500);
  }
}

/**
 * Create a directory for our tests if it doesn't exist yet
 */
async function setupTestDirectory() {
  const testDir = path.join("./examples", "filesystem_test");
  if (!fs.existsSync(testDir)) {
    fs.mkdirSync(testDir, { recursive: true });
    console.log(`Created test directory: ${testDir}`);
  }
}
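
// Run setup and the example only when this file is executed directly (not when it is imported as a module).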
const isMainModule = import.meta.url === `file://${process.argv[1]}`;
if (isMainModule) {
  setupTestDirectory()
    .then(() => runExample())
    .catch((error) => console.error("Setup error:", error));
}