Update the way the stream is concatenated

Update the ts sdk
Author: cliffhall
Date: 2025-07-18 16:09:56 -04:00
parent 2feb7cbaa5
commit ba20bd60af
3 changed files with 47 additions and 43 deletions

package-lock.json (generated)

@@ -6159,7 +6159,7 @@
"version": "0.6.2",
"license": "MIT",
"dependencies": {
"@modelcontextprotocol/sdk": "^1.12.3",
"@modelcontextprotocol/sdk": "^1.16.0",
"diff": "^5.1.0",
"glob": "^10.3.10",
"minimatch": "^10.0.1",
@@ -6182,9 +6182,9 @@
}
},
"src/filesystem/node_modules/@modelcontextprotocol/sdk": {
"version": "1.12.3",
"resolved": "https://registry.npmjs.org/@modelcontextprotocol/sdk/-/sdk-1.12.3.tgz",
"integrity": "sha512-DyVYSOafBvk3/j1Oka4z5BWT8o4AFmoNyZY9pALOm7Lh3GZglR71Co4r4dEUoqDWdDazIZQHBe7J2Nwkg6gHgQ==",
"version": "1.16.0",
"resolved": "https://registry.npmjs.org/@modelcontextprotocol/sdk/-/sdk-1.16.0.tgz",
"integrity": "sha512-8ofX7gkZcLj9H9rSd50mCgm3SSF8C7XoclxJuLoV0Cz3rEQ1tv9MZRYYvJtm9n1BiEQQMzSmE/w2AEkNacLYfg==",
"license": "MIT",
"dependencies": {
"ajv": "^6.12.6",
@@ -6192,6 +6192,7 @@
"cors": "^2.8.5",
"cross-spawn": "^7.0.5",
"eventsource": "^3.0.2",
"eventsource-parser": "^3.0.0",
"express": "^5.0.1",
"express-rate-limit": "^7.5.0",
"pkce-challenge": "^5.0.0",


@@ -379,10 +379,10 @@ async function applyFileEdits(
function formatSize(bytes: number): string {
const units = ['B', 'KB', 'MB', 'GB', 'TB'];
if (bytes === 0) return '0 B';
const i = Math.floor(Math.log(bytes) / Math.log(1024));
if (i === 0) return `${bytes} ${units[i]}`;
return `${(bytes / Math.pow(1024, i)).toFixed(2)} ${units[i]}`;
}
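
The formatSize helper above picks a unit by taking the base-1024 logarithm of the byte count. A quick worked sketch of that selection (hypothetical calls, not part of the commit):

// Math.log(1536) / Math.log(1024) ≈ 1.06, floored to 1, so the KB unit is used.
console.log(formatSize(0));              // "0 B"
console.log(formatSize(512));            // "512 B"  (index 0 prints whole bytes)
console.log(formatSize(1536));           // "1.50 KB"
console.log(formatSize(5 * 1024 ** 3));  // "5.00 GB"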
@@ -391,9 +391,9 @@ async function tailFile(filePath: string, numLines: number): Promise<string> {
const CHUNK_SIZE = 1024; // Read 1KB at a time
const stats = await fs.stat(filePath);
const fileSize = stats.size;
if (fileSize === 0) return '';
// Open file for reading
const fileHandle = await fs.open(filePath, 'r');
try {
@@ -402,36 +402,36 @@ async function tailFile(filePath: string, numLines: number): Promise<string> {
let chunk = Buffer.alloc(CHUNK_SIZE);
let linesFound = 0;
let remainingText = '';
// Read chunks from the end of the file until we have enough lines
while (position > 0 && linesFound < numLines) {
const size = Math.min(CHUNK_SIZE, position);
position -= size;
const { bytesRead } = await fileHandle.read(chunk, 0, size, position);
if (!bytesRead) break;
// Get the chunk as a string and prepend any remaining text from previous iteration
const readData = chunk.slice(0, bytesRead).toString('utf-8');
const chunkText = readData + remainingText;
// Split by newlines and count
const chunkLines = normalizeLineEndings(chunkText).split('\n');
// If this isn't the end of the file, the first line is likely incomplete
// Save it to prepend to the next chunk
if (position > 0) {
remainingText = chunkLines[0];
chunkLines.shift(); // Remove the first (incomplete) line
}
// Add lines to our result (up to the number we need)
for (let i = chunkLines.length - 1; i >= 0 && linesFound < numLines; i--) {
lines.unshift(chunkLines[i]);
linesFound++;
}
}
return lines.join('\n');
} finally {
await fileHandle.close();
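
The loop above tails a file by scanning backwards: it reads up to 1KB at a time from the end, prepends leftover text so lines split across chunk boundaries are reassembled, and stops once numLines complete lines have been collected. A hypothetical usage sketch (the path is illustrative):

// Tail the last 10 lines of a large log; memory stays bounded at roughly
// CHUNK_SIZE plus the collected lines, regardless of file size.
const lastTen = await tailFile('/var/log/app.log', 10);
console.log(lastTen);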
@@ -446,14 +446,14 @@ async function headFile(filePath: string, numLines: number): Promise<string> {
let buffer = '';
let bytesRead = 0;
const chunk = Buffer.alloc(1024); // 1KB buffer
// Read chunks and count lines until we have enough or reach EOF
while (lines.length < numLines) {
const result = await fileHandle.read(chunk, 0, chunk.length, bytesRead);
if (result.bytesRead === 0) break; // End of file
bytesRead += result.bytesRead;
buffer += chunk.slice(0, result.bytesRead).toString('utf-8');
const newLineIndex = buffer.lastIndexOf('\n');
if (newLineIndex !== -1) {
const completeLines = buffer.slice(0, newLineIndex).split('\n');
@@ -464,29 +464,32 @@ async function headFile(filePath: string, numLines: number): Promise<string> {
}
}
}
// If there is leftover content and we still need lines, add it
if (buffer.length > 0 && lines.length < numLines) {
lines.push(buffer);
}
return lines.join('\n');
} finally {
await fileHandle.close();
}
}
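
headFile is the forward counterpart: it reads 1KB chunks from the start, splits complete lines off at the last newline in the buffer, and stops as soon as enough lines are gathered, so even a huge file costs only a few small reads. A usage sketch under the same assumptions:

// Read just the first 5 lines; any trailing text without a newline at EOF
// still counts as a final line, per the leftover-buffer check above.
const firstFive = await headFile('/var/log/app.log', 5);
console.log(firstFive);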
-// Stream a file and return its Base64 representation without loading the
-// entire file into memory at once. Chunks are encoded individually and
-// concatenated into the final string.
+// Reads a file as a stream of buffers, concatenates them, and then encodes
+// the result to a Base64 string. This is a memory-efficient way to handle
+// binary data from a stream before the final encoding.
 async function readFileAsBase64Stream(filePath: string): Promise<string> {
   return new Promise((resolve, reject) => {
-    const stream = createReadStream(filePath, { encoding: 'base64' });
-    let data = '';
+    const stream = createReadStream(filePath);
+    const chunks: Buffer[] = [];
     stream.on('data', (chunk) => {
-      data += chunk;
+      chunks.push(chunk as Buffer);
     });
-    stream.on('end', () => resolve(data));
+    stream.on('end', () => {
+      const finalBuffer = Buffer.concat(chunks);
+      resolve(finalBuffer.toString('base64'));
+    });
     stream.on('error', (err) => reject(err));
   });
 }
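
The rewrite collects raw Buffer chunks and performs one Base64 encoding over the concatenated result, instead of asking the stream to emit 'base64'-encoded strings chunk by chunk. Base64 works in 3-byte groups, so naively encoding each chunk in isolation inserts padding mid-stream whenever a chunk length is not a multiple of 3; whether the old { encoding: 'base64' } path hit this in practice depends on Node's internal string decoder, but the new version makes the single final encoding explicit. An illustrative sketch (values are mine, not from this commit):

// Encoding chunks separately vs. encoding the whole buffer once.
const part1 = Buffer.from('ab');  // 2 bytes: not a multiple of 3
const part2 = Buffer.from('c');   // 1 byte
const perChunk = part1.toString('base64') + part2.toString('base64'); // "YWI=Yw==" (padding mid-stream)
const whole = Buffer.concat([part1, part2]).toString('base64');       // "YWJj"
console.log(perChunk === whole);  // false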
@@ -631,11 +634,11 @@ server.setRequestHandler(CallToolRequestSchema, async (request) => {
throw new Error(`Invalid arguments for read_text_file: ${parsed.error}`);
}
const validPath = await validatePath(parsed.data.path);
if (parsed.data.head && parsed.data.tail) {
throw new Error("Cannot specify both head and tail parameters simultaneously");
}
if (parsed.data.tail) {
// Use memory-efficient tail implementation for large files
const tailContent = await tailFile(validPath, parsed.data.tail);
@@ -643,7 +646,7 @@ server.setRequestHandler(CallToolRequestSchema, async (request) => {
content: [{ type: "text", text: tailContent }],
};
}
if (parsed.data.head) {
// Use memory-efficient head implementation for large files
const headContent = await headFile(validPath, parsed.data.head);
@@ -651,7 +654,7 @@ server.setRequestHandler(CallToolRequestSchema, async (request) => {
content: [{ type: "text", text: headContent }],
};
}
const content = await fs.readFile(validPath, "utf-8");
return {
content: [{ type: "text", text: content }],
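
Taken together, this handler validates the arguments, rejects the head/tail combination, routes each option to its memory-efficient reader, and falls back to a full fs.readFile. A hypothetical sketch of the argument shapes and their routing:

// read_text_file argument shapes (illustrative):
// { path: "/logs/app.log", tail: 20 }          -> tailFile(validPath, 20)
// { path: "/logs/app.log", head: 5 }           -> headFile(validPath, 5)
// { path: "/logs/app.log" }                    -> fs.readFile(validPath, "utf-8")
// { path: "/logs/app.log", head: 5, tail: 20 } -> Error: cannot specify both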
@@ -686,7 +689,7 @@ server.setRequestHandler(CallToolRequestSchema, async (request) => {
? "audio"
: "blob";
return {
-content: [{ type, data, mimeType } as any],
+content: [{ type, data, mimeType }],
};
}
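
The only change in this hunk is dropping the as any cast, presumably because the updated SDK's content types now accept this shape directly. For context, a minimal standalone sketch of the MIME-based discrimination (the image branch is an assumption; it is cut off above):

// Assumed shape of the ternary above: map a MIME type to a content type.
function contentTypeFor(mimeType: string): "image" | "audio" | "blob" {
  if (mimeType.startsWith("image/")) return "image";
  if (mimeType.startsWith("audio/")) return "audio";
  return "blob"; // anything else is treated as a generic blob
}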
@@ -794,7 +797,7 @@ server.setRequestHandler(CallToolRequestSchema, async (request) => {
}
const validPath = await validatePath(parsed.data.path);
const entries = await fs.readdir(validPath, { withFileTypes: true });
// Get detailed information for each entry
const detailedEntries = await Promise.all(
entries.map(async (entry) => {
@@ -817,7 +820,7 @@ server.setRequestHandler(CallToolRequestSchema, async (request) => {
}
})
);
// Sort entries based on sortBy parameter
const sortedEntries = [...detailedEntries].sort((a, b) => {
if (parsed.data.sortBy === 'size') {
@@ -826,29 +829,29 @@ server.setRequestHandler(CallToolRequestSchema, async (request) => {
// Default sort by name
return a.name.localeCompare(b.name);
});
// Format the output
const formattedEntries = sortedEntries.map(entry =>
`${entry.isDirectory ? "[DIR]" : "[FILE]"} ${entry.name.padEnd(30)} ${
entry.isDirectory ? "" : formatSize(entry.size).padStart(10)
}`
);
// Add summary
const totalFiles = detailedEntries.filter(e => !e.isDirectory).length;
const totalDirs = detailedEntries.filter(e => e.isDirectory).length;
const totalSize = detailedEntries.reduce((sum, entry) => sum + (entry.isDirectory ? 0 : entry.size), 0);
const summary = [
"",
`Total: ${totalFiles} files, ${totalDirs} directories`,
`Combined size: ${formatSize(totalSize)}`
];
return {
content: [{
type: "text",
text: [...formattedEntries, ...summary].join("\n")
}],
};
}
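
The listing logic above gathers per-entry details, sorts by size or name, pads names into columns, and appends a summary of counts and combined size. A minimal sketch of the comparator, assuming entries of the shape built earlier (the size ordering direction is an assumption, since that branch is elided in the hunk):

interface EntryInfo { name: string; isDirectory: boolean; size: number; }

function sortEntries(entries: EntryInfo[], sortBy: 'name' | 'size'): EntryInfo[] {
  return [...entries].sort((a, b) =>
    sortBy === 'size'
      ? b.size - a.size                 // assumed: largest first
      : a.name.localeCompare(b.name));  // default: alphabetical by name
}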

package.json

@@ -20,7 +20,7 @@
"test": "jest --config=jest.config.cjs --coverage"
},
"dependencies": {
"@modelcontextprotocol/sdk": "^1.12.3",
"@modelcontextprotocol/sdk": "^1.16.0",
"diff": "^5.1.0",
"glob": "^10.3.10",
"minimatch": "^10.0.1",
@@ -38,4 +38,4 @@
"ts-node": "^10.9.2",
"typescript": "^5.8.2"
}
}