Merge branch 'main' into oracle-mcp

Marcelo Ochoa
2025-06-17 10:28:25 -03:00
committed by GitHub
3 changed files with 216 additions and 6 deletions


@@ -45,6 +45,7 @@ The following reference servers are now archived and can be found at [servers-ar
Official integrations are maintained by companies building production-ready MCP servers for their platforms.
- <img height="12" width="12" src="https://www.21st.dev/favicon.ico" alt="21st.dev Logo" /> **[21st.dev Magic](https://github.com/21st-dev/magic-mcp)** - Create crafted UI components inspired by the best 21st.dev design engineers.
- <img height="12" width="12" src="https://framerusercontent.com/images/LpSK1tSZweomrAHOMAj9Gea96lA.svg" alt="Paragon Logo" /> **[ActionKit by Paragon](https://github.com/useparagon/paragon-mcp)** - Connect to 130+ SaaS integrations (e.g. Slack, Salesforce, Gmail) with Paragons [ActionKit](https://www.useparagon.com/actionkit) API.
- <img height="12" width="12" src="https://invoxx-public-bucket.s3.eu-central-1.amazonaws.com/frontend-resources/adfin-logo-small.svg" alt="Adfin Logo" /> **[Adfin](https://github.com/Adfin-Engineering/mcp-server-adfin)** - The only platform you need to get paid - all payments in one place, invoicing and accounting reconciliations with [Adfin](https://www.adfin.com/).
- <img height="12" width="12" src="https://www.agentql.com/favicon/favicon.png" alt="AgentQL Logo" /> **[AgentQL](https://github.com/tinyfish-io/agentql-mcp)** - Enable AI agents to get structured data from unstructured web with [AgentQL](https://www.agentql.com/).
- <img height="12" width="12" src="https://agentrpc.com/favicon.ico" alt="AgentRPC Logo" /> **[AgentRPC](https://github.com/agentrpc/agentrpc)** - Connect to any function, any language, across network boundaries using [AgentRPC](https://www.agentrpc.com/).
@@ -781,6 +782,7 @@ These are high-level frameworks that make it easier to build MCP servers or clie
* **[MCP-Framework](https://mcp-framework.com)** Build MCP servers with elegance and speed in Typescript. Comes with a CLI to create your project with `mcp create app`. Get started with your first server in under 5 minutes by **[Alex Andru](https://github.com/QuantGeekDev)**
* **[Next.js MCP Server Template](https://github.com/vercel-labs/mcp-for-next.js)** (Typescript) - A starter Next.js project that uses the MCP Adapter to allow MCP clients to connect and access resources.
* **[Quarkus MCP Server SDK](https://github.com/quarkiverse/quarkus-mcp-server)** (Java)
* **[SAP ABAP MCP Server SDK](https://github.com/abap-ai/mcp)** - Build SAP ABAP based MCP servers. ABAP 7.52 based with 7.02 downport; runs on R/3 & S/4HANA on-premises, currently not cloud-ready.
* **[Spring AI MCP Server](https://docs.spring.io/spring-ai/reference/api/mcp/mcp-server-boot-starter-docs.html)** - Provides auto-configuration for setting up an MCP server in Spring Boot applications.
* **[Template MCP Server](https://github.com/mcpdotdirect/template-mcp-server)** - A CLI tool to create a new Model Context Protocol server project with TypeScript support, dual transport options, and an extensible structure
* **[Vercel MCP Adapter](https://github.com/vercel/mcp-adapter)** (Typescript) - A simple package to start serving an MCP server on most major JS meta-frameworks including Next, Nuxt, Svelte, and more.
@@ -789,6 +791,7 @@ These are high-level frameworks that make it easier to build MCP servers or clie
* **[codemirror-mcp](https://github.com/marimo-team/codemirror-mcp)** - CodeMirror extension that implements the Model Context Protocol (MCP) for resource mentions and prompt commands
* **[Spring AI MCP Client](https://docs.spring.io/spring-ai/reference/api/mcp/mcp-client-boot-starter-docs.html)** - Provides auto-configuration for MCP client functionality in Spring Boot applications.
* **[MCP CLI Client](https://github.com/vincent-pli/mcp-cli-host)** - A CLI host application that enables Large Language Models (LLMs) to interact with external tools through the Model Context Protocol (MCP).
## 📚 Resources


@@ -97,6 +97,8 @@ async function validatePath(requestedPath: string): Promise<string> {
// Schema definitions
const ReadFileArgsSchema = z.object({
path: z.string(),
tail: z.number().optional().describe('If provided, returns only the last N lines of the file'),
head: z.number().optional().describe('If provided, returns only the first N lines of the file')
});
const ReadMultipleFilesArgsSchema = z.object({
@@ -127,6 +129,11 @@ const ListDirectoryArgsSchema = z.object({
path: z.string(),
});
const ListDirectoryWithSizesArgsSchema = z.object({
path: z.string(),
sortBy: z.enum(['name', 'size']).optional().default('name').describe('Sort entries by name or size'),
});
const DirectoryTreeArgsSchema = z.object({
path: z.string(),
});
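// Illustrative sketch only (hypothetical paths and values): how these schemas
// parse tool arguments. `head` and `tail` are optional, and `sortBy` falls back
// to its declared default when omitted.
const exampleReadArgs = ReadFileArgsSchema.safeParse({
  path: "/projects/app/server.log",
  tail: 50,
});
// exampleReadArgs.success === true; head stays undefined
const exampleListArgs = ListDirectoryWithSizesArgsSchema.safeParse({ path: "/projects/app" });
// exampleListArgs.success === true; sortBy === "name"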
@@ -330,6 +337,107 @@ async function applyFileEdits(
return formattedDiff;
}
// Helper functions
function formatSize(bytes: number): string {
const units = ['B', 'KB', 'MB', 'GB', 'TB'];
if (bytes === 0) return '0 B';
const i = Math.floor(Math.log(bytes) / Math.log(1024));
if (i === 0) return `${bytes} ${units[i]}`;
return `${(bytes / Math.pow(1024, i)).toFixed(2)} ${units[i]}`;
}
// Memory-efficient implementation to get the last N lines of a file
async function tailFile(filePath: string, numLines: number): Promise<string> {
const CHUNK_SIZE = 1024; // Read 1KB at a time
const stats = await fs.stat(filePath);
const fileSize = stats.size;
if (fileSize === 0) return '';
// Open file for reading
const fileHandle = await fs.open(filePath, 'r');
try {
const lines: string[] = [];
let position = fileSize;
let chunk = Buffer.alloc(CHUNK_SIZE);
let linesFound = 0;
let remainingText = '';
// Read chunks from the end of the file until we have enough lines
while (position > 0 && linesFound < numLines) {
const size = Math.min(CHUNK_SIZE, position);
position -= size;
const { bytesRead } = await fileHandle.read(chunk, 0, size, position);
if (!bytesRead) break;
// Get the chunk as a string and prepend any remaining text from previous iteration
const readData = chunk.slice(0, bytesRead).toString('utf-8');
const chunkText = readData + remainingText;
// Split by newlines and count
const chunkLines = normalizeLineEndings(chunkText).split('\n');
// If this isn't the end of the file, the first line is likely incomplete
// Save it to prepend to the next chunk
if (position > 0) {
remainingText = chunkLines[0];
chunkLines.shift(); // Remove the first (incomplete) line
}
// Add lines to our result (up to the number we need)
for (let i = chunkLines.length - 1; i >= 0 && linesFound < numLines; i--) {
lines.unshift(chunkLines[i]);
linesFound++;
}
}
return lines.join('\n');
} finally {
await fileHandle.close();
}
}
// New function to get the first N lines of a file
async function headFile(filePath: string, numLines: number): Promise<string> {
const fileHandle = await fs.open(filePath, 'r');
try {
const lines: string[] = [];
let buffer = '';
let bytesRead = 0;
const chunk = Buffer.alloc(1024); // 1KB buffer
// Read chunks and count lines until we have enough or reach EOF
while (lines.length < numLines) {
const result = await fileHandle.read(chunk, 0, chunk.length, bytesRead);
if (result.bytesRead === 0) break; // End of file
bytesRead += result.bytesRead;
buffer += chunk.slice(0, result.bytesRead).toString('utf-8');
const newLineIndex = buffer.lastIndexOf('\n');
if (newLineIndex !== -1) {
const completeLines = buffer.slice(0, newLineIndex).split('\n');
buffer = buffer.slice(newLineIndex + 1);
for (const line of completeLines) {
lines.push(line);
if (lines.length >= numLines) break;
}
}
}
// If there is leftover content and we still need lines, add it
if (buffer.length > 0 && lines.length < numLines) {
lines.push(buffer);
}
return lines.join('\n');
} finally {
await fileHandle.close();
}
}
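// Illustrative usage sketch (the wrapper function and path are hypothetical):
// both helpers resolve to a newline-joined string while reading the file in
// small chunks rather than loading it whole.
async function exampleHeadTailUsage(): Promise<void> {
  const logPath = "/var/log/app.log"; // hypothetical path
  const lastLines = await tailFile(logPath, 20); // last 20 lines, read backwards in 1 KB chunks
  const firstLines = await headFile(logPath, 5); // first 5 lines, read forward until enough newlines are seen
  console.error(`head:\n${firstLines}\n\ntail:\n${lastLines}`);
}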
// Tool handlers
server.setRequestHandler(ListToolsRequestSchema, async () => {
return {
@@ -340,7 +448,9 @@ server.setRequestHandler(ListToolsRequestSchema, async () => {
"Read the complete contents of a file from the file system. " +
"Handles various text encodings and provides detailed error messages " +
"if the file cannot be read. Use this tool when you need to examine " +
"the contents of a single file. Only works within allowed directories.",
"the contents of a single file. Use the 'head' parameter to read only " +
"the first N lines of a file, or the 'tail' parameter to read only " +
"the last N lines of a file. Only works within allowed directories.",
inputSchema: zodToJsonSchema(ReadFileArgsSchema) as ToolInput,
},
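// Illustrative request shape only (hypothetical path): a client supplies either
// `head` or `tail`, never both, e.g.
//   { "name": "read_file", "arguments": { "path": "/projects/app/CHANGELOG.md", "head": 10 } }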
{
@@ -387,6 +497,15 @@ server.setRequestHandler(ListToolsRequestSchema, async () => {
"finding specific files within a directory. Only works within allowed directories.",
inputSchema: zodToJsonSchema(ListDirectoryArgsSchema) as ToolInput,
},
{
name: "list_directory_with_sizes",
description:
"Get a detailed listing of all files and directories in a specified path, including sizes. " +
"Results clearly distinguish between files and directories with [FILE] and [DIR] " +
"prefixes. This tool is useful for understanding directory structure and " +
"finding specific files within a directory. Only works within allowed directories.",
inputSchema: zodToJsonSchema(ListDirectoryWithSizesArgsSchema) as ToolInput,
},
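// Illustrative request shape only (hypothetical path): `sortBy` is optional and
// defaults to "name", e.g.
//   { "name": "list_directory_with_sizes", "arguments": { "path": "/projects/app", "sortBy": "size" } }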
{
name: "directory_tree",
description:
@@ -451,6 +570,27 @@ server.setRequestHandler(CallToolRequestSchema, async (request) => {
throw new Error(`Invalid arguments for read_file: ${parsed.error}`);
}
const validPath = await validatePath(parsed.data.path);
if (parsed.data.head && parsed.data.tail) {
throw new Error("Cannot specify both head and tail parameters simultaneously");
}
if (parsed.data.tail) {
// Use memory-efficient tail implementation for large files
const tailContent = await tailFile(validPath, parsed.data.tail);
return {
content: [{ type: "text", text: tailContent }],
};
}
if (parsed.data.head) {
// Use memory-efficient head implementation for large files
const headContent = await headFile(validPath, parsed.data.head);
return {
content: [{ type: "text", text: headContent }],
};
}
const content = await fs.readFile(validPath, "utf-8");
return {
content: [{ type: "text", text: content }],
@@ -530,11 +670,77 @@ server.setRequestHandler(CallToolRequestSchema, async (request) => {
};
}
case "directory_tree": {
const parsed = DirectoryTreeArgsSchema.safeParse(args);
if (!parsed.success) {
throw new Error(`Invalid arguments for directory_tree: ${parsed.error}`);
case "list_directory_with_sizes": {
const parsed = ListDirectoryWithSizesArgsSchema.safeParse(args);
if (!parsed.success) {
throw new Error(`Invalid arguments for list_directory_with_sizes: ${parsed.error}`);
}
const validPath = await validatePath(parsed.data.path);
const entries = await fs.readdir(validPath, { withFileTypes: true });
// Get detailed information for each entry
const detailedEntries = await Promise.all(
entries.map(async (entry) => {
const entryPath = path.join(validPath, entry.name);
try {
const stats = await fs.stat(entryPath);
return {
name: entry.name,
isDirectory: entry.isDirectory(),
size: stats.size,
mtime: stats.mtime
};
} catch (error) {
return {
name: entry.name,
isDirectory: entry.isDirectory(),
size: 0,
mtime: new Date(0)
};
}
})
);
// Sort entries based on sortBy parameter
const sortedEntries = [...detailedEntries].sort((a, b) => {
if (parsed.data.sortBy === 'size') {
return b.size - a.size; // Descending by size
}
// Default sort by name
return a.name.localeCompare(b.name);
});
// Format the output
const formattedEntries = sortedEntries.map(entry =>
`${entry.isDirectory ? "[DIR]" : "[FILE]"} ${entry.name.padEnd(30)} ${
entry.isDirectory ? "" : formatSize(entry.size).padStart(10)
}`
);
// Add summary
const totalFiles = detailedEntries.filter(e => !e.isDirectory).length;
const totalDirs = detailedEntries.filter(e => e.isDirectory).length;
const totalSize = detailedEntries.reduce((sum, entry) => sum + (entry.isDirectory ? 0 : entry.size), 0);
const summary = [
"",
`Total: ${totalFiles} files, ${totalDirs} directories`,
`Combined size: ${formatSize(totalSize)}`
];
return {
content: [{
type: "text",
text: [...formattedEntries, ...summary].join("\n")
}],
};
}
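// Illustrative output sketch (hypothetical names and sizes): each entry gets a
// [DIR]/[FILE] prefix, a name padded to 30 characters, a right-aligned size for
// files, and the listing ends with the totals summary, e.g.
//   [FILE] index.ts                          24.37 KB
//   [DIR] node_modules
//   [FILE] package.json                       1.20 KB
//
//   Total: 2 files, 1 directories
//   Combined size: 25.57 KB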
case "directory_tree": {
const parsed = DirectoryTreeArgsSchema.safeParse(args);
if (!parsed.success) {
throw new Error(`Invalid arguments for directory_tree: ${parsed.error}`);
}
interface TreeEntry {
name: string;


@@ -288,12 +288,13 @@ If you are doing local development, there are two ways to test your changes:
"mcpServers": {
"git": {
"command": "uv",
"args": [
"args": [
"--directory",
"/<path to mcp-servers>/mcp-servers/src/git",
"run",
"mcp-server-git"
]
}
}
}
```