diff --git a/README.md b/README.md
index d0efe7e3..e230c714 100644
--- a/README.md
+++ b/README.md
@@ -158,6 +158,7 @@ Official integrations are maintained by companies building production ready MCP
- **[CTERA Portal](https://github.com/ctera/mcp-ctera-core)** - CTERA Portal is a multi-tenant, multi-cloud platform that delivers a global namespace and unified management across petabytes of distributed content.
- **[Cycode](https://github.com/cycodehq/cycode-cli#mcp-command-experiment)** - Boost security in your dev lifecycle via SAST, SCA, Secrets & IaC scanning with [Cycode](https://cycode.com/).
- **[Dart](https://github.com/its-dart/dart-mcp-server)** - Interact with task, doc, and project data in [Dart](https://itsdart.com), an AI-native project management tool
+- **[Databricks](https://docs.databricks.com/aws/en/generative-ai/mcp/)** - Connect to data, AI tools & agents, and the rest of the Databricks platform using turnkey managed MCP servers. Or, host your own custom MCP servers within the Databricks security and data governance boundary.
- **[DataHub](https://github.com/acryldata/mcp-server-datahub)** - Search your data assets, traverse data lineage, write SQL queries, and more using [DataHub](https://datahub.com/) metadata.
- **[Daytona](https://github.com/daytonaio/daytona/tree/main/apps/cli/mcp)** - Fast and secure execution of your AI generated code with [Daytona](https://daytona.io) sandboxes
- **[Debugg.AI](https://github.com/debugg-ai/debugg-ai-mcp)** - Zero-Config, Fully AI-Managed End-to-End Testing for any code gen platform via [Debugg.AI](https://debugg.ai) remote browsing test agents.
@@ -167,6 +168,7 @@ Official integrations are maintained by companies building production ready MCP
- **[DevHub](https://github.com/devhub/devhub-cms-mcp)** - Manage and utilize website content within the [DevHub](https://www.devhub.com) CMS platform
- **[DevRev](https://github.com/devrev/mcp-server)** - An MCP server to integrate with DevRev APIs to search through your DevRev Knowledge Graph, where objects can be imported from different sources listed [here](https://devrev.ai/docs/import#available-sources).
- **[DexPaprika (CoinPaprika)](https://github.com/coinpaprika/dexpaprika-mcp)** - Access real-time DEX data, liquidity pools, token information, and trading analytics across multiple blockchain networks with [DexPaprika](https://dexpaprika.com) by CoinPaprika.
+- **[Dolt](https://github.com/dolthub/dolt-mcp)** - The official MCP server for version-controlled [Dolt](https://doltdb.com/) databases.
- **[Drata](https://drata.com/mcp)** - Get hands-on with our experimental MCP server—bringing real-time compliance intelligence into your AI workflows.
- **[Dumpling AI](https://github.com/Dumpling-AI/mcp-server-dumplingai)** - Access data, web scraping, and document conversion APIs by [Dumpling AI](https://www.dumplingai.com/)
- **[Dynatrace](https://github.com/dynatrace-oss/dynatrace-mcp)** - Manage and interact with the [Dynatrace Platform](https://www.dynatrace.com/platform) for real-time observability and monitoring.
@@ -340,6 +342,7 @@ Official integrations are maintained by companies building production ready MCP
- **[Prisma](https://www.prisma.io/docs/postgres/mcp-server)** - Create and manage Prisma Postgres databases
- **[Probe.dev](https://docs.probe.dev/guides/mcp-integration)** - Comprehensive media analysis and validation powered by [Probe.dev](https://probe.dev). Hosted MCP server with FFprobe, MediaInfo, and Probe Report analysis capabilities.
- **[ProdE](https://github.com/CuriousBox-AI/ProdE-mcp)** - Your 24/7 production engineer that preserves context across multiple codebases.
+- **[Program Integrity Alliance (PIA)](https://github.com/Program-Integrity-Alliance/pia-mcp-local)** - Local and Hosted MCP servers providing AI-friendly access to U.S. Government Open Datasets. Also available on [Docker MCP Catalog](https://hub.docker.com/mcp/explore?search=PIA). See [our website](https://programintegrity.org) for more details.
- **[PromptHouse](https://github.com/newtype-01/prompthouse-mcp)** - Personal prompt library with MCP integration for AI clients.
- **[proxymock](https://docs.speedscale.com/proxymock/reference/mcp/)** - An MCP server that automatically generates tests and mocks by recording a live app.
- **[PubNub](https://github.com/pubnub/pubnub-mcp-server)** - Retrieves context for developing with PubNub SDKs and calling APIs.
@@ -354,11 +357,14 @@ Official integrations are maintained by companies building production ready MCP
- **[Raygun](https://github.com/MindscapeHQ/mcp-server-raygun)** - Interact with your crash reporting and real user monitoring data on your Raygun account
- **[Razorpay](https://github.com/razorpay/razorpay-mcp-server)** - Razorpay's official MCP server
- **[Recraft](https://github.com/recraft-ai/mcp-recraft-server)** - Generate raster and vector (SVG) images using [Recraft](https://recraft.ai). You can also edit and upscale images, create your own styles, and vectorize raster images
+- **[Red Hat Insights](https://github.com/RedHatInsights/insights-mcp)** - Interact with [Red Hat Insights](https://www.redhat.com/en/technologies/management/insights) - build images, manage vulnerabilities, or view targeted recommendations.
- **[Redis](https://github.com/redis/mcp-redis/)** - The Redis official MCP Server offers an interface to manage and search data in Redis.
- **[Redis Cloud API](https://github.com/redis/mcp-redis-cloud/)** - The Redis Cloud API MCP Server allows you to manage your Redis Cloud resources using natural language.
- **[Reexpress](https://github.com/ReexpressAI/reexpress_mcp_server)** - Enable Similarity-Distance-Magnitude statistical verification for your search, software, and data science workflows
- **[Reltio](https://github.com/reltio-ai/reltio-mcp-server)** - A lightweight, plugin-based MCP server designed to perform advanced entity matching with language models in Reltio environments.
- **[Rember](https://github.com/rember/rember-mcp)** - Create spaced repetition flashcards in [Rember](https://rember.com) to remember anything you learn in your chats
+- **[Render](https://render.com/docs/mcp-server)** - The official Render MCP server: spin up new services, run queries against your databases, and debug rapidly with direct access to service metrics and logs.
+- **[ReportPortal](https://github.com/reportportal/reportportal-mcp-server)** - Explore and analyze automated test results from [ReportPortal](https://reportportal.io) using your favourite LLM.
- **[Revit](https://github.com/NonicaTeam/AI-Connector-for-Revit)** - Connect and interact with your Revit models live.
- **[Rill Data](https://docs.rilldata.com/explore/mcp)** - Interact with Rill Data to query and analyze your data.
- **[Riza](https://github.com/riza-io/riza-mcp)** - Arbitrary code execution and tool-use platform for LLMs by [Riza](https://riza.io)
@@ -366,6 +372,7 @@ Official integrations are maintained by companies building production ready MCP
- **[Rodin](https://github.com/DeemosTech/rodin-api-mcp)** - Generate 3D Models with [Hyper3D Rodin](https://hyper3d.ai)
- **[Root Signals](https://github.com/root-signals/root-signals-mcp)** - Improve and quality control your outputs with evaluations using LLM-as-Judge
- **[Routine](https://github.com/routineco/mcp-server)** - MCP server to interact with [Routine](https://routine.co/): calendars, tasks, notes, etc.
+- **[Rube](https://github.com/ComposioHQ/Rube)** - Rube is a Model Context Protocol (MCP) server that connects your AI tools to 500+ apps like Gmail, Slack, GitHub, and Notion. Simply install it in your AI client, authenticate once with your apps, and start asking your AI to perform real actions like "Send an email" or "Create a task."
- **[SafeDep](https://github.com/safedep/vet/blob/main/docs/mcp.md)** - SafeDep `vet-mcp` helps in vetting open source packages for security risks—such as vulnerabilities and malicious code—before they're used in your project, especially with AI-generated code suggestions.
- **[SafeLine](https://github.com/chaitin/SafeLine/tree/main/mcp_server)** - [SafeLine](https://safepoint.cloud/landing/safeline) is a self-hosted WAF (Web Application Firewall) to protect your web apps from attacks and exploits.
- **[ScrAPI](https://github.com/DevEnterpriseSoftware/scrapi-mcp)** - Web scraping using [ScrAPI](https://scrapi.tech). Extract website content that is difficult to access because of bot detection, captchas or even geolocation restrictions.
@@ -410,6 +417,7 @@ Official integrations are maintained by companies building production ready MCP
+- **[Tencent RTC](https://github.com/Tencent-RTC/mcp)** - The MCP Server enables AI IDEs to more effectively understand and use [Tencent's Real-Time Communication](https://trtc.io/) SDKs and APIs, which significantly streamlines the process for developers to build audio/video call applications.
- **[Trade Agent](https://github.com/Trade-Agent/trade-agent-mcp)** - Execute stock and crypto trades on your brokerage via [Trade Agent](https://thetradeagent.ai)
- **[Twelve Data](https://github.com/twelvedata/mcp)** - Integrate your AI agents with real-time and historical financial market data through our official [Twelve Data](https://twelvedata.com) MCP server.
- **[Twilio](https://github.com/twilio-labs/mcp)** - Interact with [Twilio](https://www.twilio.com/en-us) APIs to send SMS messages, manage phone numbers, configure your account, and more.
- **[Uberall](https://github.com/uberall/uberall-mcp-server)** - Manage multi-location presence, including listings, reviews, and social posting, via [uberall](https://uberall.com).
- **[Unblocked](https://docs.getunblocked.com/unblocked-mcp)** - Help your AI-powered IDEs generate faster, more accurate code by giving them access to context from Slack, Confluence, Google Docs, JIRA, and more with [Unblocked](https://getunblocked.com).
- **[UnifAI](https://github.com/unifai-network/unifai-mcp-server)** - Dynamically search and call tools using [UnifAI Network](https://unifai.network)
@@ -548,6 +556,7 @@ A growing set of community-developed and maintained servers demonstrates various
- **[ChessPal Chess Engine (stockfish)](https://github.com/wilson-urdaneta/chesspal-mcp-engine)** - A Stockfish-powered chess engine exposed as an MCP server. Calculates best moves and supports both HTTP/SSE and stdio transports.
- **[Chroma](https://github.com/privetin/chroma)** - Vector database server for semantic document search and metadata filtering, built on Chroma
- **[Chrome history](https://github.com/vincent-pli/chrome-history-mcp)** - Talk with AI about your browser history, get fun ^_^
+- **[CIViC](https://github.com/QuentinCody/civic-mcp-server)** - MCP server for the Clinical Interpretation of Variants in Cancer (CIViC) database, providing access to clinical variant interpretations and genomic evidence for cancer research.
- **[Claude Thread Continuity](https://github.com/peless/claude-thread-continuity)** - Persistent memory system enabling Claude Desktop conversations to resume with full context across sessions. Maintains conversation history, project states, and user preferences for seamless multi-session workflows.
- **[ClaudePost](https://github.com/ZilongXue/claude-post)** - ClaudePost enables seamless email management for Gmail, offering secure features like email search, reading, and sending.
- **[CLDGeminiPDF Analyzer](https://github.com/tfll37/CLDGeminiPDF-Analyzer)** - MCP server tool enabling sharing large PDF files to Google LLMs via API for further/additional analysis and response retrieval to Claude Desktop.
@@ -571,9 +580,11 @@ A growing set of community-developed and maintained servers demonstrates various
- **[consult7](https://github.com/szeider/consult7)** - Analyze large codebases and document collections using high-context models via OpenRouter, OpenAI, or Google AI -- very useful, e.g., with Claude Code
- **[Contentful-mcp](https://github.com/ivo-toby/contentful-mcp)** - Read, update, delete, publish content in your [Contentful](https://contentful.com) space(s) from this MCP Server.
- **[Context Crystallizer](https://github.com/hubertciebiada/context-crystallizer)** - AI Context Engineering tool that transforms large repositories into crystallized, AI-consumable knowledge through systematic analysis and optimization.
- **[context-portal](https://github.com/GreatScottyMac/context-portal)** - Context Portal (ConPort) is a memory bank database system that effectively builds a project-specific knowledge graph, capturing entities like decisions, progress, and architecture, along with their relationships. This serves as a powerful backend for Retrieval Augmented Generation (RAG), enabling AI assistants to access precise, up-to-date project information.
- **[cplusplus-mcp](https://github.com/kandrwmrtn/cplusplus_mcp)** - Semantic C++ code analysis using libclang. Enables Claude to understand C++ codebases through AST parsing rather than text search - find classes, navigate inheritance, trace function calls, and explore code relationships.
+- **[CRASH](https://github.com/nikkoxgonzales/crash-mcp)** - MCP server for structured, iterative reasoning and thinking with flexible validation, confidence tracking, revision mechanisms, and branching support.
- **[CreateveAI Nexus](https://github.com/spgoodman/createveai-nexus-server)** - Open-Source Bridge Between AI Agents and Enterprise Systems, with simple custom API plug-in capabilities (including close compatibility with ComfyUI nodes), support for Copilot Studio's MCP agent integrations, and support for Azure deployment in secure environments with secrets stored in Azure Key Vault, as well as straightforward on-premises deployment.
- **[Creatify](https://github.com/TSavo/creatify-mcp)** - MCP Server that exposes Creatify AI API capabilities for AI video generation, including avatar videos, URL-to-video conversion, text-to-speech, and AI-powered editing tools.
- **[Cronlytic](https://github.com/Cronlytic/cronlytic-mcp-server)** - Create CRUD operations for serverless cron jobs through [Cronlytic](https://cronlytic.com) MCP Server
- **[crypto-feargreed-mcp](https://github.com/kukapay/crypto-feargreed-mcp)** - Providing real-time and historical Crypto Fear & Greed Index data.
@@ -589,9 +600,12 @@ A growing set of community-developed and maintained servers demonstrates various
- **[Databricks](https://github.com/JordiNeil/mcp-databricks-server)** - Allows LLMs to run SQL queries, list jobs, and get details of job executions in a Databricks account.
- **[Databricks Genie](https://github.com/yashshingvi/databricks-genie-MCP)** - A server that connects to the Databricks Genie, allowing LLMs to ask natural language questions, run SQL queries, and interact with Databricks conversational agents.
- **[Databricks Smart SQL](https://github.com/RafaelCartenet/mcp-databricks-server)** - Leveraging Databricks Unity Catalog metadata, perform smart efficient SQL queries to solve Ad-hoc queries and explore data.
+- **[DataCite](https://github.com/QuentinCody/datacite-mcp-server)** - Unofficial MCP server for DataCite, providing access to research data and publication metadata through DataCite's REST API and GraphQL interface for scholarly research discovery.
- **[Datadog](https://github.com/GeLi2001/datadog-mcp-server)** - Datadog MCP Server for application tracing, monitoring, dashboard, incidents queries built on official datadog api.
- **[Dataset Viewer](https://github.com/privetin/dataset-viewer)** - Browse and analyze Hugging Face datasets with features like search, filtering, statistics, and data export
- **[DataWorks](https://github.com/aliyun/alibabacloud-dataworks-mcp-server)** - A Model Context Protocol (MCP) server that provides tools for AI, allowing it to interact with the [DataWorks](https://www.alibabacloud.com/help/en/dataworks/) Open API through a standardized interface. This implementation is based on the Alibaba Cloud Open API and enables AI agents to perform cloud resources operations seamlessly.
+- **[Data4library](https://github.com/isnow890/data4library-mcp)** (by isnow890) - MCP server for Korea's Library Information Naru API, providing comprehensive access to public library data, book searches, loan status, reading statistics, and GPS-based nearby library discovery across South Korea.
- **[DaVinci Resolve](https://github.com/samuelgursky/davinci-resolve-mcp)** - MCP server integration for DaVinci Resolve providing powerful tools for video editing, color grading, media management, and project control.
- **[DBHub](https://github.com/bytebase/dbhub/)** - Universal database MCP server connecting to MySQL, MariaDB, PostgreSQL, and SQL Server.
- **[Deebo](https://github.com/snagasuri/deebo-prototype)** – Agentic debugging MCP server that helps AI coding agents delegate and fix hard bugs through isolated multi-agent hypothesis testing.
@@ -607,6 +621,7 @@ A growing set of community-developed and maintained servers demonstrates various
- **[DevDb](https://github.com/damms005/devdb-vscode?tab=readme-ov-file#mcp-configuration)** - An MCP server that runs right inside the IDE, for connecting to MySQL, Postgres, SQLite, and MSSQL databases.
- **[DevOps AI Toolkit](https://github.com/vfarcic/dot-ai)** - AI-powered development productivity platform that enhances software development workflows through intelligent automation and AI-driven assistance.
- **[DevOps-MCP](https://github.com/wangkanai/devops-mcp)** - Dynamic Azure DevOps MCP server with directory-based authentication switching, supporting work items, repositories, builds, pipelines, and multi-project management with local configuration files.
+- **[DGIdb](https://github.com/QuentinCody/dgidb-mcp-server)** - MCP server for the Drug Gene Interaction Database (DGIdb), providing access to drug-gene interaction data, druggable genome information, and pharmacogenomics research.
- **[Dicom](https://github.com/ChristianHinge/dicom-mcp)** - An MCP server to query and retrieve medical images and for parsing and reading dicom-encapsulated documents (pdf etc.).
- **[Dify](https://github.com/YanxingLiu/dify-mcp-server)** - A simple implementation of an MCP server for dify workflows.
- **[Discogs](https://github.com/cswkim/discogs-mcp-server)** - An MCP server that connects to the Discogs API for interacting with your music collection.
@@ -636,6 +651,7 @@ A growing set of community-developed and maintained servers demonstrates various
- **[Email](https://github.com/Shy2593666979/mcp-server-email)** - This server enables users to send emails through various email providers, including Gmail, Outlook, Yahoo, Sina, Sohu, 126, 163, and QQ Mail. It also supports attaching files from specified directories, making it easy to upload attachments along with the email content.
- **[Email SMTP](https://github.com/egyptianego17/email-mcp-server)** - A simple MCP server that lets your AI agent send emails and attach files through SMTP.
- **[Enhance Prompt](https://github.com/FelixFoster/mcp-enhance-prompt)** - An MCP service to enhance your prompts.
+- **[Entrez](https://github.com/QuentinCody/entrez-mcp-server)** - Unofficial MCP server for NCBI Entrez databases, providing access to PubMed articles, gene information, protein data, and other biomedical research resources through NCBI's E-utilities API.
- **[Ergo Blockchain MCP](https://github.com/marctheshark3/ergo-mcp)** - An MCP server to integrate Ergo Blockchain Node and Explorer APIs for checking address balances, analyzing transactions, viewing transaction history, performing forensic analysis of addresses, searching for tokens, and monitoring network status.
- **[ESP MCP Server](https://github.com/horw/esp-mcp)** - An MCP server that integrates ESP IDF commands like building and flashing code for ESP Microcontrollers using an LLM.
- **[Eunomia](https://github.com/whataboutyou-ai/eunomia-MCP-server)** - Extension of the Eunomia framework that connects Eunomia instruments with MCP servers
@@ -658,12 +674,14 @@ A growing set of community-developed and maintained servers demonstrates various
+- **[Fast Filesystem](https://github.com/efforthye/fast-filesystem-mcp)** - Advanced filesystem operations with large file handling capabilities and Claude-optimized features. Provides fast file reading/writing, sequential reading for large files, directory operations, file search, and streaming writes with backup & recovery.
- **[Federal Reserve Economic Data (FRED)](https://github.com/stefanoamorelli/fred-mcp-server)** (by Stefano Amorelli) - Community developed MCP server to interact with the Federal Reserve Economic Data.
- **[Fetch](https://github.com/zcaceres/fetch-mcp)** - A server that flexibly fetches HTML, JSON, Markdown, or plaintext.
- **[Feyod](https://github.com/jeroenvdmeer/feyod-mcp)** - A server that answers questions about football matches, and specialised in the football club Feyenoord.
- **[FHIR](https://github.com/wso2/fhir-mcp-server)** - A Model Context Protocol server that provides seamless, standardized access to Fast Healthcare Interoperability Resources (FHIR) data from any compatible FHIR server. Designed for easy integration with AI tools, developer workflows, and healthcare applications, it enables natural language and programmatic search, retrieval, and analysis of clinical data.
- **[Fibaro HC3](https://github.com/coding-sailor/mcp-server-hc3)** - MCP server for Fibaro Home Center 3 smart home systems.
- **[Figma](https://github.com/GLips/Figma-Context-MCP)** - Give your coding agent direct access to Figma file data, helping it one-shot design implementation.
- **[Figma](https://github.com/paulvandermeijs/figma-mcp)** - A blazingly fast MCP server to read and export your Figma design files.
- **[Files](https://github.com/flesler/mcp-files)** - Enables agents to quickly find and edit code in a codebase with surgical precision. Find symbols, edit them everywhere.
- **[FileSystem Server](https://github.com/Oncorporation/filesystem_server)** - Local MCP server for Visual Studio 2022 that provides code-workspace functionality by giving AI agents selective access to project folders and files
+- **[finmap.org](https://github.com/finmap-org/mcp-server)** - MCP server providing comprehensive historical data from the US, UK, Russian and Turkish stock exchanges. Access sectors, tickers, company profiles, market cap, volume, value, and trade counts, as well as treemap and histogram visualizations.
- **[Firebase](https://github.com/gannonh/firebase-mcp)** - Server to interact with Firebase services including Firebase Authentication, Firestore, and Firebase Storage.
- **[FireCrawl](https://github.com/vrknetha/mcp-server-firecrawl)** - Advanced web scraping with JavaScript rendering, PDF support, and smart rate limiting
- **[Fish Audio](https://github.com/da-okazaki/mcp-fish-audio-server)** - Text-to-Speech integration with Fish Audio's API, supporting multiple voices, streaming, and real-time playback
@@ -842,6 +860,7 @@ A growing set of community-developed and maintained servers demonstrates various
+- **[MCP Context Provider](https://github.com/doobidoo/MCP-Context-Provider)** - Static server that provides AI models with persistent tool-specific context and rules, preventing context loss between chat sessions and enabling consistent behavior across interactions.
- **[MCP Server Generator](https://github.com/SerhatUzbas/mcp-server-generator)** - An MCP server that creates and manages MCP servers! Helps both non-technical users and developers build custom JavaScript MCP servers with AI guidance, automatic dependency management, and Claude Desktop integration.
- **[MCP STDIO to Streamable HTTP Adapter](https://github.com/pyroprompts/mcp-stdio-to-streamable-http-adapter)** - Connect to Streamable HTTP MCP Servers even if the MCP Client only supports STDIO.
- **[MCP-Ambari-API](https://github.com/call518/MCP-Ambari-API)** - Model Context Protocol (MCP) server for Apache Ambari API integration. This project provides tools for managing Hadoop clusters, including service operations, configuration management, status monitoring, and request tracking.
+- **[MCP-PostgreSQL-Ops](https://github.com/call518/MCP-PostgreSQL-Ops)** - Model Context Protocol (MCP) server for PostgreSQL database operations and monitoring.
- **[mcp-containerd](https://github.com/jokemanfire/mcp-containerd)** - A containerd MCP server implemented in Rust that supports operating the CRI interface.
- **[MCP-Database-Server](https://github.com/executeautomation/mcp-database-server)** - Fastest way to interact with your Database such as SQL Server, SQLite and PostgreSQL
- **[mcp-grep](https://github.com/erniebrodeur/mcp-grep)** - Python-based MCP server that brings grep functionality to LLMs. Supports common grep features including pattern searching, case-insensitive matching, context lines, and recursive directory searches.
@@ -849,6 +868,7 @@ A growing set of community-developed and maintained servers demonstrates various
- **[mcp-local-rag](https://github.com/nkapila6/mcp-local-rag)** - "primitive" RAG-like web search model context protocol (MCP) server that runs locally using Google's MediaPipe Text Embedder and DuckDuckGo Search.
- **[mcp-mcp](https://github.com/wojtyniak/mcp-mcp)** - Meta-MCP Server that acts as a tool discovery service for MCP clients.
- **[mcp-meme-sticky](https://github.com/nkapila6/mcp-meme-sticky)** - Make memes or stickers using MCP server for WhatsApp or Telegram.
+- **[mcp-memory-service](https://github.com/doobidoo/mcp-memory-service)** - Universal MCP memory service providing semantic memory search, persistent storage, and autonomous memory consolidation for AI assistants across 13+ AI applications.
- **[MCP-NixOS](https://github.com/utensils/mcp-nixos)** - A Model Context Protocol server that provides AI assistants with accurate, real-time information about NixOS packages, system options, Home Manager settings, and nix-darwin macOS configurations.
- **[mcp-open-library](https://github.com/8enSmith/mcp-open-library)** - A Model Context Protocol (MCP) server for the Open Library API that enables AI assistants to search for book and author information.
- **[mcp-proxy](https://github.com/sparfenyuk/mcp-proxy)** - Connect to MCP servers that run on SSE transport, or expose stdio servers as an SSE server.
@@ -912,6 +932,7 @@ A growing set of community-developed and maintained servers demonstrates various
- **[NAVER](https://github.com/pfldy2850/py-mcp-naver)** (by pfldy2850) - This MCP server provides tools to interact with various Naver services, such as searching blogs, news, books, and more.
- **[Naver](https://github.com/isnow890/naver-search-mcp)** (by isnow890) - MCP server for Naver Search API integration, supporting blog, news, shopping search and DataLab analytics features.
- **[NBA](https://github.com/Taidgh-Robinson/nba-mcp-server)** - This MCP server provides tools to fetch recent and historical NBA games including basic and advanced statistics.
+- **[NCI GDC](https://github.com/QuentinCody/nci-gdc-mcp-server)** - Unofficial MCP server for the National Cancer Institute's Genomic Data Commons (GDC), providing access to harmonized cancer genomic and clinical data for oncology research.
- **[Neo4j](https://github.com/da-okazaki/mcp-neo4j-server)** - A community-built server that interacts with the Neo4j graph database.
- **[Neovim](https://github.com/bigcodegen/mcp-neovim-server)** - An MCP Server for your Neovim session.
- **[Netbird](https://github.com/aantti/mcp-netbird)** - List and analyze Netbird network peers, groups, policies, and more.
@@ -953,9 +974,11 @@ A growing set of community-developed and maintained servers demonstrates various
- **[OpenLink Generic Python Open Database Connectivity](https://github.com/OpenLinkSoftware/mcp-pyodbc-server)** - Generic Database Management System (DBMS) access via Open Database Connectivity (ODBC) Connectors (Drivers) for PyODBC
- **[OpenLink Generic SQLAlchemy Object-Relational Database Connectivity for PyODBC](https://github.com/OpenLinkSoftware/mcp-sqlalchemy-server)** - Generic Database Management System (DBMS) access via SQLAlchemy (PyODBC) Connectors (Drivers)
- **[OpenMetadata](https://github.com/yangkyeongmo/mcp-server-openmetadata)** - MCP Server for OpenMetadata, an open-source metadata management platform.
+- **[OpenNeuro](https://github.com/QuentinCody/open-neuro-mcp-server)** - Unofficial MCP server for OpenNeuro, providing access to open neuroimaging datasets, study metadata, and brain imaging data for neuroscience research and analysis.
- **[OpenReview](https://github.com/anyakors/openreview-mcp-server)** - An MCP server for [OpenReview](https://openreview.net/) to fetch, read and save manuscripts from AI/ML conferences.
- **[OpenRPC](https://github.com/shanejonas/openrpc-mpc-server)** - Interact with and discover JSON-RPC APIs via [OpenRPC](https://open-rpc.org).
- **[OpenStack](https://github.com/wangsqly0407/openstack-mcp-server)** - MCP server implementation that provides OpenStack interaction.
+- **[Open Targets](https://github.com/QuentinCody/open-targets-mcp-server)** - Unofficial MCP server for the Open Targets Platform, providing access to target-disease associations, drug discovery data, and therapeutic hypothesis generation for biomedical research.
- **[OpenWeather](https://github.com/mschneider82/mcp-openweather)** - Interact with the free openweathermap API to get the current and forecast weather for a location.
- **[Operative WebEvalAgent](https://github.com/Operative-Sh/web-eval-agent)** (by [Operative.sh](https://www.operative.sh)) - An MCP server to test, debug, and fix web applications autonomously.
- **[OPNSense MCP](https://github.com/vespo92/OPNSenseMCP)** - MCP Server for OPNSense Firewall Management and API access
@@ -977,6 +1000,7 @@ A growing set of community-developed and maintained servers demonstrates various
- **[PDMT](https://github.com/paiml/pdmt)** - Pragmatic Deterministic MCP Templating - High-performance deterministic templating library with comprehensive todo validation, quality enforcement, and 0.0 temperature generation for reproducible outputs.
- **[Peacock for VS Code](https://github.com/johnpapa/peacock-mcp)** - MCP Server for the Peacock extension for VS Code, coloring your world, one Code editor at a time. The main goal of the project is to show how an MCP server can be used to interact with APIs.
- **[persistproc](https://github.com/irskep/persistproc)** - MCP server + command line tool that allows agents to see & control long-running processes like web servers.
+- **[Pharos](https://github.com/QuentinCody/pharos-mcp-server)** - Unofficial MCP server for the Pharos database by the National Center for Advancing Translational Sciences (NCATS), providing access to target, drug, and disease information for drug discovery research.
- **[Phone MCP](https://github.com/hao-cyber/phone-mcp)** - 📱 A powerful plugin that lets you control your Android phone. Enables AI agents to perform complex tasks like automatically playing music based on weather or making calls and sending texts.
- **[PIF](https://github.com/hungryrobot1/MCP-PIF)** - A Personal Intelligence Framework (PIF), providing tools for file operations, structured reasoning, and journal-based documentation to support continuity and evolving human-AI collaboration across sessions.
- **[Pinecone](https://github.com/sirmews/mcp-pinecone)** - MCP server for searching and uploading records to Pinecone. Allows for simple RAG features, leveraging Pinecone's Inference API.
@@ -993,6 +1017,7 @@ A growing set of community-developed and maintained servers demonstrates various
- **[Prefect](https://github.com/allen-munsch/mcp-prefect)** - MCP Server for workflow orchestration and ELT/ETL with Prefect Server and [Prefect Cloud](https://www.prefect.io/), using the `prefect` Python client.
- **[Productboard](https://github.com/kenjihikmatullah/productboard-mcp)** - Integrate the Productboard API into agentic workflows via MCP.
- **[Prometheus](https://github.com/pab1it0/prometheus-mcp-server)** - Query and analyze Prometheus, the open-source monitoring system.
+- **[Prometheus (TypeScript)](https://github.com/yanmxa/prometheus-mcp-server)** - A TypeScript implementation that enables AI assistants to query Prometheus using natural language.
- **[PubChem](https://github.com/sssjiang/pubchem_mcp_server)** - Extract drug information from the PubChem API.
- **[PubMed](https://github.com/JackKuo666/PubMed-MCP-Server)** - Enable AI assistants to search, access, and analyze PubMed articles through a simple MCP interface.
- **[Pulumi](https://github.com/dogukanakkaya/pulumi-mcp-server)** - MCP Server to Interact with Pulumi API, creates and lists Stacks
@@ -1013,6 +1038,7 @@ A growing set of community-developed and maintained servers demonstrates various
- **[RAG Web Browser](https://github.com/apify/mcp-server-rag-web-browser)** - An MCP server for Apify's open-source RAG Web Browser [Actor](https://apify.com/apify/rag-web-browser) to perform web searches, scrape URLs, and return content in Markdown.
- **[Raindrop.io](https://github.com/hiromitsusasaki/raindrop-io-mcp-server)** - An integration that allows LLMs to interact with Raindrop.io bookmarks using the Model Context Protocol (MCP).
- **[Random Number](https://github.com/zazencodes/random-number-mcp)** - Provides LLMs with essential random generation abilities, built entirely on Python's standard library.
+- **[RCSB PDB](https://github.com/QuentinCody/rcsb-pdb-mcp-server)** - Unofficial MCP server for the Research Collaboratory for Structural Bioinformatics Protein Data Bank (RCSB PDB), providing access to 3D protein structures, experimental data, and structural bioinformatics information.
- **[Reaper](https://github.com/dschuler36/reaper-mcp-server)** - Interact with your [Reaper](https://www.reaper.fm/) (Digital Audio Workstation) projects.
- **[Redbee](https://github.com/Tamsi/redbee-mcp)** - An MCP server for interacting with the Redbee API.
- **[Redis](https://github.com/GongRzhe/REDIS-MCP-Server)** - Redis database operations and caching microservice server with support for key-value operations, expiration management, and pattern-based key listing.
@@ -1025,6 +1051,7 @@ A growing set of community-developed and maintained servers demonstrates various
- **[Revit MCP](https://github.com/revit-mcp)** - A service implementing the MCP protocol for Autodesk Revit.
- **[Rijksmuseum](https://github.com/r-huijts/rijksmuseum-mcp)** - Interface with the Rijksmuseum API to search artworks, retrieve artwork details, access image tiles, and explore user collections.
- **[Riot Games](https://github.com/jifrozen0110/mcp-riot)** - MCP server for League of Legends – fetch player info, ranks, champion stats, and match history via Riot API.
+- **[Rohlik](https://github.com/tomaspavlin/rohlik-mcp)** - Shop groceries across the Rohlik Group platforms (Rohlik.cz, Knuspr.de, Gurkerl.at, Kifli.hu, Sezamo.ro)
- **[Rquest](https://github.com/xxxbrian/mcp-rquest)** - An MCP server providing realistic browser-like HTTP request capabilities with accurate TLS/JA3/JA4 fingerprints for bypassing anti-bot measures.
- **[Rust MCP Filesystem](https://github.com/rust-mcp-stack/rust-mcp-filesystem)** - Fast, asynchronous MCP server for efficient handling of various filesystem operations built with the power of Rust.
- **[SafetySearch](https://github.com/surabhya/SafetySearch)** - Real-time FDA food safety data: recalls, adverse events, analysis.
@@ -1131,6 +1158,7 @@ A growing set of community-developed and maintained servers demonstrates various
- **[Tyk API Management](https://github.com/TykTechnologies/tyk-dashboard-mcp)** - Chat with all of your organization's managed APIs and perform other API lifecycle operations, managing tokens, users, analytics, and more.
- **[Typesense](https://github.com/suhail-ak-s/mcp-typesense-server)** - A Model Context Protocol (MCP) server implementation that provides AI models with access to Typesense search capabilities. This server enables LLMs to discover, search, and analyze data stored in Typesense collections.
- **[UniFi Dream Machine](https://github.com/sabler/mcp-unifi)** - An MCP server that gets your network telemetry from the UniFi Site Manager and your local UniFi router.
+- **[UniProt](https://github.com/QuentinCody/uniprot-mcp-server)** - Unofficial MCP server for UniProt, providing access to protein sequence data, functional annotations, taxonomic information, and cross-references for proteomics and bioinformatics research.
- **[uniswap-poolspy-mcp](https://github.com/kukapay/uniswap-poolspy-mcp)** - An MCP server that tracks newly created liquidity pools on Uniswap across nine blockchain networks.
- **[uniswap-trader-mcp](https://github.com/kukapay/uniswap-trader-mcp)** - An MCP server for AI agents to automate token swaps on Uniswap DEX across multiple blockchains.
- **[Unity Catalog](https://github.com/ognis1205/mcp-server-unitycatalog)** - An MCP server that enables LLMs to interact with Unity Catalog AI, supporting CRUD operations on Unity Catalog Functions and executing them as MCP tools.
@@ -1150,7 +1178,7 @@ A growing set of community-developed and maintained servers demonstrates various
- **[Video Still Capture](https://github.com/13rac1/videocapture-mcp)** - 📷 Capture video stills from an OpenCV-compatible webcam or other video source.
- **[Virtual location (Google Street View,etc.)](https://github.com/mfukushim/map-traveler-mcp)** - Integrates Google Map, Google Street View, PixAI, Stability.ai, ComfyUI API and Bluesky to provide a virtual location simulation in LLM (written in Effect.ts)
- **[VMware Fusion](https://github.com/yeahdongcn/vmware-fusion-mcp-server)** - Manage VMware Fusion virtual machines via the Fusion REST API.
-- **[Voice MCP](https://github.com/mbailey/voice-mcp)** - Enable voice conversations with Claude using any OpenAI-compatible STT/TTS service ([voice-mcp.com](https://voice-mcp.com))
+- **[Voice Mode](https://github.com/mbailey/voicemode)** - Enable voice conversations with Claude using any OpenAI-compatible STT/TTS service ([voicemode.ai](https://voicemode.ai))
- **[Voice Status Report](https://github.com/tomekkorbak/voice-status-report-mcp-server)** - An MCP server that provides voice status updates using OpenAI's text-to-speech API, to be used with Cursor or Claude Code.
- **[VolcEngine TOS](https://github.com/dinghuazhou/sample-mcp-server-tos)** - A sample MCP server for VolcEngine TOS that flexibly get objects from TOS.
- **[Voyp](https://github.com/paulotaylor/voyp-mcp)** - VOYP MCP server for making calls using Artificial Intelligence.
@@ -1165,6 +1193,7 @@ A growing set of community-developed and maintained servers demonstrates various
- **[WhatsApp MCP Server](https://github.com/lharries/whatsapp-mcp)** - MCP server for your personal WhatsApp handling individuals, groups, searching and sending.
- **[Whois MCP](https://github.com/bharathvaj-ganesan/whois-mcp)** - MCP server that performs whois lookup against domain, IP, ASN and TLD.
- **[Wikidata MCP](https://github.com/zzaebok/mcp-wikidata)** - Wikidata MCP server that interacts with Wikidata by searching identifiers, extracting metadata, and executing SPARQL queries.
+- **[Wikidata SPARQL](https://github.com/QuentinCody/wikidata-sparql-mcp-server)** - Unofficial remote MCP server for Wikidata's SPARQL endpoint, providing access to structured knowledge data, entity relationships, and semantic queries for research and data analysis.
- **[Wikipedia MCP](https://github.com/Rudra-ravi/wikipedia-mcp)** - Access and search Wikipedia articles via MCP for AI-powered information retrieval.
- **[WildFly MCP](https://github.com/wildfly-extras/wildfly-mcp)** - WildFly MCP server that enables LLM to interact with running WildFly servers (retrieve metrics, logs, invoke operations, ...).
- **[Windows CLI](https://github.com/SimonB97/win-cli-mcp-server)** - MCP server for secure command-line interactions on Windows systems, enabling controlled access to PowerShell, CMD, and Git Bash shells.
@@ -1193,6 +1222,7 @@ A growing set of community-developed and maintained servers demonstrates various
- **[YouTube Video Summarizer](https://github.com/nabid-pf/youtube-video-summarizer-mcp)** - Summarize lengthy YouTube videos.
- **[yutu](https://github.com/eat-pray-ai/yutu)** - A fully functional MCP server and CLI for YouTube to automate YouTube operations.
- **[ZapCap](https://github.com/bogdan01m/zapcap-mcp-server)** - MCP server for ZapCap API providing video caption and B-roll generation via natural language
+- **[ZincBind](https://github.com/QuentinCody/zincbind-mcp-server)** - Unofficial MCP server for ZincBind, providing access to a comprehensive database of zinc binding sites in proteins, structural coordination data, and metalloproteomics research information.
- **[Zoom](https://github.com/Prathamesh0901/zoom-mcp-server/tree/main)** - Create, update, read, and delete your Zoom meetings.
## 📚 Frameworks
@@ -1200,6 +1230,7 @@ These are high-level frameworks that make it easier to build MCP servers or clie
### For servers
+* **[Anubis MCP](https://github.com/zoedsoupe/anubis-mcp)** (Elixir) - A high-performance and high-level Model Context Protocol (MCP) implementation in Elixir. Think of it as "Live View" for MCP.
* **[ModelFetch](https://github.com/phuctm97/modelfetch/)** (TypeScript) - Runtime-agnostic SDK to create and deploy MCP servers anywhere TypeScript/JavaScript runs
* **[EasyMCP](https://github.com/zcaceres/easy-mcp/)** (TypeScript)
* **[FastAPI to MCP auto generator](https://github.com/tadata-org/fastapi_mcp)** – A zero-configuration tool for automatically exposing FastAPI endpoints as MCP tools by **[Tadata](https://tadata.com/)**
@@ -1222,7 +1253,6 @@ These are high-level frameworks that make it easier to build MCP servers or clie
* **[Template MCP Server](https://github.com/mcpdotdirect/template-mcp-server)** - A CLI tool to create a new Model Context Protocol server project with TypeScript support, dual transport options, and an extensible structure
* **[AgentR Universal MCP SDK](https://github.com/universal-mcp/universal-mcp)** - A python SDK to build MCP Servers with inbuilt credential management by **[Agentr](https://agentr.dev/home)**
* **[Vercel MCP Adapter](https://github.com/vercel/mcp-adapter)** (TypeScript) - A simple package to start serving an MCP server on most major JS meta-frameworks including Next, Nuxt, Svelte, and more.
-* **[Hermes MCP](https://github.com/cloudwalk/hermes-mcp)** (Elixir) - A high-performance and high-level Model Context Protocol (MCP) implementation in Elixir. Think like "Live View" for MCP.
* **[PHP MCP Server](https://github.com/php-mcp/server)** (PHP) - Core PHP implementation for the Model Context Protocol (MCP) server
### For clients
diff --git a/package-lock.json b/package-lock.json
index 6a9bac93..c07a7418 100644
--- a/package-lock.json
+++ b/package-lock.json
@@ -1849,10 +1849,11 @@
}
},
"node_modules/brace-expansion": {
- "version": "1.1.11",
- "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.11.tgz",
- "integrity": "sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==",
+ "version": "1.1.12",
+ "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.12.tgz",
+ "integrity": "sha512-9T9UjW3r0UW5c1Q7GTwllptXwhvYmEzFhzMfZ9H7FQWt+uZePjZPjBP/W1ZEyZ1twGWom5/56TF4lPcqjnDHcg==",
"dev": true,
+ "license": "MIT",
"dependencies": {
"balanced-match": "^1.0.0",
"concat-map": "0.0.1"
@@ -6156,7 +6157,7 @@
},
"src/filesystem": {
"name": "@modelcontextprotocol/server-filesystem",
- "version": "0.6.2",
+ "version": "0.6.3",
"license": "MIT",
"dependencies": {
"@modelcontextprotocol/sdk": "^1.17.0",
diff --git a/src/filesystem/README.md b/src/filesystem/README.md
index a6154431..0f456f3c 100644
--- a/src/filesystem/README.md
+++ b/src/filesystem/README.md
@@ -73,6 +73,7 @@ The server's directory access control follows this flow:
- `head` (number, optional): First N lines
- `tail` (number, optional): Last N lines
- Always treats the file as UTF-8 text regardless of extension
+ - Cannot specify both `head` and `tail` simultaneously
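+  - Example arguments for reading only the first 10 lines (an illustrative sketch; assumes the tool's usual `path` argument alongside `head`):
+
+    ```json
+    {
+      "path": "/path/to/file.txt",
+      "head": 10
+    }
+    ```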
- **read_media_file**
- Read an image or audio file
@@ -119,6 +120,23 @@ The server's directory access control follows this flow:
- List directory contents with [FILE] or [DIR] prefixes
- Input: `path` (string)
+- **list_directory_with_sizes**
+ - List directory contents with [FILE] or [DIR] prefixes, including file sizes
+ - Inputs:
+ - `path` (string): Directory path to list
+ - `sortBy` (string, optional): Sort entries by "name" or "size" (default: "name")
+ - Returns detailed listing with file sizes and summary statistics
+ - Shows total files, directories, and combined size
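+  - Example output (an illustrative sketch; the exact formatting may differ):
+
+    ```
+    [FILE] report.pdf (2.34 MB)
+    [DIR]  src
+    [FILE] notes.txt (1.15 KB)
+
+    Total: 2 files, 1 directory
+    Combined size: 2.35 MB
+    ```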
+
+- **directory_tree**
+ - Get a recursive tree view of files and directories as a JSON structure
+ - Input: `path` (string): Starting directory path
+ - Returns JSON structure with:
+ - `name`: File/directory name
+ - `type`: "file" or "directory"
+ - `children`: Array of child entries (for directories only)
+ - Output is formatted with 2-space indentation for readability
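+  - Example result for a small directory (illustrative; the field names follow the structure above, and returning a top-level array is an assumption):
+
+    ```json
+    [
+      {
+        "name": "src",
+        "type": "directory",
+        "children": [
+          { "name": "index.ts", "type": "file" }
+        ]
+      },
+      { "name": "README.md", "type": "file" }
+    ]
+    ```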
+
- **move_file**
- Move or rename files and directories
- Inputs:
diff --git a/src/filesystem/__tests__/lib.test.ts b/src/filesystem/__tests__/lib.test.ts
new file mode 100644
index 00000000..76d36792
--- /dev/null
+++ b/src/filesystem/__tests__/lib.test.ts
@@ -0,0 +1,701 @@
+import { describe, it, expect, beforeEach, afterEach, jest } from '@jest/globals';
+import fs from 'fs/promises';
+import path from 'path';
+import os from 'os';
+import {
+ // Pure utility functions
+ formatSize,
+ normalizeLineEndings,
+ createUnifiedDiff,
+ // Security & validation functions
+ validatePath,
+ setAllowedDirectories,
+ // File operations
+ getFileStats,
+ readFileContent,
+ writeFileContent,
+ // Search & filtering functions
+ searchFilesWithValidation,
+ // File editing functions
+ applyFileEdits,
+ tailFile,
+ headFile
+} from '../lib.js';
+
+// Mock fs module
+jest.mock('fs/promises');
+const mockFs = fs as jest.Mocked<typeof fs>;
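+// jest.mock('fs/promises') auto-mocks the module, replacing each export with a jest.fn(),
+// so every test below stubs just the fs calls it relies on (e.g. mockFs.stat.mockResolvedValueOnce).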
+
+describe('Lib Functions', () => {
+ beforeEach(() => {
+ jest.clearAllMocks();
+ // Set up allowed directories for tests
+ const allowedDirs = process.platform === 'win32' ? ['C:\\Users\\test', 'C:\\temp', 'C:\\allowed'] : ['/home/user', '/tmp', '/allowed'];
+ setAllowedDirectories(allowedDirs);
+ });
+
+ afterEach(() => {
+ jest.restoreAllMocks();
+ // Clear allowed directories after tests
+ setAllowedDirectories([]);
+ });
+
+ describe('Pure Utility Functions', () => {
+ describe('formatSize', () => {
+ it('formats bytes correctly', () => {
+ expect(formatSize(0)).toBe('0 B');
+ expect(formatSize(512)).toBe('512 B');
+ expect(formatSize(1024)).toBe('1.00 KB');
+ expect(formatSize(1536)).toBe('1.50 KB');
+ expect(formatSize(1048576)).toBe('1.00 MB');
+ expect(formatSize(1073741824)).toBe('1.00 GB');
+ expect(formatSize(1099511627776)).toBe('1.00 TB');
+ });
+
+ it('handles edge cases', () => {
+ expect(formatSize(1023)).toBe('1023 B');
+ expect(formatSize(1025)).toBe('1.00 KB');
+ expect(formatSize(1048575)).toBe('1024.00 KB');
+ });
+
+ it('handles very large numbers beyond TB', () => {
+ // The function only supports up to TB, so very large numbers will show as TB
+ expect(formatSize(1024 * 1024 * 1024 * 1024 * 1024)).toBe('1024.00 TB');
+ expect(formatSize(Number.MAX_SAFE_INTEGER)).toContain('TB');
+ });
+
+ it('handles negative numbers', () => {
+ // Negative numbers will result in NaN for the log calculation
+ expect(formatSize(-1024)).toContain('NaN');
+ expect(formatSize(-0)).toBe('0 B');
+ });
+
+ it('handles decimal numbers', () => {
+ expect(formatSize(1536.5)).toBe('1.50 KB');
+ expect(formatSize(1023.9)).toBe('1023.9 B');
+ });
+
+ it('handles very small positive numbers', () => {
+ expect(formatSize(1)).toBe('1 B');
+ expect(formatSize(0.5)).toBe('0.5 B');
+ expect(formatSize(0.1)).toBe('0.1 B');
+ });
+ });
+
+ describe('normalizeLineEndings', () => {
+ it('converts CRLF to LF', () => {
+ expect(normalizeLineEndings('line1\r\nline2\r\nline3')).toBe('line1\nline2\nline3');
+ });
+
+ it('leaves LF unchanged', () => {
+ expect(normalizeLineEndings('line1\nline2\nline3')).toBe('line1\nline2\nline3');
+ });
+
+ it('handles mixed line endings', () => {
+ expect(normalizeLineEndings('line1\r\nline2\nline3\r\n')).toBe('line1\nline2\nline3\n');
+ });
+
+ it('handles empty string', () => {
+ expect(normalizeLineEndings('')).toBe('');
+ });
+ });
+
+ describe('createUnifiedDiff', () => {
+ it('creates diff for simple changes', () => {
+ const original = 'line1\nline2\nline3';
+ const modified = 'line1\nmodified line2\nline3';
+ const diff = createUnifiedDiff(original, modified, 'test.txt');
+
+ expect(diff).toContain('--- test.txt');
+ expect(diff).toContain('+++ test.txt');
+ expect(diff).toContain('-line2');
+ expect(diff).toContain('+modified line2');
+ });
+
+ it('handles CRLF normalization', () => {
+ const original = 'line1\r\nline2\r\n';
+ const modified = 'line1\nmodified line2\n';
+ const diff = createUnifiedDiff(original, modified);
+
+ expect(diff).toContain('-line2');
+ expect(diff).toContain('+modified line2');
+ });
+
+ it('handles identical content', () => {
+ const content = 'line1\nline2\nline3';
+ const diff = createUnifiedDiff(content, content);
+
+ // Should not contain any +/- lines for identical content (excluding header lines)
+ expect(diff.split('\n').filter((line: string) => line.startsWith('+++') || line.startsWith('---'))).toHaveLength(2);
+ expect(diff.split('\n').filter((line: string) => line.startsWith('+') && !line.startsWith('+++'))).toHaveLength(0);
+ expect(diff.split('\n').filter((line: string) => line.startsWith('-') && !line.startsWith('---'))).toHaveLength(0);
+ });
+
+ it('handles empty content', () => {
+ const diff = createUnifiedDiff('', '');
+ expect(diff).toContain('--- file');
+ expect(diff).toContain('+++ file');
+ });
+
+ it('handles default filename parameter', () => {
+ const diff = createUnifiedDiff('old', 'new');
+ expect(diff).toContain('--- file');
+ expect(diff).toContain('+++ file');
+ });
+
+ it('handles custom filename', () => {
+ const diff = createUnifiedDiff('old', 'new', 'custom.txt');
+ expect(diff).toContain('--- custom.txt');
+ expect(diff).toContain('+++ custom.txt');
+ });
+ });
+ });
+
+ describe('Security & Validation Functions', () => {
+ describe('validatePath', () => {
+ // Use Windows-compatible paths for testing
+ const allowedDirs = process.platform === 'win32' ? ['C:\\Users\\test', 'C:\\temp'] : ['/home/user', '/tmp'];
+
+ beforeEach(() => {
+ mockFs.realpath.mockImplementation(async (path: any) => path.toString());
+ });
+
+ it('validates allowed paths', async () => {
+ const testPath = process.platform === 'win32' ? 'C:\\Users\\test\\file.txt' : '/home/user/file.txt';
+ const result = await validatePath(testPath);
+ expect(result).toBe(testPath);
+ });
+
+ it('rejects disallowed paths', async () => {
+ const testPath = process.platform === 'win32' ? 'C:\\Windows\\System32\\file.txt' : '/etc/passwd';
+ await expect(validatePath(testPath))
+ .rejects.toThrow('Access denied - path outside allowed directories');
+ });
+
+ it('handles non-existent files by checking parent directory', async () => {
+ const newFilePath = process.platform === 'win32' ? 'C:\\Users\\test\\newfile.txt' : '/home/user/newfile.txt';
+ const parentPath = process.platform === 'win32' ? 'C:\\Users\\test' : '/home/user';
+
+ // Create an error with the ENOENT code that the implementation checks for
+ const enoentError = new Error('ENOENT') as NodeJS.ErrnoException;
+ enoentError.code = 'ENOENT';
+
+ mockFs.realpath
+ .mockRejectedValueOnce(enoentError)
+ .mockResolvedValueOnce(parentPath);
+
+ const result = await validatePath(newFilePath);
+ expect(result).toBe(path.resolve(newFilePath));
+ });
+
+ it('rejects when parent directory does not exist', async () => {
+ const newFilePath = process.platform === 'win32' ? 'C:\\Users\\test\\nonexistent\\newfile.txt' : '/home/user/nonexistent/newfile.txt';
+
+ // Create errors with the ENOENT code
+ const enoentError1 = new Error('ENOENT') as NodeJS.ErrnoException;
+ enoentError1.code = 'ENOENT';
+ const enoentError2 = new Error('ENOENT') as NodeJS.ErrnoException;
+ enoentError2.code = 'ENOENT';
+
+ mockFs.realpath
+ .mockRejectedValueOnce(enoentError1)
+ .mockRejectedValueOnce(enoentError2);
+
+ await expect(validatePath(newFilePath))
+ .rejects.toThrow('Parent directory does not exist');
+ });
+ });
+ });
+
+ describe('File Operations', () => {
+ describe('getFileStats', () => {
+ it('returns file statistics', async () => {
+ const mockStats = {
+ size: 1024,
+ birthtime: new Date('2023-01-01'),
+ mtime: new Date('2023-01-02'),
+ atime: new Date('2023-01-03'),
+ isDirectory: () => false,
+ isFile: () => true,
+ mode: 0o644
+ };
+
+ mockFs.stat.mockResolvedValueOnce(mockStats as any);
+
+ const result = await getFileStats('/test/file.txt');
+
+ expect(result).toEqual({
+ size: 1024,
+ created: new Date('2023-01-01'),
+ modified: new Date('2023-01-02'),
+ accessed: new Date('2023-01-03'),
+ isDirectory: false,
+ isFile: true,
+ permissions: '644'
+ });
+ });
+
+ it('handles directory statistics', async () => {
+ const mockStats = {
+ size: 4096,
+ birthtime: new Date('2023-01-01'),
+ mtime: new Date('2023-01-02'),
+ atime: new Date('2023-01-03'),
+ isDirectory: () => true,
+ isFile: () => false,
+ mode: 0o755
+ };
+
+ mockFs.stat.mockResolvedValueOnce(mockStats as any);
+
+ const result = await getFileStats('/test/dir');
+
+ expect(result.isDirectory).toBe(true);
+ expect(result.isFile).toBe(false);
+ expect(result.permissions).toBe('755');
+ });
+ });
+
+ describe('readFileContent', () => {
+ it('reads file with default encoding', async () => {
+ mockFs.readFile.mockResolvedValueOnce('file content');
+
+ const result = await readFileContent('/test/file.txt');
+
+ expect(result).toBe('file content');
+ expect(mockFs.readFile).toHaveBeenCalledWith('/test/file.txt', 'utf-8');
+ });
+
+ it('reads file with custom encoding', async () => {
+ mockFs.readFile.mockResolvedValueOnce('file content');
+
+ const result = await readFileContent('/test/file.txt', 'ascii');
+
+ expect(result).toBe('file content');
+ expect(mockFs.readFile).toHaveBeenCalledWith('/test/file.txt', 'ascii');
+ });
+ });
+
+ describe('writeFileContent', () => {
+ it('writes file content', async () => {
+ mockFs.writeFile.mockResolvedValueOnce(undefined);
+
+ await writeFileContent('/test/file.txt', 'new content');
+
+ expect(mockFs.writeFile).toHaveBeenCalledWith('/test/file.txt', 'new content', { encoding: "utf-8", flag: 'wx' });
+ });
+ });
+
+ });
+
+ describe('Search & Filtering Functions', () => {
+ describe('searchFilesWithValidation', () => {
+ beforeEach(() => {
+ mockFs.realpath.mockImplementation(async (path: any) => path.toString());
+ });
+
+ it('excludes files matching exclude patterns', async () => {
+ const mockEntries = [
+ { name: 'test.txt', isDirectory: () => false },
+ { name: 'test.log', isDirectory: () => false },
+ { name: 'node_modules', isDirectory: () => true }
+ ];
+
+ mockFs.readdir.mockResolvedValueOnce(mockEntries as any);
+
+ const testDir = process.platform === 'win32' ? 'C:\\allowed\\dir' : '/allowed/dir';
+ const allowedDirs = process.platform === 'win32' ? ['C:\\allowed'] : ['/allowed'];
+
+ // Mock realpath to return the same path for validation to pass
+ mockFs.realpath.mockImplementation(async (inputPath: any) => {
+ const pathStr = inputPath.toString();
+ // Return the path as-is for validation
+ return pathStr;
+ });
+
+ const result = await searchFilesWithValidation(
+ testDir,
+ 'test',
+ allowedDirs,
+ { excludePatterns: ['*.log', 'node_modules'] }
+ );
+
+ const expectedResult = process.platform === 'win32' ? 'C:\\allowed\\dir\\test.txt' : '/allowed/dir/test.txt';
+ expect(result).toEqual([expectedResult]);
+ });
+
+ it('handles validation errors during search', async () => {
+ const mockEntries = [
+ { name: 'test.txt', isDirectory: () => false },
+ { name: 'invalid_file.txt', isDirectory: () => false }
+ ];
+
+ mockFs.readdir.mockResolvedValueOnce(mockEntries as any);
+
+ // Mock validatePath to throw error for invalid_file.txt
+ mockFs.realpath.mockImplementation(async (path: any) => {
+ if (path.toString().includes('invalid_file.txt')) {
+ throw new Error('Access denied');
+ }
+ return path.toString();
+ });
+
+ const testDir = process.platform === 'win32' ? 'C:\\allowed\\dir' : '/allowed/dir';
+ const allowedDirs = process.platform === 'win32' ? ['C:\\allowed'] : ['/allowed'];
+
+ const result = await searchFilesWithValidation(
+ testDir,
+ 'test',
+ allowedDirs,
+ {}
+ );
+
+ // Should only return the valid file, skipping the invalid one
+ const expectedResult = process.platform === 'win32' ? 'C:\\allowed\\dir\\test.txt' : '/allowed/dir/test.txt';
+ expect(result).toEqual([expectedResult]);
+ });
+
+ it('handles complex exclude patterns with wildcards', async () => {
+ const mockEntries = [
+ { name: 'test.txt', isDirectory: () => false },
+ { name: 'test.backup', isDirectory: () => false },
+ { name: 'important_test.js', isDirectory: () => false }
+ ];
+
+ mockFs.readdir.mockResolvedValueOnce(mockEntries as any);
+
+ const testDir = process.platform === 'win32' ? 'C:\\allowed\\dir' : '/allowed/dir';
+ const allowedDirs = process.platform === 'win32' ? ['C:\\allowed'] : ['/allowed'];
+
+ const result = await searchFilesWithValidation(
+ testDir,
+ 'test',
+ allowedDirs,
+ { excludePatterns: ['*.backup'] }
+ );
+
+ const expectedResults = process.platform === 'win32' ? [
+ 'C:\\allowed\\dir\\test.txt',
+ 'C:\\allowed\\dir\\important_test.js'
+ ] : [
+ '/allowed/dir/test.txt',
+ '/allowed/dir/important_test.js'
+ ];
+ expect(result).toEqual(expectedResults);
+ });
+ });
+ });
+
+ describe('File Editing Functions', () => {
+ describe('applyFileEdits', () => {
+ beforeEach(() => {
+ mockFs.readFile.mockResolvedValue('line1\nline2\nline3\n');
+ mockFs.writeFile.mockResolvedValue(undefined);
+ });
+
+ it('applies simple text replacement', async () => {
+ const edits = [
+ { oldText: 'line2', newText: 'modified line2' }
+ ];
+
+ mockFs.rename.mockResolvedValueOnce(undefined);
+
+ const result = await applyFileEdits('/test/file.txt', edits, false);
+
+ expect(result).toContain('modified line2');
+ // Should write to temporary file then rename
+ expect(mockFs.writeFile).toHaveBeenCalledWith(
+ expect.stringMatching(/\/test\/file\.txt\.[a-f0-9]+\.tmp$/),
+ 'line1\nmodified line2\nline3\n',
+ 'utf-8'
+ );
+ expect(mockFs.rename).toHaveBeenCalledWith(
+ expect.stringMatching(/\/test\/file\.txt\.[a-f0-9]+\.tmp$/),
+ '/test/file.txt'
+ );
+ });
+
+ it('handles dry run mode', async () => {
+ const edits = [
+ { oldText: 'line2', newText: 'modified line2' }
+ ];
+
+ const result = await applyFileEdits('/test/file.txt', edits, true);
+
+ expect(result).toContain('modified line2');
+ expect(mockFs.writeFile).not.toHaveBeenCalled();
+ });
+
+ it('applies multiple edits sequentially', async () => {
+ const edits = [
+ { oldText: 'line1', newText: 'first line' },
+ { oldText: 'line3', newText: 'third line' }
+ ];
+
+ mockFs.rename.mockResolvedValueOnce(undefined);
+
+ await applyFileEdits('/test/file.txt', edits, false);
+
+ expect(mockFs.writeFile).toHaveBeenCalledWith(
+ expect.stringMatching(/\/test\/file\.txt\.[a-f0-9]+\.tmp$/),
+ 'first line\nline2\nthird line\n',
+ 'utf-8'
+ );
+ expect(mockFs.rename).toHaveBeenCalledWith(
+ expect.stringMatching(/\/test\/file\.txt\.[a-f0-9]+\.tmp$/),
+ '/test/file.txt'
+ );
+ });
+
+ it('handles whitespace-flexible matching', async () => {
+ mockFs.readFile.mockResolvedValue(' line1\n line2\n line3\n');
+
+ const edits = [
+ { oldText: 'line2', newText: 'modified line2' }
+ ];
+
+ mockFs.rename.mockResolvedValueOnce(undefined);
+
+ await applyFileEdits('/test/file.txt', edits, false);
+
+ expect(mockFs.writeFile).toHaveBeenCalledWith(
+ expect.stringMatching(/\/test\/file\.txt\.[a-f0-9]+\.tmp$/),
+ ' line1\n modified line2\n line3\n',
+ 'utf-8'
+ );
+ expect(mockFs.rename).toHaveBeenCalledWith(
+ expect.stringMatching(/\/test\/file\.txt\.[a-f0-9]+\.tmp$/),
+ '/test/file.txt'
+ );
+ });
+
+ it('throws error for non-matching edits', async () => {
+ const edits = [
+ { oldText: 'nonexistent line', newText: 'replacement' }
+ ];
+
+ await expect(applyFileEdits('/test/file.txt', edits, false))
+ .rejects.toThrow('Could not find exact match for edit');
+ });
+
+ it('handles complex multi-line edits with indentation', async () => {
+ mockFs.readFile.mockResolvedValue('function test() {\n console.log("hello");\n return true;\n}');
+
+ const edits = [
+ {
+ oldText: ' console.log("hello");\n return true;',
+ newText: ' console.log("world");\n console.log("test");\n return false;'
+ }
+ ];
+
+ mockFs.rename.mockResolvedValueOnce(undefined);
+
+ await applyFileEdits('/test/file.js', edits, false);
+
+ expect(mockFs.writeFile).toHaveBeenCalledWith(
+ expect.stringMatching(/\/test\/file\.js\.[a-f0-9]+\.tmp$/),
+ 'function test() {\n console.log("world");\n console.log("test");\n return false;\n}',
+ 'utf-8'
+ );
+ expect(mockFs.rename).toHaveBeenCalledWith(
+ expect.stringMatching(/\/test\/file\.js\.[a-f0-9]+\.tmp$/),
+ '/test/file.js'
+ );
+ });
+
+ it('handles edits with different indentation patterns', async () => {
+ mockFs.readFile.mockResolvedValue(' if (condition) {\n doSomething();\n }');
+
+ const edits = [
+ {
+ oldText: 'doSomething();',
+ newText: 'doSomethingElse();\n doAnotherThing();'
+ }
+ ];
+
+ mockFs.rename.mockResolvedValueOnce(undefined);
+
+ await applyFileEdits('/test/file.js', edits, false);
+
+ expect(mockFs.writeFile).toHaveBeenCalledWith(
+ expect.stringMatching(/\/test\/file\.js\.[a-f0-9]+\.tmp$/),
+ ' if (condition) {\n doSomethingElse();\n doAnotherThing();\n }',
+ 'utf-8'
+ );
+ expect(mockFs.rename).toHaveBeenCalledWith(
+ expect.stringMatching(/\/test\/file\.js\.[a-f0-9]+\.tmp$/),
+ '/test/file.js'
+ );
+ });
+
+ it('handles CRLF line endings in file content', async () => {
+ mockFs.readFile.mockResolvedValue('line1\r\nline2\r\nline3\r\n');
+
+ const edits = [
+ { oldText: 'line2', newText: 'modified line2' }
+ ];
+
+ mockFs.rename.mockResolvedValueOnce(undefined);
+
+ await applyFileEdits('/test/file.txt', edits, false);
+
+ expect(mockFs.writeFile).toHaveBeenCalledWith(
+ expect.stringMatching(/\/test\/file\.txt\.[a-f0-9]+\.tmp$/),
+ 'line1\nmodified line2\nline3\n',
+ 'utf-8'
+ );
+ expect(mockFs.rename).toHaveBeenCalledWith(
+ expect.stringMatching(/\/test\/file\.txt\.[a-f0-9]+\.tmp$/),
+ '/test/file.txt'
+ );
+ });
+ });
+
+ describe('tailFile', () => {
+ it('handles empty files', async () => {
+ mockFs.stat.mockResolvedValue({ size: 0 } as any);
+
+ const result = await tailFile('/test/empty.txt', 5);
+
+ expect(result).toBe('');
+ expect(mockFs.open).not.toHaveBeenCalled();
+ });
+
+ it('calls stat to check file size', async () => {
+ mockFs.stat.mockResolvedValue({ size: 100 } as any);
+
+ // Mock file handle with proper typing
+ const mockFileHandle = {
+ read: jest.fn(),
+ close: jest.fn()
+ } as any;
+
+ mockFileHandle.read.mockResolvedValue({ bytesRead: 0 });
+ mockFileHandle.close.mockResolvedValue(undefined);
+
+ mockFs.open.mockResolvedValue(mockFileHandle);
+
+ await tailFile('/test/file.txt', 2);
+
+ expect(mockFs.stat).toHaveBeenCalledWith('/test/file.txt');
+ expect(mockFs.open).toHaveBeenCalledWith('/test/file.txt', 'r');
+ });
+
+ it('handles files with content and returns last lines', async () => {
+ mockFs.stat.mockResolvedValue({ size: 50 } as any);
+
+ const mockFileHandle = {
+ read: jest.fn(),
+ close: jest.fn()
+ } as any;
+
+ // Simulate reading file content in chunks
+ mockFileHandle.read
+ .mockResolvedValueOnce({ bytesRead: 20, buffer: Buffer.from('line3\nline4\nline5\n') })
+ .mockResolvedValueOnce({ bytesRead: 0 });
+ mockFileHandle.close.mockResolvedValue(undefined);
+
+ mockFs.open.mockResolvedValue(mockFileHandle);
+
+ const result = await tailFile('/test/file.txt', 2);
+
+ expect(mockFileHandle.close).toHaveBeenCalled();
+ });
+
+ it('handles read errors gracefully', async () => {
+ mockFs.stat.mockResolvedValue({ size: 100 } as any);
+
+ const mockFileHandle = {
+ read: jest.fn(),
+ close: jest.fn()
+ } as any;
+
+ mockFileHandle.read.mockResolvedValue({ bytesRead: 0 });
+ mockFileHandle.close.mockResolvedValue(undefined);
+
+ mockFs.open.mockResolvedValue(mockFileHandle);
+
+ await tailFile('/test/file.txt', 5);
+
+ expect(mockFileHandle.close).toHaveBeenCalled();
+ });
+ });
+
+ describe('headFile', () => {
+ it('opens file for reading', async () => {
+ // Mock file handle with proper typing
+ const mockFileHandle = {
+ read: jest.fn(),
+ close: jest.fn()
+ } as any;
+
+ mockFileHandle.read.mockResolvedValue({ bytesRead: 0 });
+ mockFileHandle.close.mockResolvedValue(undefined);
+
+ mockFs.open.mockResolvedValue(mockFileHandle);
+
+ await headFile('/test/file.txt', 2);
+
+ expect(mockFs.open).toHaveBeenCalledWith('/test/file.txt', 'r');
+ });
+
+ it('handles files with content and returns first lines', async () => {
+ const mockFileHandle = {
+ read: jest.fn(),
+ close: jest.fn()
+ } as any;
+
+ // Simulate reading file content with newlines
+ mockFileHandle.read
+ .mockResolvedValueOnce({ bytesRead: 20, buffer: Buffer.from('line1\nline2\nline3\n') })
+ .mockResolvedValueOnce({ bytesRead: 0 });
+ mockFileHandle.close.mockResolvedValue(undefined);
+
+ mockFs.open.mockResolvedValue(mockFileHandle);
+
+ const result = await headFile('/test/file.txt', 2);
+
+ expect(mockFileHandle.close).toHaveBeenCalled();
+ });
+
+ it('handles files with leftover content', async () => {
+ const mockFileHandle = {
+ read: jest.fn(),
+ close: jest.fn()
+ } as any;
+
+ // Simulate reading file content without final newline
+ mockFileHandle.read
+ .mockResolvedValueOnce({ bytesRead: 15, buffer: Buffer.from('line1\nline2\nend') })
+ .mockResolvedValueOnce({ bytesRead: 0 });
+ mockFileHandle.close.mockResolvedValue(undefined);
+
+ mockFs.open.mockResolvedValue(mockFileHandle);
+
+ const result = await headFile('/test/file.txt', 5);
+
+ expect(mockFileHandle.close).toHaveBeenCalled();
+ });
+
+ it('handles reaching requested line count', async () => {
+ const mockFileHandle = {
+ read: jest.fn(),
+ close: jest.fn()
+ } as any;
+
+ // Simulate reading exactly the requested number of lines
+ mockFileHandle.read
+ .mockResolvedValueOnce({ bytesRead: 12, buffer: Buffer.from('line1\nline2\n') })
+ .mockResolvedValueOnce({ bytesRead: 0 });
+ mockFileHandle.close.mockResolvedValue(undefined);
+
+ mockFs.open.mockResolvedValue(mockFileHandle);
+
+ const result = await headFile('/test/file.txt', 2);
+
+ expect(mockFileHandle.close).toHaveBeenCalled();
+ });
+ });
+ });
+});
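The `tailFile`/`headFile` tests above only assert that the handle is closed, because the stubbed `read` returns its own `buffer` property while the implementations copy bytes out of the `Buffer` they pass in, so the stubbed content never reaches the result. A hedged sketch of a position-aware stub that would let the same tests assert on content; it assumes the suite's existing `mockFs` setup and a `tailFile` import, and belongs inside an async `it` body:

```typescript
// Position-aware FileHandle stub: copy the requested byte range into the
// caller's buffer the way fs/promises does, so output becomes assertable.
const content = Buffer.from('line1\nline2\nline3\n');
mockFs.stat.mockResolvedValue({ size: content.length } as any);

const handle = {
  read: jest.fn(async (buf: Buffer, offset: number, length: number, position: number) => {
    const slice = content.subarray(position, position + length);
    slice.copy(buf, offset);
    return { bytesRead: slice.length, buffer: buf };
  }),
  close: jest.fn(async () => undefined),
} as any;
mockFs.open.mockResolvedValue(handle);

const lastTwo = await tailFile('/test/file.txt', 2);
// lastTwo now reflects the stubbed file content, so the test can assert on
// the returned lines instead of only checking that close() was called.
expect(handle.close).toHaveBeenCalled();
```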
diff --git a/src/filesystem/__tests__/path-utils.test.ts b/src/filesystem/__tests__/path-utils.test.ts
index 00df4e04..8768de20 100644
--- a/src/filesystem/__tests__/path-utils.test.ts
+++ b/src/filesystem/__tests__/path-utils.test.ts
@@ -162,6 +162,12 @@ describe('Path Utilities', () => {
expect(result).not.toContain('~');
});
+ it('expands bare ~ to home directory', () => {
+ const result = expandHome('~');
+ expect(result).not.toContain('~');
+ expect(result.length).toBeGreaterThan(0);
+ });
+
it('leaves other paths unchanged', () => {
expect(expandHome('C:/test')).toBe('C:/test');
});
diff --git a/src/filesystem/__tests__/path-validation.test.ts b/src/filesystem/__tests__/path-validation.test.ts
index 38a72573..06c65398 100644
--- a/src/filesystem/__tests__/path-validation.test.ts
+++ b/src/filesystem/__tests__/path-validation.test.ts
@@ -4,6 +4,49 @@ import * as fs from 'fs/promises';
import * as os from 'os';
import { isPathWithinAllowedDirectories } from '../path-validation.js';
+/**
+ * Check if the current environment supports symlink creation
+ */
+async function checkSymlinkSupport(): Promise<boolean> {
+ const testDir = await fs.mkdtemp(path.join(os.tmpdir(), 'symlink-test-'));
+ try {
+ const targetFile = path.join(testDir, 'target.txt');
+ const linkFile = path.join(testDir, 'link.txt');
+
+ await fs.writeFile(targetFile, 'test');
+ await fs.symlink(targetFile, linkFile);
+
+ // If we get here, symlinks are supported
+ return true;
+ } catch (error) {
+ // EPERM indicates no symlink permissions
+ if ((error as NodeJS.ErrnoException).code === 'EPERM') {
+ return false;
+ }
+ // Other errors might indicate a real problem
+ throw error;
+ } finally {
+ await fs.rm(testDir, { recursive: true, force: true });
+ }
+}
+
+// Global variable to store symlink support status
+let symlinkSupported: boolean | null = null;
+
+/**
+ * Get cached symlink support status, checking once per test run
+ */
+async function getSymlinkSupport(): Promise<boolean> {
+ if (symlinkSupported === null) {
+ symlinkSupported = await checkSymlinkSupport();
+ if (!symlinkSupported) {
+ console.log('\n⚠️ Symlink tests will be skipped - symlink creation not supported in this environment');
+ console.log(' On Windows, enable Developer Mode or run as Administrator to enable symlink tests');
+ }
+ }
+ return symlinkSupported;
+}
+
describe('Path Validation', () => {
it('allows exact directory match', () => {
const allowed = ['/home/user/project'];
@@ -587,6 +630,12 @@ describe('Path Validation', () => {
});
it('demonstrates symlink race condition allows writing outside allowed directories', async () => {
+ const symlinkSupported = await getSymlinkSupport();
+ if (!symlinkSupported) {
+ console.log(' ⏭️ Skipping symlink race condition test - symlinks not supported');
+ return;
+ }
+
const allowed = [allowedDir];
await expect(fs.access(testPath)).rejects.toThrow();
@@ -603,6 +652,12 @@ describe('Path Validation', () => {
});
it('shows timing differences between validation approaches', async () => {
+ const symlinkSupported = await getSymlinkSupport();
+ if (!symlinkSupported) {
+ console.log(' ⏭️ Skipping timing validation test - symlinks not supported');
+ return;
+ }
+
const allowed = [allowedDir];
const validation1 = isPathWithinAllowedDirectories(testPath, allowed);
@@ -618,6 +673,12 @@ describe('Path Validation', () => {
});
it('validates directory creation timing', async () => {
+ const symlinkSupported = await getSymlinkSupport();
+ if (!symlinkSupported) {
+ console.log(' ⏭️ Skipping directory creation timing test - symlinks not supported');
+ return;
+ }
+
const allowed = [allowedDir];
const testDir = path.join(allowedDir, 'newdir');
@@ -632,6 +693,12 @@ describe('Path Validation', () => {
});
it('demonstrates exclusive file creation behavior', async () => {
+ const symlinkSupported = await getSymlinkSupport();
+ if (!symlinkSupported) {
+ console.log(' ⏭️ Skipping exclusive file creation test - symlinks not supported');
+ return;
+ }
+
const allowed = [allowedDir];
await fs.symlink(targetFile, testPath);
@@ -644,6 +711,12 @@ describe('Path Validation', () => {
});
it('should use resolved parent paths for non-existent files', async () => {
+ const symlinkSupported = await getSymlinkSupport();
+ if (!symlinkSupported) {
+ console.log(' ⏭️ Skipping resolved parent paths test - symlinks not supported');
+ return;
+ }
+
const allowed = [allowedDir];
const symlinkDir = path.join(allowedDir, 'link');
@@ -662,6 +735,12 @@ describe('Path Validation', () => {
});
it('demonstrates parent directory symlink traversal', async () => {
+ const symlinkSupported = await getSymlinkSupport();
+ if (!symlinkSupported) {
+ console.log(' ⏭️ Skipping parent directory symlink traversal test - symlinks not supported');
+ return;
+ }
+
const allowed = [allowedDir];
const deepPath = path.join(allowedDir, 'sub1', 'sub2', 'file.txt');
@@ -682,6 +761,12 @@ describe('Path Validation', () => {
});
it('should prevent race condition between validatePath and file operation', async () => {
+ const symlinkSupported = await getSymlinkSupport();
+ if (!symlinkSupported) {
+ console.log(' ⏭️ Skipping race condition prevention test - symlinks not supported');
+ return;
+ }
+
const allowed = [allowedDir];
const racePath = path.join(allowedDir, 'race-file.txt');
const targetFile = path.join(forbiddenDir, 'target.txt');
@@ -730,6 +815,12 @@ describe('Path Validation', () => {
});
it('should handle symlinks that point within allowed directories', async () => {
+ const symlinkSupported = await getSymlinkSupport();
+ if (!symlinkSupported) {
+ console.log(' ⏭️ Skipping symlinks within allowed directories test - symlinks not supported');
+ return;
+ }
+
const allowed = [allowedDir];
const targetFile = path.join(allowedDir, 'target.txt');
const symlinkPath = path.join(allowedDir, 'symlink.txt');
@@ -756,6 +847,12 @@ describe('Path Validation', () => {
});
it('should prevent overwriting files through symlinks pointing outside allowed directories', async () => {
+ const symlinkSupported = await getSymlinkSupport();
+ if (!symlinkSupported) {
+ console.log(' ⏭️ Skipping symlink overwrite prevention test - symlinks not supported');
+ return;
+ }
+
const allowed = [allowedDir];
const legitFile = path.join(allowedDir, 'existing.txt');
const targetFile = path.join(forbiddenDir, 'target.txt');
@@ -786,6 +883,12 @@ describe('Path Validation', () => {
});
it('demonstrates race condition in read operations', async () => {
+ const symlinkSupported = await getSymlinkSupport();
+ if (!symlinkSupported) {
+ console.log(' ⏭️ Skipping race condition in read operations test - symlinks not supported');
+ return;
+ }
+
const allowed = [allowedDir];
const legitFile = path.join(allowedDir, 'readable.txt');
const secretFile = path.join(forbiddenDir, 'secret.txt');
@@ -812,6 +915,12 @@ describe('Path Validation', () => {
});
it('verifies rename does not follow symlinks', async () => {
+ const symlinkSupported = await getSymlinkSupport();
+ if (!symlinkSupported) {
+ console.log(' ⏭️ Skipping rename symlink test - symlinks not supported');
+ return;
+ }
+
const allowed = [allowedDir];
const tempFile = path.join(allowedDir, 'temp.txt');
const targetSymlink = path.join(allowedDir, 'target-symlink.txt');
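A note on the guard pattern above: the early `return` keeps unsupported environments green, but Jest then reports those cases as passed rather than skipped. If accurate skip reporting matters, the probe can run before test collection and gate a conditional `it`; a sketch under the assumption that `checkSymlinkSupport` is extracted into a shared helper and that a `globalSetup` file is registered in the Jest config:

```typescript
// jest.global-setup.ts (assumed file, registered via the `globalSetup`
// option in the Jest config): probe symlink support once and expose it
// as an environment flag.
import { checkSymlinkSupport } from './symlink-probe.js'; // assumed shared helper

export default async function globalSetup(): Promise<void> {
  process.env.SYMLINKS_OK = (await checkSymlinkSupport()) ? '1' : '0';
}

// In the test file, bind a conditional `it` at collection time so skipped
// cases are reported as skipped:
//
//   const itIfSymlinks = process.env.SYMLINKS_OK === '1' ? it : it.skip;
//
//   itIfSymlinks('demonstrates symlink race condition ...', async () => {
//     /* unchanged test body */
//   });
```

This works because `globalSetup` runs in the parent process before the workers that execute test files inherit its environment.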
diff --git a/src/filesystem/index.ts b/src/filesystem/index.ts
index 09b0bd7a..310cfa6b 100644
--- a/src/filesystem/index.ts
+++ b/src/filesystem/index.ts
@@ -12,14 +12,23 @@ import {
import fs from "fs/promises";
import { createReadStream } from "fs";
import path from "path";
-import os from 'os';
-import { randomBytes } from 'crypto';
import { z } from "zod";
import { zodToJsonSchema } from "zod-to-json-schema";
-import { diffLines, createTwoFilesPatch } from 'diff';
-import { minimatch } from 'minimatch';
-import { isPathWithinAllowedDirectories } from './path-validation.js';
+import { normalizePath, expandHome } from './path-utils.js';
import { getValidRootDirectories } from './roots-utils.js';
+import {
+ // Function imports
+ formatSize,
+ validatePath,
+ getFileStats,
+ readFileContent,
+ writeFileContent,
+ searchFilesWithValidation,
+ applyFileEdits,
+ tailFile,
+ headFile,
+ setAllowedDirectories,
+} from './lib.js';
// Command line argument parsing
const args = process.argv.slice(2);
@@ -31,25 +40,14 @@ if (args.length === 0) {
console.error("At least one directory must be provided by EITHER method for the server to operate.");
}
-// Normalize all paths consistently
-function normalizePath(p: string): string {
- return path.normalize(p);
-}
-
-function expandHome(filepath: string): string {
- if (filepath.startsWith('~/') || filepath === '~') {
- return path.join(os.homedir(), filepath.slice(1));
- }
- return filepath;
-}
-
// Store allowed directories in normalized and resolved form
let allowedDirectories = await Promise.all(
args.map(async (dir) => {
const expanded = expandHome(dir);
const absolute = path.resolve(expanded);
try {
- // Resolve symlinks in allowed directories during startup
+ // Security: Resolve symlinks in allowed directories during startup
+ // This ensures we know the real paths and can validate against them later
const resolved = await fs.realpath(absolute);
return normalizePath(resolved);
} catch (error) {
@@ -61,9 +59,9 @@ let allowedDirectories = await Promise.all(
);
// Validate that all directories exist and are accessible
-await Promise.all(args.map(async (dir) => {
+await Promise.all(allowedDirectories.map(async (dir) => {
try {
- const stats = await fs.stat(expandHome(dir));
+ const stats = await fs.stat(dir);
if (!stats.isDirectory()) {
console.error(`Error: ${dir} is not a directory`);
process.exit(1);
@@ -74,47 +72,8 @@ await Promise.all(args.map(async (dir) => {
}
}));
-// Security utilities
-async function validatePath(requestedPath: string): Promise<string> {
- const expandedPath = expandHome(requestedPath);
- const absolute = path.isAbsolute(expandedPath)
- ? path.resolve(expandedPath)
- : path.resolve(process.cwd(), expandedPath);
-
- const normalizedRequested = normalizePath(absolute);
-
- // Check if path is within allowed directories
- const isAllowed = isPathWithinAllowedDirectories(normalizedRequested, allowedDirectories);
- if (!isAllowed) {
- throw new Error(`Access denied - path outside allowed directories: ${absolute} not in ${allowedDirectories.join(', ')}`);
- }
-
- // Handle symlinks by checking their real path
- try {
- const realPath = await fs.realpath(absolute);
- const normalizedReal = normalizePath(realPath);
- if (!isPathWithinAllowedDirectories(normalizedReal, allowedDirectories)) {
- throw new Error(`Access denied - symlink target outside allowed directories: ${realPath} not in ${allowedDirectories.join(', ')}`);
- }
- return realPath;
- } catch (error) {
- // For new files that don't exist yet, verify parent directory
- if ((error as NodeJS.ErrnoException).code === 'ENOENT') {
- const parentDir = path.dirname(absolute);
- try {
- const realParentPath = await fs.realpath(parentDir);
- const normalizedParent = normalizePath(realParentPath);
- if (!isPathWithinAllowedDirectories(normalizedParent, allowedDirectories)) {
- throw new Error(`Access denied - parent directory outside allowed directories: ${realParentPath} not in ${allowedDirectories.join(', ')}`);
- }
- return absolute;
- } catch {
- throw new Error(`Parent directory does not exist: ${parentDir}`);
- }
- }
- throw error;
- }
-}
+// Initialize the global allowedDirectories in lib.ts
+setAllowedDirectories(allowedDirectories);
// Schema definitions
const ReadTextFileArgsSchema = z.object({
@@ -182,16 +141,6 @@ const GetFileInfoArgsSchema = z.object({
const ToolInputSchema = ToolSchema.shape.inputSchema;
type ToolInput = z.infer<typeof ToolInputSchema>;
-interface FileInfo {
- size: number;
- created: Date;
- modified: Date;
- accessed: Date;
- isDirectory: boolean;
- isFile: boolean;
- permissions: string;
-}
-
// Server setup
const server = new Server(
{
@@ -205,277 +154,6 @@ const server = new Server(
},
);
-// Tool implementations
-async function getFileStats(filePath: string): Promise<FileInfo> {
- const stats = await fs.stat(filePath);
- return {
- size: stats.size,
- created: stats.birthtime,
- modified: stats.mtime,
- accessed: stats.atime,
- isDirectory: stats.isDirectory(),
- isFile: stats.isFile(),
- permissions: stats.mode.toString(8).slice(-3),
- };
-}
-
-async function searchFiles(
- rootPath: string,
- pattern: string,
- excludePatterns: string[] = []
-): Promise<string[]> {
- const results: string[] = [];
-
- async function search(currentPath: string) {
- const entries = await fs.readdir(currentPath, { withFileTypes: true });
-
- for (const entry of entries) {
- const fullPath = path.join(currentPath, entry.name);
-
- try {
- // Validate each path before processing
- await validatePath(fullPath);
-
- // Check if path matches any exclude pattern
- const relativePath = path.relative(rootPath, fullPath);
- const shouldExclude = excludePatterns.some(pattern => {
- const globPattern = pattern.includes('*') ? pattern : `**/${pattern}/**`;
- return minimatch(relativePath, globPattern, { dot: true });
- });
-
- if (shouldExclude) {
- continue;
- }
-
- if (entry.name.toLowerCase().includes(pattern.toLowerCase())) {
- results.push(fullPath);
- }
-
- if (entry.isDirectory()) {
- await search(fullPath);
- }
- } catch (error) {
- // Skip invalid paths during search
- continue;
- }
- }
- }
-
- await search(rootPath);
- return results;
-}
-
-// file editing and diffing utilities
-function normalizeLineEndings(text: string): string {
- return text.replace(/\r\n/g, '\n');
-}
-
-function createUnifiedDiff(originalContent: string, newContent: string, filepath: string = 'file'): string {
- // Ensure consistent line endings for diff
- const normalizedOriginal = normalizeLineEndings(originalContent);
- const normalizedNew = normalizeLineEndings(newContent);
-
- return createTwoFilesPatch(
- filepath,
- filepath,
- normalizedOriginal,
- normalizedNew,
- 'original',
- 'modified'
- );
-}
-
-async function applyFileEdits(
- filePath: string,
- edits: Array<{oldText: string, newText: string}>,
- dryRun = false
-): Promise<string> {
- // Read file content and normalize line endings
- const content = normalizeLineEndings(await fs.readFile(filePath, 'utf-8'));
-
- // Apply edits sequentially
- let modifiedContent = content;
- for (const edit of edits) {
- const normalizedOld = normalizeLineEndings(edit.oldText);
- const normalizedNew = normalizeLineEndings(edit.newText);
-
- // If exact match exists, use it
- if (modifiedContent.includes(normalizedOld)) {
- modifiedContent = modifiedContent.replace(normalizedOld, normalizedNew);
- continue;
- }
-
- // Otherwise, try line-by-line matching with flexibility for whitespace
- const oldLines = normalizedOld.split('\n');
- const contentLines = modifiedContent.split('\n');
- let matchFound = false;
-
- for (let i = 0; i <= contentLines.length - oldLines.length; i++) {
- const potentialMatch = contentLines.slice(i, i + oldLines.length);
-
- // Compare lines with normalized whitespace
- const isMatch = oldLines.every((oldLine, j) => {
- const contentLine = potentialMatch[j];
- return oldLine.trim() === contentLine.trim();
- });
-
- if (isMatch) {
- // Preserve original indentation of first line
- const originalIndent = contentLines[i].match(/^\s*/)?.[0] || '';
- const newLines = normalizedNew.split('\n').map((line, j) => {
- if (j === 0) return originalIndent + line.trimStart();
- // For subsequent lines, try to preserve relative indentation
- const oldIndent = oldLines[j]?.match(/^\s*/)?.[0] || '';
- const newIndent = line.match(/^\s*/)?.[0] || '';
- if (oldIndent && newIndent) {
- const relativeIndent = newIndent.length - oldIndent.length;
- return originalIndent + ' '.repeat(Math.max(0, relativeIndent)) + line.trimStart();
- }
- return line;
- });
-
- contentLines.splice(i, oldLines.length, ...newLines);
- modifiedContent = contentLines.join('\n');
- matchFound = true;
- break;
- }
- }
-
- if (!matchFound) {
- throw new Error(`Could not find exact match for edit:\n${edit.oldText}`);
- }
- }
-
- // Create unified diff
- const diff = createUnifiedDiff(content, modifiedContent, filePath);
-
- // Format diff with appropriate number of backticks
- let numBackticks = 3;
- while (diff.includes('`'.repeat(numBackticks))) {
- numBackticks++;
- }
- const formattedDiff = `${'`'.repeat(numBackticks)}diff\n${diff}${'`'.repeat(numBackticks)}\n\n`;
-
- if (!dryRun) {
- // Security: Use atomic rename to prevent race conditions where symlinks
- // could be created between validation and write. Rename operations
- // replace the target file atomically and don't follow symlinks.
- const tempPath = `${filePath}.${randomBytes(16).toString('hex')}.tmp`;
- try {
- await fs.writeFile(tempPath, modifiedContent, 'utf-8');
- await fs.rename(tempPath, filePath);
- } catch (error) {
- try {
- await fs.unlink(tempPath);
- } catch {}
- throw error;
- }
- }
-
- return formattedDiff;
-}
-
-// Helper functions
-function formatSize(bytes: number): string {
- const units = ['B', 'KB', 'MB', 'GB', 'TB'];
- if (bytes === 0) return '0 B';
-
- const i = Math.floor(Math.log(bytes) / Math.log(1024));
- if (i === 0) return `${bytes} ${units[i]}`;
-
- return `${(bytes / Math.pow(1024, i)).toFixed(2)} ${units[i]}`;
-}
-
-// Memory-efficient implementation to get the last N lines of a file
-async function tailFile(filePath: string, numLines: number): Promise<string> {
- const CHUNK_SIZE = 1024; // Read 1KB at a time
- const stats = await fs.stat(filePath);
- const fileSize = stats.size;
-
- if (fileSize === 0) return '';
-
- // Open file for reading
- const fileHandle = await fs.open(filePath, 'r');
- try {
- const lines: string[] = [];
- let position = fileSize;
- let chunk = Buffer.alloc(CHUNK_SIZE);
- let linesFound = 0;
- let remainingText = '';
-
- // Read chunks from the end of the file until we have enough lines
- while (position > 0 && linesFound < numLines) {
- const size = Math.min(CHUNK_SIZE, position);
- position -= size;
-
- const { bytesRead } = await fileHandle.read(chunk, 0, size, position);
- if (!bytesRead) break;
-
- // Get the chunk as a string and prepend any remaining text from previous iteration
- const readData = chunk.slice(0, bytesRead).toString('utf-8');
- const chunkText = readData + remainingText;
-
- // Split by newlines and count
- const chunkLines = normalizeLineEndings(chunkText).split('\n');
-
- // If this isn't the end of the file, the first line is likely incomplete
- // Save it to prepend to the next chunk
- if (position > 0) {
- remainingText = chunkLines[0];
- chunkLines.shift(); // Remove the first (incomplete) line
- }
-
- // Add lines to our result (up to the number we need)
- for (let i = chunkLines.length - 1; i >= 0 && linesFound < numLines; i--) {
- lines.unshift(chunkLines[i]);
- linesFound++;
- }
- }
-
- return lines.join('\n');
- } finally {
- await fileHandle.close();
- }
-}
-
-// New function to get the first N lines of a file
-async function headFile(filePath: string, numLines: number): Promise<string> {
- const fileHandle = await fs.open(filePath, 'r');
- try {
- const lines: string[] = [];
- let buffer = '';
- let bytesRead = 0;
- const chunk = Buffer.alloc(1024); // 1KB buffer
-
- // Read chunks and count lines until we have enough or reach EOF
- while (lines.length < numLines) {
- const result = await fileHandle.read(chunk, 0, chunk.length, bytesRead);
- if (result.bytesRead === 0) break; // End of file
- bytesRead += result.bytesRead;
- buffer += chunk.slice(0, result.bytesRead).toString('utf-8');
-
- const newLineIndex = buffer.lastIndexOf('\n');
- if (newLineIndex !== -1) {
- const completeLines = buffer.slice(0, newLineIndex).split('\n');
- buffer = buffer.slice(newLineIndex + 1);
- for (const line of completeLines) {
- lines.push(line);
- if (lines.length >= numLines) break;
- }
- }
- }
-
- // If there is leftover content and we still need lines, add it
- if (buffer.length > 0 && lines.length < numLines) {
- lines.push(buffer);
- }
-
- return lines.join('\n');
- } finally {
- await fileHandle.close();
- }
-}
-
// Reads a file as a stream of buffers, concatenates them, and then encodes
// the result to a Base64 string. This is a memory-efficient way to handle
// binary data from a stream before the final encoding.
@@ -662,8 +340,7 @@ server.setRequestHandler(CallToolRequestSchema, async (request) => {
content: [{ type: "text", text: headContent }],
};
}
-
- const content = await fs.readFile(validPath, "utf-8");
+ const content = await readFileContent(validPath);
return {
content: [{ type: "text", text: content }],
};
@@ -710,7 +387,7 @@ server.setRequestHandler(CallToolRequestSchema, async (request) => {
parsed.data.paths.map(async (filePath: string) => {
try {
const validPath = await validatePath(filePath);
- const content = await fs.readFile(validPath, "utf-8");
+ const content = await readFileContent(validPath);
return `${filePath}:\n${content}\n`;
} catch (error) {
const errorMessage = error instanceof Error ? error.message : String(error);
@@ -729,31 +406,7 @@ server.setRequestHandler(CallToolRequestSchema, async (request) => {
throw new Error(`Invalid arguments for write_file: ${parsed.error}`);
}
const validPath = await validatePath(parsed.data.path);
-
- try {
- // Security: 'wx' flag ensures exclusive creation - fails if file/symlink exists,
- // preventing writes through pre-existing symlinks
- await fs.writeFile(validPath, parsed.data.content, { encoding: "utf-8", flag: 'wx' });
- } catch (error) {
- if ((error as NodeJS.ErrnoException).code === 'EEXIST') {
- // Security: Use atomic rename to prevent race conditions where symlinks
- // could be created between validation and write. Rename operations
- // replace the target file atomically and don't follow symlinks.
- const tempPath = `${validPath}.${randomBytes(16).toString('hex')}.tmp`;
- try {
- await fs.writeFile(tempPath, parsed.data.content, 'utf-8');
- await fs.rename(tempPath, validPath);
- } catch (renameError) {
- try {
- await fs.unlink(tempPath);
- } catch {}
- throw renameError;
- }
- } else {
- throw error;
- }
- }
-
+ await writeFileContent(validPath, parsed.data.content);
return {
content: [{ type: "text", text: `Successfully wrote to ${parsed.data.path}` }],
};
@@ -870,43 +523,43 @@ server.setRequestHandler(CallToolRequestSchema, async (request) => {
throw new Error(`Invalid arguments for directory_tree: ${parsed.error}`);
}
- interface TreeEntry {
- name: string;
- type: 'file' | 'directory';
- children?: TreeEntry[];
- }
+ interface TreeEntry {
+ name: string;
+ type: 'file' | 'directory';
+ children?: TreeEntry[];
+ }
- async function buildTree(currentPath: string): Promise<TreeEntry[]> {
- const validPath = await validatePath(currentPath);
- const entries = await fs.readdir(validPath, {withFileTypes: true});
- const result: TreeEntry[] = [];
+ async function buildTree(currentPath: string): Promise<TreeEntry[]> {
+ const validPath = await validatePath(currentPath);
+ const entries = await fs.readdir(validPath, {withFileTypes: true});
+ const result: TreeEntry[] = [];
- for (const entry of entries) {
- const entryData: TreeEntry = {
- name: entry.name,
- type: entry.isDirectory() ? 'directory' : 'file'
- };
+ for (const entry of entries) {
+ const entryData: TreeEntry = {
+ name: entry.name,
+ type: entry.isDirectory() ? 'directory' : 'file'
+ };
- if (entry.isDirectory()) {
- const subPath = path.join(currentPath, entry.name);
- entryData.children = await buildTree(subPath);
- }
-
- result.push(entryData);
+ if (entry.isDirectory()) {
+ const subPath = path.join(currentPath, entry.name);
+ entryData.children = await buildTree(subPath);
}
- return result;
+ result.push(entryData);
}
- const treeData = await buildTree(parsed.data.path);
- return {
- content: [{
- type: "text",
- text: JSON.stringify(treeData, null, 2)
- }],
- };
+ return result;
}
+ const treeData = await buildTree(parsed.data.path);
+ return {
+ content: [{
+ type: "text",
+ text: JSON.stringify(treeData, null, 2)
+ }],
+ };
+ }
+
case "move_file": {
const parsed = MoveFileArgsSchema.safeParse(args);
if (!parsed.success) {
@@ -926,7 +579,7 @@ server.setRequestHandler(CallToolRequestSchema, async (request) => {
throw new Error(`Invalid arguments for search_files: ${parsed.error}`);
}
const validPath = await validatePath(parsed.data.path);
- const results = await searchFiles(validPath, parsed.data.pattern, parsed.data.excludePatterns);
+ const results = await searchFilesWithValidation(validPath, parsed.data.pattern, allowedDirectories, { excludePatterns: parsed.data.excludePatterns });
return {
content: [{ type: "text", text: results.length > 0 ? results.join("\n") : "No matches found" }],
};
@@ -972,6 +625,7 @@ async function updateAllowedDirectoriesFromRoots(requestedRoots: Root[]) {
const validatedRootDirs = await getValidRootDirectories(requestedRoots);
if (validatedRootDirs.length > 0) {
allowedDirectories = [...validatedRootDirs];
+ setAllowedDirectories(allowedDirectories); // Update the global state in lib.ts
console.error(`Updated allowed directories from MCP roots: ${validatedRootDirs.length} valid directories`);
} else {
console.error("No valid root directories provided by client");
diff --git a/src/filesystem/lib.ts b/src/filesystem/lib.ts
new file mode 100644
index 00000000..40cb316e
--- /dev/null
+++ b/src/filesystem/lib.ts
@@ -0,0 +1,392 @@
+import fs from "fs/promises";
+import path from "path";
+import os from 'os';
+import { randomBytes } from 'crypto';
+import { diffLines, createTwoFilesPatch } from 'diff';
+import { minimatch } from 'minimatch';
+import { normalizePath, expandHome } from './path-utils.js';
+import { isPathWithinAllowedDirectories } from './path-validation.js';
+
+// Global allowed directories - set by the main module
+let allowedDirectories: string[] = [];
+
+// Function to set allowed directories from the main module
+export function setAllowedDirectories(directories: string[]): void {
+ allowedDirectories = [...directories];
+}
+
+// Function to get current allowed directories
+export function getAllowedDirectories(): string[] {
+ return [...allowedDirectories];
+}
+
+// Type definitions
+interface FileInfo {
+ size: number;
+ created: Date;
+ modified: Date;
+ accessed: Date;
+ isDirectory: boolean;
+ isFile: boolean;
+ permissions: string;
+}
+
+export interface SearchOptions {
+ excludePatterns?: string[];
+}
+
+export interface SearchResult {
+ path: string;
+ isDirectory: boolean;
+}
+
+// Pure Utility Functions
+export function formatSize(bytes: number): string {
+ const units = ['B', 'KB', 'MB', 'GB', 'TB'];
+ if (bytes === 0) return '0 B';
+
+ const i = Math.floor(Math.log(bytes) / Math.log(1024));
+
+ if (i <= 0) return `${bytes} ${units[0]}`;
+
+ const unitIndex = Math.min(i, units.length - 1);
+ return `${(bytes / Math.pow(1024, unitIndex)).toFixed(2)} ${units[unitIndex]}`;
+}
+
+export function normalizeLineEndings(text: string): string {
+ return text.replace(/\r\n/g, '\n');
+}
+
+export function createUnifiedDiff(originalContent: string, newContent: string, filepath: string = 'file'): string {
+ // Ensure consistent line endings for diff
+ const normalizedOriginal = normalizeLineEndings(originalContent);
+ const normalizedNew = normalizeLineEndings(newContent);
+
+ return createTwoFilesPatch(
+ filepath,
+ filepath,
+ normalizedOriginal,
+ normalizedNew,
+ 'original',
+ 'modified'
+ );
+}
+
+// Security & Validation Functions
+export async function validatePath(requestedPath: string): Promise<string> {
+ const expandedPath = expandHome(requestedPath);
+ const absolute = path.isAbsolute(expandedPath)
+ ? path.resolve(expandedPath)
+ : path.resolve(process.cwd(), expandedPath);
+
+ const normalizedRequested = normalizePath(absolute);
+
+ // Security: Check if path is within allowed directories before any file operations
+ const isAllowed = isPathWithinAllowedDirectories(normalizedRequested, allowedDirectories);
+ if (!isAllowed) {
+ throw new Error(`Access denied - path outside allowed directories: ${absolute} not in ${allowedDirectories.join(', ')}`);
+ }
+
+ // Security: Handle symlinks by checking their real path to prevent symlink attacks
+ // This prevents attackers from creating symlinks that point outside allowed directories
+ try {
+ const realPath = await fs.realpath(absolute);
+ const normalizedReal = normalizePath(realPath);
+ if (!isPathWithinAllowedDirectories(normalizedReal, allowedDirectories)) {
+ throw new Error(`Access denied - symlink target outside allowed directories: ${realPath} not in ${allowedDirectories.join(', ')}`);
+ }
+ return realPath;
+ } catch (error) {
+ // Security: For new files that don't exist yet, verify parent directory
+ // This ensures we can't create files in unauthorized locations
+ if ((error as NodeJS.ErrnoException).code === 'ENOENT') {
+ const parentDir = path.dirname(absolute);
+ try {
+ const realParentPath = await fs.realpath(parentDir);
+ const normalizedParent = normalizePath(realParentPath);
+ if (!isPathWithinAllowedDirectories(normalizedParent, allowedDirectories)) {
+ throw new Error(`Access denied - parent directory outside allowed directories: ${realParentPath} not in ${allowedDirectories.join(', ')}`);
+ }
+ return absolute;
+ } catch {
+ throw new Error(`Parent directory does not exist: ${parentDir}`);
+ }
+ }
+ throw error;
+ }
+}
+
+
+// File Operations
+export async function getFileStats(filePath: string): Promise<FileInfo> {
+ const stats = await fs.stat(filePath);
+ return {
+ size: stats.size,
+ created: stats.birthtime,
+ modified: stats.mtime,
+ accessed: stats.atime,
+ isDirectory: stats.isDirectory(),
+ isFile: stats.isFile(),
+ permissions: stats.mode.toString(8).slice(-3),
+ };
+}
+
+export async function readFileContent(filePath: string, encoding: string = 'utf-8'): Promise<string> {
+ return await fs.readFile(filePath, encoding as BufferEncoding);
+}
+
+export async function writeFileContent(filePath: string, content: string): Promise<void> {
+ try {
+ // Security: 'wx' flag ensures exclusive creation - fails if file/symlink exists,
+ // preventing writes through pre-existing symlinks
+ await fs.writeFile(filePath, content, { encoding: "utf-8", flag: 'wx' });
+ } catch (error) {
+ if ((error as NodeJS.ErrnoException).code === 'EEXIST') {
+ // Security: Use atomic rename to prevent race conditions where symlinks
+ // could be created between validation and write. Rename operations
+ // replace the target file atomically and don't follow symlinks.
+ const tempPath = `${filePath}.${randomBytes(16).toString('hex')}.tmp`;
+ try {
+ await fs.writeFile(tempPath, content, 'utf-8');
+ await fs.rename(tempPath, filePath);
+ } catch (renameError) {
+ try {
+ await fs.unlink(tempPath);
+ } catch {}
+ throw renameError;
+ }
+ } else {
+ throw error;
+ }
+ }
+}
+
+
+// File Editing Functions
+interface FileEdit {
+ oldText: string;
+ newText: string;
+}
+
+export async function applyFileEdits(
+ filePath: string,
+ edits: FileEdit[],
+ dryRun: boolean = false
+): Promise<string> {
+ // Read file content and normalize line endings
+ const content = normalizeLineEndings(await fs.readFile(filePath, 'utf-8'));
+
+ // Apply edits sequentially
+ let modifiedContent = content;
+ for (const edit of edits) {
+ const normalizedOld = normalizeLineEndings(edit.oldText);
+ const normalizedNew = normalizeLineEndings(edit.newText);
+
+ // If exact match exists, use it
+ if (modifiedContent.includes(normalizedOld)) {
+ modifiedContent = modifiedContent.replace(normalizedOld, normalizedNew);
+ continue;
+ }
+
+ // Otherwise, try line-by-line matching with flexibility for whitespace
+ const oldLines = normalizedOld.split('\n');
+ const contentLines = modifiedContent.split('\n');
+ let matchFound = false;
+
+ for (let i = 0; i <= contentLines.length - oldLines.length; i++) {
+ const potentialMatch = contentLines.slice(i, i + oldLines.length);
+
+ // Compare lines with normalized whitespace
+ const isMatch = oldLines.every((oldLine, j) => {
+ const contentLine = potentialMatch[j];
+ return oldLine.trim() === contentLine.trim();
+ });
+
+ if (isMatch) {
+ // Preserve original indentation of first line
+ const originalIndent = contentLines[i].match(/^\s*/)?.[0] || '';
+ const newLines = normalizedNew.split('\n').map((line, j) => {
+ if (j === 0) return originalIndent + line.trimStart();
+ // For subsequent lines, try to preserve relative indentation
+ const oldIndent = oldLines[j]?.match(/^\s*/)?.[0] || '';
+ const newIndent = line.match(/^\s*/)?.[0] || '';
+ if (oldIndent && newIndent) {
+ const relativeIndent = newIndent.length - oldIndent.length;
+ return originalIndent + ' '.repeat(Math.max(0, relativeIndent)) + line.trimStart();
+ }
+ return line;
+ });
+
+ contentLines.splice(i, oldLines.length, ...newLines);
+ modifiedContent = contentLines.join('\n');
+ matchFound = true;
+ break;
+ }
+ }
+
+ if (!matchFound) {
+ throw new Error(`Could not find exact match for edit:\n${edit.oldText}`);
+ }
+ }
+
+ // Create unified diff
+ const diff = createUnifiedDiff(content, modifiedContent, filePath);
+
+ // Format diff with appropriate number of backticks
+ let numBackticks = 3;
+ while (diff.includes('`'.repeat(numBackticks))) {
+ numBackticks++;
+ }
+ const formattedDiff = `${'`'.repeat(numBackticks)}diff\n${diff}${'`'.repeat(numBackticks)}\n\n`;
+
+ if (!dryRun) {
+ // Security: Use atomic rename to prevent race conditions where symlinks
+ // could be created between validation and write. Rename operations
+ // replace the target file atomically and don't follow symlinks.
+ const tempPath = `${filePath}.${randomBytes(16).toString('hex')}.tmp`;
+ try {
+ await fs.writeFile(tempPath, modifiedContent, 'utf-8');
+ await fs.rename(tempPath, filePath);
+ } catch (error) {
+ try {
+ await fs.unlink(tempPath);
+ } catch {}
+ throw error;
+ }
+ }
+
+ return formattedDiff;
+}
+
+// Memory-efficient implementation to get the last N lines of a file
+export async function tailFile(filePath: string, numLines: number): Promise<string> {
+ const CHUNK_SIZE = 1024; // Read 1KB at a time
+ const stats = await fs.stat(filePath);
+ const fileSize = stats.size;
+
+ if (fileSize === 0) return '';
+
+ // Open file for reading
+ const fileHandle = await fs.open(filePath, 'r');
+ try {
+ const lines: string[] = [];
+ let position = fileSize;
+ let chunk = Buffer.alloc(CHUNK_SIZE);
+ let linesFound = 0;
+ let remainingText = '';
+
+ // Read chunks from the end of the file until we have enough lines
+ while (position > 0 && linesFound < numLines) {
+ const size = Math.min(CHUNK_SIZE, position);
+ position -= size;
+
+ const { bytesRead } = await fileHandle.read(chunk, 0, size, position);
+ if (!bytesRead) break;
+
+ // Get the chunk as a string and prepend any remaining text from previous iteration
+ const readData = chunk.slice(0, bytesRead).toString('utf-8');
+ const chunkText = readData + remainingText;
+
+ // Split by newlines and count
+ const chunkLines = normalizeLineEndings(chunkText).split('\n');
+
+ // If this isn't the end of the file, the first line is likely incomplete
+ // Save it to prepend to the next chunk
+ if (position > 0) {
+ remainingText = chunkLines[0];
+ chunkLines.shift(); // Remove the first (incomplete) line
+ }
+
+ // Add lines to our result (up to the number we need)
+ for (let i = chunkLines.length - 1; i >= 0 && linesFound < numLines; i--) {
+ lines.unshift(chunkLines[i]);
+ linesFound++;
+ }
+ }
+
+ return lines.join('\n');
+ } finally {
+ await fileHandle.close();
+ }
+}
+
+// New function to get the first N lines of a file
+export async function headFile(filePath: string, numLines: number): Promise<string> {
+ const fileHandle = await fs.open(filePath, 'r');
+ try {
+ const lines: string[] = [];
+ let buffer = '';
+ let bytesRead = 0;
+ const chunk = Buffer.alloc(1024); // 1KB buffer
+
+ // Read chunks and count lines until we have enough or reach EOF
+ while (lines.length < numLines) {
+ const result = await fileHandle.read(chunk, 0, chunk.length, bytesRead);
+ if (result.bytesRead === 0) break; // End of file
+ bytesRead += result.bytesRead;
+ buffer += chunk.slice(0, result.bytesRead).toString('utf-8');
+
+ const newLineIndex = buffer.lastIndexOf('\n');
+ if (newLineIndex !== -1) {
+ const completeLines = buffer.slice(0, newLineIndex).split('\n');
+ buffer = buffer.slice(newLineIndex + 1);
+ for (const line of completeLines) {
+ lines.push(line);
+ if (lines.length >= numLines) break;
+ }
+ }
+ }
+
+ // If there is leftover content and we still need lines, add it
+ if (buffer.length > 0 && lines.length < numLines) {
+ lines.push(buffer);
+ }
+
+ return lines.join('\n');
+ } finally {
+ await fileHandle.close();
+ }
+}
+
+export async function searchFilesWithValidation(
+ rootPath: string,
+ pattern: string,
+ allowedDirectories: string[],
+ options: SearchOptions = {}
+): Promise<string[]> {
+ const { excludePatterns = [] } = options;
+ const results: string[] = [];
+
+ async function search(currentPath: string) {
+ const entries = await fs.readdir(currentPath, { withFileTypes: true });
+
+ for (const entry of entries) {
+ const fullPath = path.join(currentPath, entry.name);
+
+ try {
+ await validatePath(fullPath);
+
+ const relativePath = path.relative(rootPath, fullPath);
+ const shouldExclude = excludePatterns.some(excludePattern => {
+ const globPattern = excludePattern.includes('*') ? excludePattern : `**/${excludePattern}/**`;
+ return minimatch(relativePath, globPattern, { dot: true });
+ });
+
+ if (shouldExclude) continue;
+
+ if (entry.name.toLowerCase().includes(pattern.toLowerCase())) {
+ results.push(fullPath);
+ }
+
+ if (entry.isDirectory()) {
+ await search(fullPath);
+ }
+ } catch {
+ continue;
+ }
+ }
+ }
+
+ await search(rootPath);
+ return results;
+}
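One detail of `searchFilesWithValidation` worth calling out is the exclude-pattern normalization it carries over from the old `searchFiles`: bare names are wrapped in a globstar pattern so they exclude whole subtrees, while patterns containing `*` pass through unchanged. A small isolated sketch (the `isExcluded` helper name is illustrative):

```typescript
import { minimatch } from 'minimatch';

// Bare names like 'node_modules' become '**/node_modules/**' so they match
// at any depth; explicit globs such as '*.log' are used as-is.
function isExcluded(relativePath: string, excludePatterns: string[]): boolean {
  return excludePatterns.some(pattern => {
    const globPattern = pattern.includes('*') ? pattern : `**/${pattern}/**`;
    return minimatch(relativePath, globPattern, { dot: true });
  });
}

// isExcluded('node_modules/pkg/index.js', ['node_modules']) -> true
// isExcluded('app.log', ['*.log'])                          -> true
// isExcluded('src/app.log', ['*.log'])                      -> false, since
//   '*' does not cross '/'; nested matches need '**/*.log'.
```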
diff --git a/src/filesystem/package.json b/src/filesystem/package.json
index 4d3ac320..faeefa54 100644
--- a/src/filesystem/package.json
+++ b/src/filesystem/package.json
@@ -1,6 +1,6 @@
{
"name": "@modelcontextprotocol/server-filesystem",
- "version": "0.6.2",
+ "version": "0.6.3",
"description": "MCP server for filesystem access",
"license": "MIT",
"author": "Anthropic, PBC (https://anthropic.com)",
diff --git a/src/filesystem/path-validation.ts b/src/filesystem/path-validation.ts
index ee0c97d7..972e9c49 100644
--- a/src/filesystem/path-validation.ts
+++ b/src/filesystem/path-validation.ts
@@ -68,10 +68,19 @@ export function isPathWithinAllowedDirectories(absolutePath: string, allowedDire
}
// Special case for root directory to avoid double slash
+ // On Windows, we need to check if both paths are on the same drive
if (normalizedDir === path.sep) {
return normalizedPath.startsWith(path.sep);
}
+ // On Windows, also check for drive root (e.g., "C:\")
+ if (path.sep === '\\' && normalizedDir.match(/^[A-Za-z]:\\?$/)) {
+ // Ensure both paths are on the same drive
+ const dirDrive = normalizedDir.charAt(0).toLowerCase();
+ const pathDrive = normalizedPath.charAt(0).toLowerCase();
+ return pathDrive === dirDrive && normalizedPath.startsWith(normalizedDir.replace(/\\?$/, '\\'));
+ }
+
return normalizedPath.startsWith(normalizedDir + path.sep);
});
-}
\ No newline at end of file
+}
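The new drive-root branch above only takes effect where `path.sep` is `\`, so its intended outcomes are easiest to show guarded to Windows hosts; a hedged sketch:

```typescript
import path from 'path';
import assert from 'assert';
import { isPathWithinAllowedDirectories } from './path-validation.js';

// Illustrative only: the branch normalizes a drive root like 'C:' or 'C:\'
// and requires the candidate path to sit on the same drive letter.
if (path.sep === '\\') {
  // Same drive, under the allowed root: accepted.
  assert.equal(isPathWithinAllowedDirectories('C:\\Users\\me\\f.txt', ['C:\\']), true);
  // Different drive letter: rejected even though 'D:\' is also a root.
  assert.equal(isPathWithinAllowedDirectories('D:\\other\\f.txt', ['C:\\']), false);
}
```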
diff --git a/src/time/src/mcp_server_time/server.py b/src/time/src/mcp_server_time/server.py
index e3d353bd..b8ca4e2f 100644
--- a/src/time/src/mcp_server_time/server.py
+++ b/src/time/src/mcp_server_time/server.py
@@ -22,6 +22,7 @@ class TimeTools(str, Enum):
class TimeResult(BaseModel):
timezone: str
datetime: str
+ day_of_week: str
is_dst: bool
@@ -64,6 +65,7 @@ class TimeServer:
return TimeResult(
timezone=timezone_name,
datetime=current_time.isoformat(timespec="seconds"),
+ day_of_week=current_time.strftime("%A"),
is_dst=bool(current_time.dst()),
)
@@ -104,11 +106,13 @@ class TimeServer:
source=TimeResult(
timezone=source_tz,
datetime=source_time.isoformat(timespec="seconds"),
+ day_of_week=source_time.strftime("%A"),
is_dst=bool(source_time.dst()),
),
target=TimeResult(
timezone=target_tz,
datetime=target_time.isoformat(timespec="seconds"),
+ day_of_week=target_time.strftime("%A"),
is_dst=bool(target_time.dst()),
),
time_difference=time_diff_str,