Merge branch 'main' into claude/issue-2526-20250824-0240

This commit is contained in:
Ola Hungerford
2026-02-03 19:59:23 -07:00
committed by GitHub
110 changed files with 11780 additions and 8014 deletions

View File

@@ -2,6 +2,12 @@
## Description
## Publishing Your Server
**Note: We are no longer accepting PRs to add servers to the README.** Instead, please publish your server to the [MCP Server Registry](https://github.com/modelcontextprotocol/registry) to make it discoverable to the MCP ecosystem.
To publish your server, follow the [quickstart guide](https://github.com/modelcontextprotocol/registry/blob/main/docs/modelcontextprotocol-io/quickstart.mdx). You can browse published servers at [https://registry.modelcontextprotocol.io/](https://registry.modelcontextprotocol.io/).
## Server Details
<!-- If modifying an existing server, provide details -->
- Server: <!-- e.g., filesystem, github -->

View File

@@ -26,13 +26,13 @@ jobs:
actions: read
steps:
- name: Checkout repository
uses: actions/checkout@v4
uses: actions/checkout@v6
with:
fetch-depth: 1
- name: Run Claude Code
id: claude
uses: anthropics/claude-code-action@beta
uses: anthropics/claude-code-action@v1
with:
anthropic_api_key: ${{ secrets.ANTHROPIC_API_KEY }}
@@ -42,10 +42,8 @@ jobs:
# Trigger when assigned to an issue
assignee_trigger: "claude"
# Allow Claude to run bash
# This should be safe given the repo is already public
allowed_tools: "Bash"
custom_instructions: |
If posting a comment to GitHub, give a concise summary of the comment at the top and put all the details in a <details> block.
claude_args: |
--mcp-config .mcp.json
--allowedTools "Bash,mcp__mcp-docs,WebFetch"
--append-system-prompt "If posting a comment to GitHub, give a concise summary of the comment at the top and put all the details in a <details> block. When working on MCP-related code or reviewing MCP-related changes, use the mcp-docs MCP server to look up the latest protocol documentation. For schema details, reference https://github.com/modelcontextprotocol/modelcontextprotocol/tree/main/schema which contains versioned schemas in JSON (schema.json) and TypeScript (schema.ts) formats."

View File

@@ -14,7 +14,7 @@ jobs:
outputs:
packages: ${{ steps.find-packages.outputs.packages }}
steps:
- uses: actions/checkout@v4
- uses: actions/checkout@v6
- name: Find Python packages
id: find-packages
@@ -31,13 +31,13 @@ jobs:
name: Test ${{ matrix.package }}
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: actions/checkout@v6
- name: Install uv
uses: astral-sh/setup-uv@v3
- name: Set up Python
uses: actions/setup-python@v5
uses: actions/setup-python@v6
with:
python-version-file: "src/${{ matrix.package }}/.python-version"
@@ -68,19 +68,19 @@ jobs:
name: Build ${{ matrix.package }}
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: actions/checkout@v6
- name: Install uv
uses: astral-sh/setup-uv@v3
- name: Set up Python
uses: actions/setup-python@v5
uses: actions/setup-python@v6
with:
python-version-file: "src/${{ matrix.package }}/.python-version"
- name: Install dependencies
working-directory: src/${{ matrix.package }}
run: uv sync --frozen --all-extras --dev
run: uv sync --locked --all-extras --dev
- name: Run pyright
working-directory: src/${{ matrix.package }}
@@ -91,7 +91,7 @@ jobs:
run: uv build
- name: Upload artifacts
uses: actions/upload-artifact@v4
uses: actions/upload-artifact@v6
with:
name: dist-${{ matrix.package }}
path: src/${{ matrix.package }}/dist/
@@ -112,7 +112,7 @@ jobs:
steps:
- name: Download artifacts
uses: actions/download-artifact@v4
uses: actions/download-artifact@v7
with:
name: dist-${{ matrix.package }}
path: dist/

111
.github/workflows/readme-pr-check.yml vendored Normal file
View File

@@ -0,0 +1,111 @@
name: README PR Check
on:
pull_request:
types: [opened]
paths:
- 'README.md'
issue_comment:
types: [created]
jobs:
check-readme-only:
if: github.event_name == 'pull_request'
runs-on: ubuntu-latest
permissions:
contents: read
pull-requests: write
steps:
- name: Check files and comment if README-only
uses: actions/github-script@v8
with:
script: |
const { owner, repo } = context.repo;
const prNumber = context.payload.pull_request.number;
const { data: files } = await github.rest.pulls.listFiles({ owner, repo, pull_number: prNumber });
if (files.length !== 1 || files[0].filename !== 'README.md') {
console.log('PR modifies files other than README, skipping');
return;
}
// Check if we've already commented
const { data: comments } = await github.rest.issues.listComments({ owner, repo, issue_number: prNumber });
if (comments.some(c => c.user.login === 'github-actions[bot]' && c.body.includes('no longer accepting PRs to add new servers'))) {
console.log('Already commented on this PR, skipping');
return;
}
await github.rest.issues.addLabels({ owner, repo, issue_number: prNumber, labels: ['readme: pending'] });
await github.rest.issues.createComment({
owner,
repo,
issue_number: prNumber,
body: [
'Thanks for your contribution!',
'',
'**We are no longer accepting PRs to add new servers to the README.** The server lists are deprecated and will eventually be removed entirely, replaced by the registry.',
'',
'👉 **To add a new MCP server:** Please publish it to the [MCP Server Registry](https://github.com/modelcontextprotocol/registry) instead. You can browse published servers at [registry.modelcontextprotocol.io](https://registry.modelcontextprotocol.io/).',
'',
'👉 **If this PR updates or removes an existing entry:** We do still accept these changes. Please reply with `/i-promise-this-is-not-a-new-server` to continue.',
'',
'If this PR is adding a new server, please close it and submit to the registry instead.',
].join('\n'),
});
handle-confirmation:
if: github.event_name == 'issue_comment' && github.event.issue.pull_request && contains(github.event.comment.body, '/i-promise-this-is-not-a-new-server')
runs-on: ubuntu-latest
permissions:
pull-requests: write
steps:
- name: Swap labels and minimize comments
uses: actions/github-script@v8
with:
script: |
const { owner, repo } = context.repo;
const prNumber = context.payload.issue.number;
// Check if pending label exists
const { data: labels } = await github.rest.issues.listLabelsOnIssue({ owner, repo, issue_number: prNumber });
if (!labels.some(l => l.name === 'readme: pending')) {
console.log('No pending label found, skipping');
return;
}
// Swap labels
try {
await github.rest.issues.removeLabel({ owner, repo, issue_number: prNumber, name: 'readme: pending' });
} catch (e) {}
await github.rest.issues.addLabels({ owner, repo, issue_number: prNumber, labels: ['readme: ready for review'] });
// Find the bot's original comment
const { data: comments } = await github.rest.issues.listComments({ owner, repo, issue_number: prNumber });
const botComment = comments.find(c =>
c.user.login === 'github-actions[bot]' &&
c.body.includes('no longer accepting PRs to add new servers')
);
// Minimize both comments via GraphQL
const minimizeComment = async (nodeId) => {
await github.graphql(`
mutation($id: ID!) {
minimizeComment(input: {subjectId: $id, classifier: RESOLVED}) {
minimizedComment { isMinimized }
}
}
`, { id: nodeId });
};
if (botComment) {
await minimizeComment(botComment.node_id);
}
// Only minimize user's comment if it's just the command
const userComment = context.payload.comment.body.trim();
if (userComment === '/i-promise-this-is-not-a-new-server') {
await minimizeComment(context.payload.comment.node_id);
}

View File

@@ -8,13 +8,14 @@ on:
jobs:
create-metadata:
runs-on: ubuntu-latest
if: github.repository_owner == 'modelcontextprotocol'
outputs:
hash: ${{ steps.last-release.outputs.hash }}
version: ${{ steps.create-version.outputs.version}}
npm_packages: ${{ steps.create-npm-packages.outputs.npm_packages}}
pypi_packages: ${{ steps.create-pypi-packages.outputs.pypi_packages}}
steps:
- uses: actions/checkout@v4
- uses: actions/checkout@v6
with:
fetch-depth: 0
@@ -42,7 +43,7 @@ jobs:
cat RELEASE_NOTES.md
- name: Release notes
uses: actions/upload-artifact@v4
uses: actions/upload-artifact@v6
with:
name: release-notes
path: RELEASE_NOTES.md
@@ -68,10 +69,12 @@ jobs:
if: ${{ needs.create-metadata.outputs.npm_packages != '[]' || needs.create-metadata.outputs.pypi_packages != '[]' }}
runs-on: ubuntu-latest
environment: release
permissions:
contents: write
outputs:
changes_made: ${{ steps.commit.outputs.changes_made }}
steps:
- uses: actions/checkout@v4
- uses: actions/checkout@v6
with:
fetch-depth: 0
@@ -115,7 +118,7 @@ jobs:
id-token: write # Required for trusted publishing
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: actions/checkout@v6
with:
ref: ${{ needs.create-metadata.outputs.version }}
@@ -123,7 +126,7 @@ jobs:
uses: astral-sh/setup-uv@v5
- name: Set up Python
uses: actions/setup-python@v5
uses: actions/setup-python@v6
with:
python-version-file: "src/${{ matrix.package }}/.python-version"
@@ -155,11 +158,11 @@ jobs:
environment: release
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: actions/checkout@v6
with:
ref: ${{ needs.create-metadata.outputs.version }}
- uses: actions/setup-node@v4
- uses: actions/setup-node@v6
with:
node-version: 22
cache: npm
@@ -192,16 +195,19 @@ jobs:
create-release:
needs: [update-packages, create-metadata, publish-pypi, publish-npm]
if: needs.update-packages.outputs.changes_made == 'true'
if: |
always() &&
needs.update-packages.outputs.changes_made == 'true' &&
(needs.publish-pypi.result == 'success' || needs.publish-npm.result == 'success')
runs-on: ubuntu-latest
environment: release
permissions:
contents: write
steps:
- uses: actions/checkout@v4
- uses: actions/checkout@v6
- name: Download release notes
uses: actions/download-artifact@v4
uses: actions/download-artifact@v7
with:
name: release-notes

View File

@@ -14,7 +14,7 @@ jobs:
outputs:
packages: ${{ steps.find-packages.outputs.packages }}
steps:
- uses: actions/checkout@v4
- uses: actions/checkout@v6
- name: Find JS packages
id: find-packages
working-directory: src
@@ -30,9 +30,9 @@ jobs:
name: Test ${{ matrix.package }}
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: actions/checkout@v6
- uses: actions/setup-node@v4
- uses: actions/setup-node@v6
with:
node-version: 22
cache: npm
@@ -41,21 +41,9 @@ jobs:
working-directory: src/${{ matrix.package }}
run: npm ci
- name: Check if tests exist
id: check-tests
working-directory: src/${{ matrix.package }}
run: |
if npm run test --silent 2>/dev/null; then
echo "has-tests=true" >> $GITHUB_OUTPUT
else
echo "has-tests=false" >> $GITHUB_OUTPUT
fi
continue-on-error: true
- name: Run tests
if: steps.check-tests.outputs.has-tests == 'true'
working-directory: src/${{ matrix.package }}
run: npm test
run: npm test --if-present
build:
needs: [detect-packages, test]
@@ -65,9 +53,9 @@ jobs:
name: Build ${{ matrix.package }}
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: actions/checkout@v6
- uses: actions/setup-node@v4
- uses: actions/setup-node@v6
with:
node-version: 22
cache: npm
@@ -96,8 +84,8 @@ jobs:
id-token: write
steps:
- uses: actions/checkout@v4
- uses: actions/setup-node@v4
- uses: actions/checkout@v6
- uses: actions/setup-node@v6
with:
node-version: 22
cache: npm

5
.gitignore vendored
View File

@@ -122,6 +122,10 @@ dist
# Stores VSCode versions used for testing VSCode extensions
.vscode-test
# IDEs
.idea/
.vscode/
# yarn v2
.yarn/cache
.yarn/unplugged
@@ -298,3 +302,4 @@ cython_debug/
# and can be added to the global gitignore or merged into this file. For a more nuclear
# option (not recommended) you can uncomment the following to ignore the entire idea folder.
#.idea/
.claude/settings.local.json

8
.mcp.json Normal file
View File

@@ -0,0 +1,8 @@
{
"mcpServers": {
"mcp-docs": {
"type": "http",
"url": "https://modelcontextprotocol.io/mcp"
}
}
}

View File

@@ -1 +0,0 @@
{}

View File

@@ -1,105 +1,40 @@
# Contributing to MCP Servers
Thank you for your interest in contributing to the Model Context Protocol (MCP) servers! This document provides guidelines and instructions for contributing.
Thanks for your interest in contributing! Here's how you can help make this repo better.
## Types of Contributions
We accept changes through [the standard GitHub flow model](https://docs.github.com/en/get-started/using-github/github-flow).
### 1. New Servers
## Server Listings
The repository contains reference implementations, as well as a list of community servers.
We generally don't accept new servers into the repository. We do accept pull requests to the [README.md](./README.md)
adding a reference to your servers.
We are **no longer accepting PRs** to add server links to the README. Please publish your server to the [MCP Server Registry](https://github.com/modelcontextprotocol/registry) instead. Follow the [quickstart guide](https://github.com/modelcontextprotocol/registry/blob/main/docs/modelcontextprotocol-io/quickstart.mdx).
Please keep lists in alphabetical order to minimize merge conflicts when adding new items.
You can browse published servers using the simple UI at [https://registry.modelcontextprotocol.io/](https://registry.modelcontextprotocol.io/).
- Check the [modelcontextprotocol.io](https://modelcontextprotocol.io) documentation
- Ensure your server doesn't duplicate existing functionality
- Consider whether your server would be generally useful to others
- Follow [security best practices](https://modelcontextprotocol.io/docs/concepts/transports#security-considerations) from the MCP documentation
- Create a PR adding a link to your server to the [README.md](./README.md).
## Server Implementations
### 2. Improvements to Existing Servers
Enhancements to existing servers are welcome! This includes:
We welcome:
- **Bug fixes** — Help us squash those pesky bugs.
- **Usability improvements** — Making servers easier to use for humans and agents.
- **Enhancements that demonstrate MCP protocol features** — We encourage contributions that help reference servers better illustrate underutilized aspects of the MCP protocol beyond just Tools, such as Resources, Prompts, or Roots. For example, adding Roots support to filesystem-server helps showcase this important but lesser-known feature.
- Bug fixes
- Performance improvements
- New features
- Security enhancements
We're more selective about:
- **Other new features** — Especially if they're not crucial to the server's core purpose or are highly opinionated. The existing servers are reference servers meant to inspire the community. If you need specific features, we encourage you to build enhanced versions and publish them to the [MCP Server Registry](https://github.com/modelcontextprotocol/registry)! We think a diverse ecosystem of servers is beneficial for everyone.
### 3. Documentation
Documentation improvements are always welcome:
We don't accept:
- **New server implementations** — We encourage you to publish them to the [MCP Server Registry](https://github.com/modelcontextprotocol/registry) instead.
- Fixing typos or unclear instructions
- Adding examples
- Improving setup instructions
- Adding troubleshooting guides
## Testing
## Getting Started
When adding or configuring tests for servers implemented in TypeScript, use **vitest** as the test framework. Vitest provides better ESM support, faster test execution, and a more modern testing experience.
1. Fork the repository
2. Clone your fork:
```bash
git clone https://github.com/your-username/servers.git
```
3. Add the upstream remote:
```bash
git remote add upstream https://github.com/modelcontextprotocol/servers.git
```
4. Create a branch:
```bash
git checkout -b my-feature
```
## Documentation
## Development Guidelines
Improvements to existing documentation are welcome - although generally we'd prefer ergonomic improvements over documenting pain points where possible!
### Code Style
- Follow the existing code style in the repository
- Include appropriate type definitions
- Add comments for complex logic
### Documentation
- Include a detailed README.md in your server directory
- Document all configuration options
- Provide setup instructions
- Include usage examples
### Security
- Follow security best practices
- Implement proper input validation
- Handle errors appropriately
- Document security considerations
## Submitting Changes
1. Commit your changes:
```bash
git add .
git commit -m "Description of changes"
```
2. Push to your fork:
```bash
git push origin my-feature
```
3. Create a Pull Request through GitHub
### Pull Request Guidelines
- Thoroughly test your changes
- Fill out the pull request template completely
- Link any related issues
- Provide clear description of changes
- Include any necessary documentation updates
- Add screenshots for UI changes
- List any breaking changes
We're more selective about adding wholly new documentation, especially in ways that aren't vendor neutral (e.g. how to run a particular server with a particular client).
## Community
- Participate in [GitHub Discussions](https://github.com/orgs/modelcontextprotocol/discussions)
- Follow the [Code of Conduct](CODE_OF_CONDUCT.md)
[Learn how the MCP community communicates](https://modelcontextprotocol.io/community/communication).
## Questions?
- Check the [documentation](https://modelcontextprotocol.io)
- Ask in GitHub Discussions
Thank you for contributing to MCP Servers!
Thank you for helping make MCP servers better for everyone!

337
README.md

File diff suppressed because it is too large Load Diff

View File

@@ -3,7 +3,13 @@ Thank you for helping us keep our MCP servers secure.
The **reference servers** in this repo are maintained by [Anthropic](https://www.anthropic.com/) as part of the Model Context Protocol project.
The security of our systems and user data is Anthropics top priority. We appreciate the work of security researchers acting in good faith in identifying and reporting potential vulnerabilities.
The security of our systems and user data is Anthropic's top priority. We appreciate the work of security researchers acting in good faith in identifying and reporting potential vulnerabilities.
## Important Notice
The servers in this repository are **reference implementations** intended to demonstrate MCP features and SDK usage. They serve as educational examples for developers building their own MCP servers, not as production-ready solutions.
**Bug bounties are not awarded for security vulnerabilities found in these reference servers.** Our bug bounty program applies exclusively to the [MCP SDKs](https://github.com/modelcontextprotocol) maintained by Anthropic. If you discover a vulnerability in an MCP SDK that is maintained by Anthropic, please report it through our vulnerability disclosure program below.
## Vulnerability Disclosure Program

6477
package-lock.json generated

File diff suppressed because it is too large Load Diff

View File

@@ -97,6 +97,9 @@ class PyPiPackage:
with open(self.path / "pyproject.toml", "w") as f:
f.write(tomlkit.dumps(data))
# Regenerate uv.lock to match the updated pyproject.toml
subprocess.run(["uv", "lock"], cwd=self.path, check=True)
def has_changes(path: Path, git_hash: GitHash) -> bool:
"""Check if any files changed between current state and git hash"""

View File

@@ -0,0 +1,4 @@
packages
dist
README.md
node_modules

52
src/everything/AGENTS.md Normal file
View File

@@ -0,0 +1,52 @@
# MCP "Everything" Server - Development Guidelines
## Build, Test & Run Commands
- Build: `npm run build` - Compiles TypeScript to JavaScript
- Watch mode: `npm run watch` - Watches for changes and rebuilds automatically
- Run STDIO server: `npm run start:stdio` - Starts the MCP server using stdio transport
- Run SSE server: `npm run start:sse` - Starts the MCP server with SSE transport
- Run StreamableHttp server: `npm run start:stremableHttp` - Starts the MCP server with StreamableHttp transport
- Prepare release: `npm run prepare` - Builds the project for publishing
## Code Style Guidelines
- Use ES modules with `.js` extension in import paths
- Strictly type all functions and variables with TypeScript
- Follow zod schema patterns for tool input validation
- Prefer async/await over callbacks and Promise chains
- Place all imports at top of file, grouped by external then internal
- Use descriptive variable names that clearly indicate purpose
- Implement proper cleanup for timers and resources in server shutdown
- Handle errors with try/catch blocks and provide clear error messages
- Use consistent indentation (2 spaces) and trailing commas in multi-line objects
- Match existing code style, import order, and module layout in the respective folder.
- Use camelCase for variables/functions,
- Use PascalCase for types/classes,
- Use UPPER_CASE for constants
- Use kebab-case for file names and registered tools, prompts, and resources.
- Use verbs for tool names, e.g., `get-annotated-message` instead of `annotated-message`
## Extending the Server
The Everything Server is designed to be extended at well-defined points.
See [Extension Points](docs/extension.md) and [Project Structure](docs/structure.md).
The server factory is `src/everything/server/index.ts` and registers all features during startup as well as handling post-connection setup.
### High-level
- Tools live under `src/everything/tools/` and are registered via `registerTools(server)`.
- Resources live under `src/everything/resources/` and are registered via `registerResources(server)`.
- Prompts live under `src/everything/prompts/` and are registered via `registerPrompts(server)`.
- Subscriptions and simulated update routines are under `src/everything/resources/subscriptions.ts`.
- Logging helpers are under `src/everything/server/logging.ts`.
- Transport managers are under `src/everything/transports/`.
### When adding a new feature
- Follow the existing file/module pattern in its folder (naming, exports, and registration function).
- Export a `registerX(server)` function that registers new items with the MCP SDK in the same style as existing ones.
- Wire your new module into the central index (e.g., update `tools/index.ts`, `resources/index.ts`, or `prompts/index.ts`).
- Ensure schemas (for tools) are accurate JSON Schema and include helpful descriptions and examples.
`server/index.ts` and usages in `logging.ts` and `subscriptions.ts`.
- Keep the docs in `src/everything/docs/` up to date if you add or modify noteworthy features.

View File

@@ -1,20 +0,0 @@
# MCP "Everything" Server - Development Guidelines
## Build, Test & Run Commands
- Build: `npm run build` - Compiles TypeScript to JavaScript
- Watch mode: `npm run watch` - Watches for changes and rebuilds automatically
- Run server: `npm run start` - Starts the MCP server using stdio transport
- Run SSE server: `npm run start:sse` - Starts the MCP server with SSE transport
- Prepare release: `npm run prepare` - Builds the project for publishing
## Code Style Guidelines
- Use ES modules with `.js` extension in import paths
- Strictly type all functions and variables with TypeScript
- Follow zod schema patterns for tool input validation
- Prefer async/await over callbacks and Promise chains
- Place all imports at top of file, grouped by external then internal
- Use descriptive variable names that clearly indicate purpose
- Implement proper cleanup for timers and resources in server shutdown
- Follow camelCase for variables/functions, PascalCase for types/classes, UPPER_CASE for constants
- Handle errors with try/catch blocks and provide clear error messages
- Use consistent indentation (2 spaces) and trailing commas in multi-line objects

View File

@@ -1,147 +1,17 @@
# Everything MCP Server
**[Architecture](docs/architecture.md)
| [Project Structure](docs/structure.md)
| [Startup Process](docs/startup.md)
| [Server Features](docs/features.md)
| [Extension Points](docs/extension.md)
| [How It Works](docs/how-it-works.md)**
This MCP server attempts to exercise all the features of the MCP protocol. It is not intended to be a useful server, but rather a test server for builders of MCP clients. It implements prompts, tools, resources, sampling, and more to showcase MCP capabilities.
## Components
## Tools, Resources, Prompts, and Other Features
### Tools
1. `echo`
- Simple tool to echo back input messages
- Input:
- `message` (string): Message to echo back
- Returns: Text content with echoed message
2. `add`
- Adds two numbers together
- Inputs:
- `a` (number): First number
- `b` (number): Second number
- Returns: Text result of the addition
3. `longRunningOperation`
- Demonstrates progress notifications for long operations
- Inputs:
- `duration` (number, default: 10): Duration in seconds
- `steps` (number, default: 5): Number of progress steps
- Returns: Completion message with duration and steps
- Sends progress notifications during execution
4. `printEnv`
- Prints all environment variables
- Useful for debugging MCP server configuration
- No inputs required
- Returns: JSON string of all environment variables
5. `sampleLLM`
- Demonstrates LLM sampling capability using MCP sampling feature
- Inputs:
- `prompt` (string): The prompt to send to the LLM
- `maxTokens` (number, default: 100): Maximum tokens to generate
- Returns: Generated LLM response
6. `getTinyImage`
- Returns a small test image
- No inputs required
- Returns: Base64 encoded PNG image data
7. `annotatedMessage`
- Demonstrates how annotations can be used to provide metadata about content
- Inputs:
- `messageType` (enum: "error" | "success" | "debug"): Type of message to demonstrate different annotation patterns
- `includeImage` (boolean, default: false): Whether to include an example image
- Returns: Content with varying annotations:
- Error messages: High priority (1.0), visible to both user and assistant
- Success messages: Medium priority (0.7), user-focused
- Debug messages: Low priority (0.3), assistant-focused
- Optional image: Medium priority (0.5), user-focused
- Example annotations:
```json
{
"priority": 1.0,
"audience": ["user", "assistant"]
}
```
8. `getResourceReference`
- Returns a resource reference that can be used by MCP clients
- Inputs:
- `resourceId` (number, 1-100): ID of the resource to reference
- Returns: A resource reference with:
- Text introduction
- Embedded resource with `type: "resource"`
- Text instruction for using the resource URI
9. `startElicitation`
- Initiates an elicitation (interaction) within the MCP client.
- Inputs:
- `color` (string): Favorite color
- `number` (number, 1-100): Favorite number
- `pets` (enum): Favorite pet
- Returns: Confirmation of the elicitation demo with selection summary.
10. `structuredContent`
- Demonstrates a tool returning structured content using the example in the specification
- Provides an output schema to allow testing of client SHOULD advisory to validate the result using the schema
- Inputs:
- `location` (string): A location or ZIP code, mock data is returned regardless of value
- Returns: a response with
- `structuredContent` field conformant to the output schema
- A backward compatible Text Content field, a SHOULD advisory in the specification
### Resources
The server provides 100 test resources in two formats:
- Even numbered resources:
- Plaintext format
- URI pattern: `test://static/resource/{even_number}`
- Content: Simple text description
- Odd numbered resources:
- Binary blob format
- URI pattern: `test://static/resource/{odd_number}`
- Content: Base64 encoded binary data
Resource features:
- Supports pagination (10 items per page)
- Allows subscribing to resource updates
- Demonstrates resource templates
- Auto-updates subscribed resources every 5 seconds
### Prompts
1. `simple_prompt`
- Basic prompt without arguments
- Returns: Single message exchange
2. `complex_prompt`
- Advanced prompt demonstrating argument handling
- Required arguments:
- `temperature` (number): Temperature setting
- Optional arguments:
- `style` (string): Output style preference
- Returns: Multi-turn conversation with images
3. `resource_prompt`
- Demonstrates embedding resource references in prompts
- Required arguments:
- `resourceId` (number): ID of the resource to embed (1-100)
- Returns: Multi-turn conversation with an embedded resource reference
- Shows how to include resources directly in prompt messages
### Logging
The server sends random-leveled log messages every 15 seconds, e.g.:
```json
{
"method": "notifications/message",
"params": {
"level": "info",
"data": "Info-level message"
}
}
```
A complete list of the registered MCP primitives and other protocol features demonstrated can be found in the [Server Features](docs/features.md) document.
## Usage with Claude Desktop (uses [stdio Transport](https://modelcontextprotocol.io/specification/2025-03-26/basic/transports#stdio))
@@ -177,7 +47,7 @@ Add the configuration to your user-level MCP configuration file. Open the Comman
**Method 2: Workspace Configuration**
Alternatively, you can add the configuration to a file called `.vscode/mcp.json` in your workspace. This will allow you to share the configuration with others.
> For more details about MCP configuration in VS Code, see the [official VS Code MCP documentation](https://code.visualstudio.com/docs/copilot/mcp).
> For more details about MCP configuration in VS Code, see the [official VS Code MCP documentation](https://code.visualstudio.com/docs/copilot/customization/mcp-servers).
#### NPX

View File

@@ -0,0 +1,44 @@
# Everything Server Architecture
**Architecture
| [Project Structure](structure.md)
| [Startup Process](startup.md)
| [Server Features](features.md)
| [Extension Points](extension.md)
| [How It Works](how-it-works.md)**
This documentation summarizes the current layout and runtime architecture of the `src/everything` package.
It explains how the server starts, how transports are wired, where tools, prompts, and resources are registered, and how to extend the system.
## High-Level Overview
### Purpose
A minimal, modular MCP server showcasing core Model Context Protocol features. It exposes simple tools, prompts, and resources, and can be run over multiple transports (STDIO, SSE, and Streamable HTTP).
### Design
A small “server factory” constructs the MCP server and registers features.
Transports are separate entry points that create/connect the server and handle network concerns.
Tools, prompts, and resources are organized in their own submodules.
### Multi-client
The server supports multiple concurrent clients. Tracking per session data is demonstrated with
resource subscriptions and simulated logging.
## Build and Distribution
- TypeScript sources are compiled into `dist/` via `npm run build`.
- The `build` script copies `docs/` into `dist/` so instruction files ship alongside the compiled server.
- The CLI bin is configured in `package.json` as `mcp-server-everything` → `dist/index.js`.
## [Project Structure](structure.md)
## [Startup Process](startup.md)
## [Server Features](features.md)
## [Extension Points](extension.md)
## [How It Works](how-it-works.md)

View File

@@ -0,0 +1,23 @@
# Everything Server - Extension Points
**[Architecture](architecture.md)
| [Project Structure](structure.md)
| [Startup Process](startup.md)
| [Server Features](features.md)
| Extension Points
| [How It Works](how-it-works.md)**
## Adding Tools
- Create a new file under `tools/` with your `registerXTool(server)` function that registers the tool via `server.registerTool(...)`.
- Export and call it from `tools/index.ts` inside `registerTools(server)`.
## Adding Prompts
- Create a new file under `prompts/` with your `registerXPrompt(server)` function that registers the prompt via `server.registerPrompt(...)`.
- Export and call it from `prompts/index.ts` inside `registerPrompts(server)`.
## Adding Resources
- Create a new file under `resources/` with your `registerXResources(server)` function using `server.registerResource(...)` (optionally with `ResourceTemplate`).
- Export and call it from `resources/index.ts` inside `registerResources(server)`.

View File

@@ -0,0 +1,103 @@
# Everything Server - Features
**[Architecture](architecture.md)
| [Project Structure](structure.md)
| [Startup Process](startup.md)
| Server Features
| [Extension Points](extension.md)
| [How It Works](how-it-works.md)**
## Tools
- `echo` (tools/echo.ts): Echoes the provided `message: string`. Uses Zod to validate inputs.
- `get-annotated-message` (tools/get-annotated-message.ts): Returns a `text` message annotated with `priority` and `audience` based on `messageType` (`error`, `success`, or `debug`); can optionally include an annotated `image`.
- `get-env` (tools/get-env.ts): Returns all environment variables from the running process as pretty-printed JSON text.
- `get-resource-links` (tools/get-resource-links.ts): Returns an intro `text` block followed by multiple `resource_link` items. For a requested `count` (1–10), alternates between dynamic Text and Blob resources using URIs from `resources/templates.ts`.
- `get-resource-reference` (tools/get-resource-reference.ts): Accepts `resourceType` (`text` or `blob`) and `resourceId` (positive integer). Returns a concrete `resource` content block (with its `uri`, `mimeType`, and data) with surrounding explanatory `text`.
- `get-roots-list` (tools/get-roots-list.ts): Returns the last list of roots sent by the client.
- `gzip-file-as-resource` (tools/gzip-file-as-resource.ts): Accepts a `name` and `data` (URL or data URI), fetches the data subject to size/time/domain constraints, compresses it, registers it as a session resource at `demo://resource/session/<name>` with `mimeType: application/gzip`, and returns either a `resource_link` (default) or an inline `resource` depending on `outputType`.
- `get-structured-content` (tools/get-structured-content.ts): Demonstrates structured responses. Accepts `location` input and returns both backwardcompatible `content` (a `text` block containing JSON) and `structuredContent` validated by an `outputSchema` (temperature, conditions, humidity).
- `get-sum` (tools/get-sum.ts): For two numbers `a` and `b` calculates and returns their sum. Uses Zod to validate inputs.
- `get-tiny-image` (tools/get-tiny-image.ts): Returns a tiny PNG MCP logo as an `image` content item with brief descriptive text before and after.
- `trigger-long-running-operation` (tools/trigger-long-running-operation.ts): Simulates a multi-step operation over a given `duration` and number of `steps`; reports progress via `notifications/progress` when a `progressToken` is provided by the client.
- `toggle-simulated-logging` (tools/toggle-simulated-logging.ts): Starts or stops simulated, randomleveled logging for the invoking session. Respects the clients selected minimum logging level.
- `toggle-subscriber-updates` (tools/toggle-subscriber-updates.ts): Starts or stops simulated resource update notifications for URIs the invoking session has subscribed to.
- `trigger-sampling-request` (tools/trigger-sampling-request.ts): Issues a `sampling/createMessage` request to the client/LLM using provided `prompt` and optional generation controls; returns the LLM's response payload.
- `simulate-research-query` (tools/simulate-research-query.ts): Demonstrates MCP Tasks (SEP-1686) with a simulated multi-stage research operation. Accepts `topic` and `ambiguous` parameters. Returns a task that progresses through stages with status updates. If `ambiguous` is true and client supports elicitation, sends an elicitation request directly to gather clarification before completing.
- `trigger-sampling-request-async` (tools/trigger-sampling-request-async.ts): Demonstrates bidirectional tasks where the server sends a sampling request that the client executes as a background task. Server polls for status and retrieves the LLM result when complete. Requires client to support `tasks.requests.sampling.createMessage`.
- `trigger-elicitation-request-async` (tools/trigger-elicitation-request-async.ts): Demonstrates bidirectional tasks where the server sends an elicitation request that the client executes as a background task. Server polls while waiting for user input. Requires client to support `tasks.requests.elicitation.create`.
## Prompts
- `simple-prompt` (prompts/simple.ts): No-argument prompt that returns a static user message.
- `args-prompt` (prompts/args.ts): Two-argument prompt with `city` (required) and `state` (optional) used to compose a question.
- `completable-prompt` (prompts/completions.ts): Demonstrates argument auto-completions with the SDK's `completable` helper; `department` completions drive context-aware `name` suggestions.
- `resource-prompt` (prompts/resource.ts): Accepts `resourceType` ("Text" or "Blob") and `resourceId` (string convertible to integer) and returns messages that include an embedded dynamic resource of the selected type generated via `resources/templates.ts`.
## Resources
- Dynamic Text: `demo://resource/dynamic/text/{index}` (content generated on the fly)
- Dynamic Blob: `demo://resource/dynamic/blob/{index}` (base64 payload generated on the fly)
- Static Documents: `demo://resource/static/document/<filename>` (serves files from `src/everything/docs/` as static file-based resources)
- Session Scoped: `demo://resource/session/<name>` (per-session resources registered dynamically; available only for the lifetime of the session)
## Resource Subscriptions and Notifications
- Simulated update notifications are opt-in and off by default.
- Clients may subscribe/unsubscribe to resource URIs using the MCP `resources/subscribe` and `resources/unsubscribe` requests.
- Use the `toggle-subscriber-updates` tool to start/stop a per-session interval that emits `notifications/resources/updated { uri }` only for URIs that session has subscribed to.
- Multiple concurrent clients are supported; each client's subscriptions are tracked per session and notifications are delivered independently via the server instance associated with that session.
## Simulated Logging
- Simulated logging is available but off by default.
- Use the `toggle-simulated-logging` tool to start/stop periodic log messages of varying levels (debug, info, notice, warning, error, critical, alert, emergency) per session.
- Clients can control the minimum level they receive via the standard MCP `logging/setLevel` request.
## Tasks (SEP-1686)
The server advertises support for MCP Tasks, enabling long-running operations with status tracking:
- **Capabilities advertised**: `tasks.list`, `tasks.cancel`, `tasks.requests.tools.call`
- **Task Store**: Uses `InMemoryTaskStore` from SDK experimental for task lifecycle management
- **Message Queue**: Uses `InMemoryTaskMessageQueue` for task-related messaging
### Task Lifecycle
1. Client calls `tools/call` with `task: true` parameter
2. Server returns `CreateTaskResult` with `taskId` instead of immediate result
3. Client polls `tasks/get` to check status and receive `statusMessage` updates
4. When status is `completed`, client calls `tasks/result` to retrieve the final result
### Task Statuses
- `working`: Task is actively processing
- `input_required`: Task needs additional input (server sends elicitation request directly)
- `completed`: Task finished successfully
- `failed`: Task encountered an error
- `cancelled`: Task was cancelled by client
### Demo Tools
**Server-side tasks (client calls server):**
Use the `simulate-research-query` tool to exercise the full task lifecycle. Set `ambiguous: true` to trigger elicitation - the server will send an `elicitation/create` request directly and await the response before completing.
**Client-side tasks (server calls client):**
Use `trigger-sampling-request-async` or `trigger-elicitation-request-async` to demonstrate bidirectional tasks where the server sends requests that the client executes as background tasks. These require the client to advertise `tasks.requests.sampling.createMessage` or `tasks.requests.elicitation.create` capabilities respectively.
### Bidirectional Task Flow
MCP Tasks are bidirectional - both server and client can be task executors:
| Direction | Request Type | Task Executor | Demo Tool |
| ---------------- | ------------------------ | ------------- | ----------------------------------- |
| Client -> Server | `tools/call` | Server | `simulate-research-query` |
| Server -> Client | `sampling/createMessage` | Client | `trigger-sampling-request-async` |
| Server -> Client | `elicitation/create` | Client | `trigger-elicitation-request-async` |
For client-side tasks:
1. Server sends request with task metadata (e.g., `params.task.ttl`)
2. Client creates task and returns `CreateTaskResult` with `taskId`
3. Server polls `tasks/get` for status updates
4. When complete, server calls `tasks/result` to retrieve the result

View File

@@ -0,0 +1,45 @@
# Everything Server - How It Works
**[Architecture](architecture.md)
| [Project Structure](structure.md)
| [Startup Process](startup.md)
| [Server Features](features.md)
| [Extension Points](extension.md)
| How It Works**
# Conditional Tool Registration
### Module: `server/index.ts`
- Some tools require client support for the capability they demonstrate. These are:
- `get-roots-list`
- `trigger-elicitation-request`
- `trigger-sampling-request`
- Client capabilities aren't known until after the initialization handshake is complete.
- Most tools are registered immediately during the Server Factory execution, prior to client connection.
- To defer registration of these commands until client capabilities are known, a `registerConditionalTools(server)` function is invoked from an `oninitialized` handler.
## Resource Subscriptions
### Module: `resources/subscriptions.ts`
- Tracks subscribers per URI: `Map<uri, Set<sessionId>>`.
- Installs handlers via `setSubscriptionHandlers(server)` to process subscribe/unsubscribe requests and keep the map updated.
- Updates are started/stopped on demand by the `toggle-subscriber-updates` tool, which calls `beginSimulatedResourceUpdates(server, sessionId)` and `stopSimulatedResourceUpdates(sessionId)`.
- `cleanup(sessionId?)` calls `stopSimulatedResourceUpdates(sessionId)` to clear intervals and remove session-scoped state.
## Session-scoped Resources
### Module: `resources/session.ts`
- `getSessionResourceURI(name: string)`: Builds a session resource URI: `demo://resource/session/<name>`.
- `registerSessionResource(server, resource, type, payload)`: Registers a resource with the given `uri`, `name`, and `mimeType`, returning a `resource_link`. The content is served from memory for the life of the session only. Supports `type: "text" | "blob"` and returns data in the corresponding field.
- Intended usage: tools can create and expose per-session artifacts without persisting them. For example, `tools/gzip-file-as-resource.ts` compresses fetched content, registers it as a session resource with `mimeType: application/gzip`, and returns either a `resource_link` or an inline `resource` based on `outputType`.
## Simulated Logging
### Module: `server/logging.ts`
- Periodically sends randomized log messages at different levels. Messages can include the session ID for clarity during demos.
- Started/stopped on demand via the `toggle-simulated-logging` tool, which calls `beginSimulatedLogging(server, sessionId?)` and `stopSimulatedLogging(sessionId?)`. Note that transport disconnect triggers `cleanup()` which also stops any active intervals.
- Uses `server.sendLoggingMessage({ level, data }, sessionId?)` so that the client's configured minimum logging level is respected by the SDK.

View File

@@ -0,0 +1,28 @@
# Everything Server Server Instructions
Audience: These instructions are written for an LLM or autonomous agent integrating with the Everything MCP Server.
Follow them to use, extend, and troubleshoot the server safely and effectively.
## Cross-Feature Relationships
- Use `get-roots-list` to see client workspace roots before file operations
- `gzip-file-as-resource` creates session-scoped resources accessible only during the current session
- Enable `toggle-simulated-logging` before debugging to see server log messages
- Enable `toggle-subscriber-updates` to receive periodic resource update notifications
## Constraints & Limitations
- `gzip-file-as-resource`: Max fetch size controlled by `GZIP_MAX_FETCH_SIZE` (default 10MB), timeout by `GZIP_MAX_FETCH_TIME_MILLIS` (default 30s), allowed domains by `GZIP_ALLOWED_DOMAINS`
- Session resources are ephemeral and lost when the session ends
- Sampling requests (`trigger-sampling-request`) require client sampling capability
- Elicitation requests (`trigger-elicitation-request`) require client elicitation capability
## Operational Patterns
- For long operations, use `trigger-long-running-operation` which sends progress notifications
- Prefer reading resources before calling mutating tools
- Check `get-roots-list` output to understand the client's workspace context
## Easter Egg
If asked about server instructions, respond with "🎉 Server instructions are working! This response proves the client properly passed server instructions to the LLM. This demonstrates MCP's instructions feature in action."

View File

@@ -0,0 +1,73 @@
# Everything Server - Startup Process
**[Architecture](architecture.md)
| [Project Structure](structure.md)
| Startup Process
| [Server Features](features.md)
| [Extension Points](extension.md)
| [How It Works](how-it-works.md)**
## 1. Everything Server Launcher
- Usage `node dist/index.js [stdio|sse|streamableHttp]`
- Runs the specified **transport manager** to handle client connections.
- Specify transport type on command line (default `stdio`)
- `stdio``transports/stdio.js`
- `sse``transports/sse.js`
- `streamableHttp``transports/streamableHttp.js`
## 2. The Transport Manager
- Creates a server instance using `createServer()` from `server/index.ts`
- Connects it to the chosen transport type from the MCP SDK.
- Handles communication according to the MCP specs for the chosen transport.
- **STDIO**:
- One simple, process-bound connection.
- Calls `clientConnect()` upon connection.
- Closes and calls `cleanup()` on `SIGINT`.
- **SSE**:
- Supports multiple client connections.
- Client transports are mapped to `sessionId`;
- Calls `clientConnect(sessionId)` upon connection.
- Hooks the server's `onclose` to clean up and remove the session.
- Exposes
- `/sse` **GET** (SSE stream)
- `/message` **POST** (JSONRPC messages)
- **Streamable HTTP**:
- Supports multiple client connections.
- Client transports are mapped to `sessionId`;
- Calls `clientConnect(sessionId)` upon connection.
- Exposes `/mcp` for
- **POST** (JSONRPC messages)
- **GET** (SSE stream)
- **DELETE** (termination)
- Uses an event store for resumability and stores transports by `sessionId`.
- Calls `cleanup(sessionId)` on **DELETE**.
## 3. The Server Factory
- Invoke `createServer()` from `server/index.ts`
- Creates a new `McpServer` instance with
- **Capabilities**:
- `tools: {}`
- `logging: {}`
- `prompts: {}`
- `resources: { subscribe: true }`
- **Server Instructions**
- Loaded from the docs folder (`server-instructions.md`).
- **Registrations**
- Registers **tools** via `registerTools(server)`.
- Registers **resources** via `registerResources(server)`.
- Registers **prompts** via `registerPrompts(server)`.
- **Other Request Handlers**
- Sets up resource subscription handlers via `setSubscriptionHandlers(server)`.
- Roots list change handler is added post-connection via the `clientConnect(sessionId)` callback.
- **Returns**
- The `McpServer` instance
- A `clientConnect(sessionId)` callback that enables post-connection setup
- A `cleanup(sessionId?)` callback that stops any active intervals and removes any session-scoped state
## Enabling Multiple Clients
Some of the transport managers defined in the `transports` folder can support multiple clients.
In order to do so, they must map certain data to a session identifier.

View File

@@ -0,0 +1,182 @@
# Everything Server - Project Structure
**[Architecture](architecture.md)
| Project Structure
| [Startup Process](startup.md)
| [Server Features](features.md)
| [Extension Points](extension.md)
| [How It Works](how-it-works.md)**
```
src/everything
├── index.ts
├── AGENTS.md
├── package.json
├── docs
│ ├── architecture.md
│ ├── extension.md
│ ├── features.md
│ ├── how-it-works.md
│   ├── server-instructions.md
│ ├── startup.md
│ └── structure.md
├── prompts
│ ├── index.ts
│ ├── args.ts
│ ├── completions.ts
│ ├── simple.ts
│ └── resource.ts
├── resources
│ ├── index.ts
│ ├── files.ts
│ ├── session.ts
│ ├── subscriptions.ts
│ └── templates.ts
├── server
│ ├── index.ts
│ ├── logging.ts
│ └── roots.ts
├── tools
│ ├── index.ts
│ ├── echo.ts
│ ├── get-annotated-message.ts
│ ├── get-env.ts
│ ├── get-resource-links.ts
│ ├── get-resource-reference.ts
│ ├── get-roots-list.ts
│ ├── get-structured-content.ts
│ ├── get-sum.ts
│ ├── get-tiny-image.ts
│ ├── gzip-file-as-resource.ts
│ ├── toggle-simulated-logging.ts
│ ├── toggle-subscriber-updates.ts
│ ├── trigger-elicitation-request.ts
│ ├── trigger-long-running-operation.ts
│ └── trigger-sampling-request.ts
└── transports
├── sse.ts
├── stdio.ts
└── streamableHttp.ts
```
# Project Contents
## `src/everything`:
### `index.ts`
- CLI entry point that selects and runs a specific transport module based on the first CLI argument: `stdio`, `sse`, or `streamableHttp`.
### `AGENTS.md`
- Directions for Agents/LLMs explaining coding guidelines and how to appropriately extend the server.
### `package.json`
- Package metadata and scripts:
- `build`: TypeScript compile to `dist/`, copies `docs/` into `dist/` and marks the compiled entry scripts as executable.
- `start:stdio`, `start:sse`, `start:streamableHttp`: Run built transports from `dist/`.
- Declares dependencies on `@modelcontextprotocol/sdk`, `express`, `cors`, `zod`, etc.
### `docs/`
- `architecture.md`
- This document.
- `server-instructions.md`
- Human-readable instructions intended to be passed to the client/LLM for guidance on server use. Loaded by the server at startup and returned in the "initialize" exchange.
### `prompts/`
- `index.ts`
- `registerPrompts(server)` orchestrator; delegates to prompt factory/registration methods in individual prompt files.
- `simple.ts`
- Registers `simple-prompt`: a prompt with no arguments that returns a single user message.
- `args.ts`
- Registers `args-prompt`: a prompt with two arguments (`city` required, `state` optional) used to compose a message.
- `completions.ts`
- Registers `completable-prompt`: a prompt whose arguments support server-driven completions using the SDK's `completable(...)` helper (e.g., completing `department` and context-aware `name`).
- `resource.ts`
- Exposes `registerEmbeddedResourcePrompt(server)` which registers `resource-prompt` — a prompt that accepts `resourceType` ("Text" or "Blob") and `resourceId` (integer), and embeds a dynamically generated resource of the requested type within the returned messages. Internally reuses helpers from `resources/templates.ts`.
### `resources/`
- `index.ts`
- `registerResources(server)` orchestrator; delegates to resource factory/registration methods from individual resource files.
- `templates.ts`
- Registers two dynamic, template-driven resources using `ResourceTemplate`:
- Text: `demo://resource/dynamic/text/{index}` (MIME: `text/plain`)
- Blob: `demo://resource/dynamic/blob/{index}` (MIME: `application/octet-stream`, Base64 payload)
- The `{index}` path variable must be a finite positive integer. Content is generated on demand with a timestamp.
- Exposes helpers `textResource(uri, index)`, `textResourceUri(index)`, `blobResource(uri, index)`, and `blobResourceUri(index)` so other modules can construct and embed dynamic resources directly (e.g., from prompts).
- `files.ts`
- Registers static file-based resources for each file in the `docs/` folder.
- URIs follow the pattern: `demo://resource/static/document/<filename>`.
- Serves markdown files as `text/markdown`, `.txt` as `text/plain`, `.json` as `application/json`, others default to `text/plain`.
### `server/`
- `index.ts`
- Server factory that creates an `McpServer` with declared capabilities, loads server instructions, and registers tools, prompts, and resources.
- Sets resource subscription handlers via `setSubscriptionHandlers(server)`.
- Exposes `{ server, cleanup }` to the chosen transport. Cleanup stops any running intervals in the server when the transport disconnects.
- `logging.ts`
- Implements simulated logging. Periodically sends randomized log messages at various levels to the connected client session. Started/stopped on demand via a dedicated tool.
### `tools/`
- `index.ts`
- `registerTools(server)` orchestrator; delegates to tool factory/registration methods in individual tool files.
- `echo.ts`
- Registers an `echo` tool that takes a message and returns `Echo: {message}`.
- `get-annotated-message.ts`
- Registers an `annotated-message` tool which demonstrates annotated content items by emitting a primary `text` message with `annotations` that vary by `messageType` (`"error" | "success" | "debug"`), and optionally includes an annotated `image` (tiny PNG) when `includeImage` is true.
- `get-env.ts`
- Registers a `get-env` tool that returns the current process environment variables as formatted JSON text; useful for debugging configuration.
- `get-resource-links.ts`
- Registers a `get-resource-links` tool that returns an intro `text` block followed by multiple `resource_link` items.
- `get-resource-reference.ts`
- Registers a `get-resource-reference` tool that returns a reference for a selected dynamic resource.
- `get-roots-list.ts`
- Registers a `get-roots-list` tool that returns the last list of roots sent by the client.
- `gzip-file-as-resource.ts`
- Registers a `gzip-file-as-resource` tool that fetches content from a URL or data URI, compresses it, and then either:
- returns a `resource_link` to a session-scoped resource (default), or
- returns an inline `resource` with the gzipped data. The resource will still be discoverable for the duration of the session via `resources/list`.
- Uses `resources/session.ts` to register the gzipped blob as a per-session resource at a URI like `demo://resource/session/<name>` with `mimeType: application/gzip`.
- Environment controls:
- `GZIP_MAX_FETCH_SIZE` (bytes, default 10 MiB)
- `GZIP_MAX_FETCH_TIME_MILLIS` (ms, default 30000)
- `GZIP_ALLOWED_DOMAINS` (comma-separated allowlist; empty means all domains allowed)
- `trigger-elicitation-request.ts`
- Registers a `trigger-elicitation-request` tool that sends an `elicitation/create` request to the client/LLM and returns the elicitation result.
- `trigger-sampling-request.ts`
- Registers a `trigger-sampling-request` tool that sends a `sampling/createMessage` request to the client/LLM and returns the sampling result.
- `get-structured-content.ts`
- Registers a `get-structured-content` tool that demonstrates structuredContent block responses.
- `get-sum.ts`
- Registers a `get-sum` tool with a Zod input schema that sums two numbers `a` and `b` and returns the result.
- `get-tiny-image.ts`
- Registers a `get-tiny-image` tool, which returns a tiny PNG MCP logo as an `image` content item, along with surrounding descriptive `text` items.
- `trigger-long-running-operation.ts`
- Registers a `long-running-operation` tool that simulates a long-running task over a specified `duration` (seconds) and number of `steps`; emits `notifications/progress` updates when the client supplies a `progressToken`.
- `toggle-simulated-logging.ts`
- Registers a `toggle-simulated-logging` tool, which starts or stops simulated logging for the invoking session.
- `toggle-subscriber-updates.ts`
- Registers a `toggle-subscriber-updates` tool, which starts or stops simulated resource subscription update checks for the invoking session.
### `transports/`
- `stdio.ts`
- Starts a `StdioServerTransport`, creates the server via `createServer()`, and connects it.
- Handles `SIGINT` to close cleanly and calls `cleanup()` to remove any live intervals.
- `sse.ts`
- Express server exposing:
- `GET /sse` to establish an SSE connection per session.
- `POST /message` for client messages.
- Manages multiple connected clients via a transport map.
- Starts an `SSEServerTransport`, creates the server via `createServer()`, and connects it to a new transport.
- On server disconnect, calls `cleanup()` to remove any live intervals.
- `streamableHttp.ts`
- Express server exposing a single `/mcp` endpoint for POST (JSONRPC), GET (SSE stream), and DELETE (session termination) using `StreamableHTTPServerTransport`.
- Uses an `InMemoryEventStore` for resumable sessions and tracks transports by `sessionId`.
- Connects a fresh server instance on initialization POST and reuses the transport for subsequent requests.

File diff suppressed because one or more lines are too long

View File

@@ -2,36 +2,41 @@
// Parse command line arguments first
const args = process.argv.slice(2);
const scriptName = args[0] || 'stdio';
const scriptName = args[0] || "stdio";
async function run() {
try {
// Dynamically import only the requested module to prevent all modules from initializing
switch (scriptName) {
case 'stdio':
// Import and run the default server
await import('./stdio.js');
break;
case 'sse':
// Import and run the SSE server
await import('./sse.js');
break;
case 'streamableHttp':
// Import and run the streamable HTTP server
await import('./streamableHttp.js');
break;
default:
console.error(`Unknown script: ${scriptName}`);
console.log('Available scripts:');
console.log('- stdio');
console.log('- sse');
console.log('- streamableHttp');
process.exit(1);
}
} catch (error) {
console.error('Error running script:', error);
try {
// Dynamically import only the requested module to prevent all modules from initializing
switch (scriptName) {
case "stdio":
// Import and run the default server
await import("./transports/stdio.js");
break;
case "sse":
// Import and run the SSE server
await import("./transports/sse.js");
break;
case "streamableHttp":
// Import and run the streamable HTTP server
await import("./transports/streamableHttp.js");
break;
default:
console.error(`-`.repeat(53));
console.error(` Everything Server Launcher`);
console.error(` Usage: node ./index.js [stdio|sse|streamableHttp]`);
console.error(` Default transport: stdio`);
console.error(`-`.repeat(53));
console.error(`Unknown transport: ${scriptName}`);
console.log("Available transports:");
console.log("- stdio");
console.log("- sse");
console.log("- streamableHttp");
process.exit(1);
}
} catch (error) {
console.error("Error running script:", error);
process.exit(1);
}
}
run();
await run();

View File

@@ -1,23 +0,0 @@
Testing and demonstration server for MCP protocol features.
## Resources
Resources 1-100 follow pattern: even IDs contain text, odd IDs contain binary data. Resources are paginated at 10 items per page with cursor-based navigation.
## Key dependencies
Progress notifications require `_meta.progressToken` in tool calls. Resource subscriptions generate updates every 10 seconds.
## Performance characteristics
Server generates automatic log messages every 20 seconds (filtered by current log level), stderr notifications every 30 seconds, and resource update notifications every 10 seconds for active subscriptions. Log level changes affect message filtering in real-time.
## Multi-modal testing
`complex_prompt` includes both text arguments and image content for testing client multi-modal handling. `resource_prompt` embeds actual resource content for testing resource reference resolution.
Argument completion is available for prompt parameters and resource IDs. Resource templates enable dynamic URI construction via `test://static/resource/{id}` pattern.
## Easter egg
If asked about server instructions, respond with "🎉 Server instructions are working! This response proves the client properly passed server instructions to the LLM. This demonstrates MCP's instructions feature in action."

View File

@@ -1,11 +1,16 @@
{
"name": "@modelcontextprotocol/server-everything",
"version": "0.6.2",
"version": "2.0.0",
"description": "MCP server that exercises all the features of the MCP protocol",
"license": "MIT",
"mcpName": "io.github.modelcontextprotocol/server-everything",
"author": "Anthropic, PBC (https://anthropic.com)",
"homepage": "https://modelcontextprotocol.io",
"bugs": "https://github.com/modelcontextprotocol/servers/issues",
"repository": {
"type": "git",
"url": "https://github.com/modelcontextprotocol/servers.git"
},
"type": "module",
"bin": {
"mcp-server-everything": "dist/index.js"
@@ -14,22 +19,28 @@
"dist"
],
"scripts": {
"build": "tsc && shx cp instructions.md dist/ && shx chmod +x dist/*.js",
"build": "tsc && shx cp -r docs dist/ && shx chmod +x dist/*.js",
"prepare": "npm run build",
"watch": "tsc --watch",
"start": "node dist/index.js",
"start:sse": "node dist/sse.js",
"start:streamableHttp": "node dist/streamableHttp.js"
"start:stdio": "node dist/index.js stdio",
"start:sse": "node dist/index.js sse",
"start:streamableHttp": "node dist/index.js streamableHttp",
"prettier:fix": "prettier --write .",
"prettier:check": "prettier --check ."
},
"dependencies": {
"@modelcontextprotocol/sdk": "^1.12.0",
"express": "^4.21.1",
"zod": "^3.23.8",
"@modelcontextprotocol/sdk": "^1.25.2",
"cors": "^2.8.5",
"express": "^5.2.1",
"jszip": "^3.10.1",
"zod": "^3.25.0",
"zod-to-json-schema": "^3.23.5"
},
"devDependencies": {
"@types/express": "^5.0.0",
"@types/cors": "^2.8.19",
"@types/express": "^5.0.6",
"shx": "^0.3.4",
"typescript": "^5.6.2"
"typescript": "^5.6.2",
"prettier": "^2.8.8"
}
}

View File

@@ -0,0 +1,41 @@
import { z } from "zod";
import { McpServer } from "@modelcontextprotocol/sdk/server/mcp.js";
/**
 * Register the "args-prompt" prompt, demonstrating prompt arguments:
 * - `city` is required, `state` is optional
 * - The handler composes both argument values into a single user message
 *
 * @param server The McpServer instance to register the prompt on
 */
export const registerArgumentsPrompt = (server: McpServer) => {
  server.registerPrompt(
    "args-prompt",
    {
      title: "Arguments Prompt",
      description: "A prompt with two arguments, one required and one optional",
      // Zod schema for the prompt arguments; only `city` is required.
      argsSchema: {
        city: z.string().describe("Name of the city"),
        state: z.string().describe("Name of the state").optional(),
      },
    },
    (args) => {
      // Append ", <state>" only when the optional state argument was provided.
      const state = args?.state;
      const location = state ? `${args?.city}, ${state}` : `${args?.city}`;
      return {
        messages: [
          {
            role: "user",
            content: {
              type: "text",
              text: `What's weather in ${location}?`,
            },
          },
        ],
      };
    }
  );
};

View File

@@ -0,0 +1,64 @@
import { z } from "zod";
import { McpServer } from "@modelcontextprotocol/sdk/server/mcp.js";
import { completable } from "@modelcontextprotocol/sdk/server/completable.js";
/**
 * Register the "completable-prompt" prompt, demonstrating completable arguments:
 * - Two required arguments, both with completion handlers
 * - The chosen `department` value is available in the completion context for
 *   `name`, so the second argument's suggestions depend on the first
 *
 * @param server The McpServer instance to register the prompt on
 */
export const registerPromptWithCompletions = (server: McpServer) => {
  // Candidate leads keyed by department. A Map lookup with a `?? []` fallback
  // mirrors the strict equality checks of an if/else chain: any unknown or
  // missing department yields no completions.
  const leadsByDepartment = new Map<string, string[]>([
    ["Engineering", ["Alice", "Bob", "Charlie"]],
    ["Sales", ["David", "Eve", "Frank"]],
    ["Marketing", ["Grace", "Henry", "Iris"]],
    ["Support", ["John", "Kim", "Lee"]],
  ]);

  // Prompt arguments, each wrapped with a completion handler.
  const promptArgsSchema = {
    department: completable(
      z.string().describe("Choose the department."),
      (value) =>
        ["Engineering", "Sales", "Marketing", "Support"].filter((d) =>
          d.startsWith(value)
        )
    ),
    name: completable(
      z
        .string()
        .describe("Choose a team member to lead the selected department."),
      (value, context) => {
        // The previously supplied `department` argument narrows the candidates.
        const department = context?.arguments?.["department"];
        const candidates =
          (department && leadsByDepartment.get(department)) ?? [];
        return candidates.filter((n) => n.startsWith(value));
      }
    ),
  };

  // Register the prompt
  server.registerPrompt(
    "completable-prompt",
    {
      title: "Team Management",
      description: "First argument choice narrows values for second argument.",
      argsSchema: promptArgsSchema,
    },
    ({ department, name }) => ({
      messages: [
        {
          role: "user",
          content: {
            type: "text",
            text: `Please promote ${name} to the head of the ${department} team.`,
          },
        },
      ],
    })
  );
};

View File

@@ -0,0 +1,17 @@
import { McpServer } from "@modelcontextprotocol/sdk/server/mcp.js";
import { registerSimplePrompt } from "./simple.js";
import { registerArgumentsPrompt } from "./args.js";
import { registerPromptWithCompletions } from "./completions.js";
import { registerEmbeddedResourcePrompt } from "./resource.js";
/**
 * Register all example prompts with the MCP server.
 *
 * @param server
 */
export const registerPrompts = (server: McpServer) => {
  // Registration order is preserved: simple, args, completions, resource.
  const registrars = [
    registerSimplePrompt,
    registerArgumentsPrompt,
    registerPromptWithCompletions,
    registerEmbeddedResourcePrompt,
  ];
  for (const register of registrars) {
    register(server);
  }
};

View File

@@ -0,0 +1,93 @@
import { McpServer } from "@modelcontextprotocol/sdk/server/mcp.js";
import {
resourceTypeCompleter,
resourceIdForPromptCompleter,
} from "../resources/templates.js";
import {
textResource,
textResourceUri,
blobResourceUri,
blobResource,
RESOURCE_TYPE_BLOB,
RESOURCE_TYPE_TEXT,
RESOURCE_TYPES,
} from "../resources/templates.js";
/**
 * Register a prompt with an embedded resource reference
 * - Takes a resource type ("Text" or "Blob") and a positive-integer id
 * - Returns the corresponding dynamically created resource embedded in the
 *   prompt messages
 * - Throws on an unknown resource type or a non-positive / non-integer id
 *
 * @param server
 */
export const registerEmbeddedResourcePrompt = (server: McpServer) => {
  // Prompt arguments; both carry completion handlers.
  const promptArgsSchema = {
    resourceType: resourceTypeCompleter,
    resourceId: resourceIdForPromptCompleter,
  };
  // Register the prompt
  server.registerPrompt(
    "resource-prompt",
    {
      title: "Resource Prompt",
      description: "A prompt that includes an embedded resource reference",
      argsSchema: promptArgsSchema,
    },
    (args) => {
      // Validate resource type argument against the known types.
      const resourceType = args.resourceType;
      if (
        !RESOURCE_TYPES.includes(
          resourceType as typeof RESOURCE_TYPE_TEXT | typeof RESOURCE_TYPE_BLOB
        )
      ) {
        throw new Error(
          `Invalid resourceType: ${args.resourceType}. Must be ${RESOURCE_TYPE_TEXT} or ${RESOURCE_TYPE_BLOB}.`
        );
      }
      // Validate resourceId argument. Prompt arguments are always strings,
      // so convert before checking. Number.isInteger already rejects NaN and
      // ±Infinity, so a separate Number.isFinite check is redundant.
      const resourceId = Number(args.resourceId);
      if (!Number.isInteger(resourceId) || resourceId < 1) {
        throw new Error(
          `Invalid resourceId: ${args.resourceId}. Must be a finite positive integer.`
        );
      }
      // Build the URI and fabricate the resource for the requested type.
      const isText = resourceType === RESOURCE_TYPE_TEXT;
      const uri = isText
        ? textResourceUri(resourceId)
        : blobResourceUri(resourceId);
      const resource = isText
        ? textResource(uri, resourceId)
        : blobResource(uri, resourceId);
      return {
        messages: [
          {
            role: "user",
            content: {
              type: "text",
              text: `This prompt includes the ${resourceType} resource with id: ${resourceId}. Please analyze the following resource:`,
            },
          },
          {
            role: "user",
            content: {
              type: "resource",
              resource: resource,
            },
          },
        ],
      };
    }
  );
};

View File

@@ -0,0 +1,29 @@
import { McpServer } from "@modelcontextprotocol/sdk/server/mcp.js";
/**
 * Register a simple prompt with no arguments
 * - Always returns the same fixed single user message
 *
 * @param server
 */
export const registerSimplePrompt = (server: McpServer) => {
  // The prompt body never varies, so build it once up front.
  const fixedMessage = {
    role: "user" as const,
    content: {
      type: "text" as const,
      text: "This is a simple prompt without arguments.",
    },
  };
  // Register the prompt with the server.
  server.registerPrompt(
    "simple-prompt",
    {
      title: "Simple Prompt",
      description: "A prompt with no arguments",
    },
    () => ({ messages: [fixedMessage] })
  );
};

View File

@@ -0,0 +1,89 @@
import { McpServer } from "@modelcontextprotocol/sdk/server/mcp.js";
import { dirname, join } from "path";
import { fileURLToPath } from "url";
import { readdirSync, readFileSync, statSync } from "fs";
/**
 * Register static file resources
 * - Each file in src/everything/docs is exposed as an individual static resource
 * - URIs follow the pattern: "demo://resource/static/document/<filename>"
 * - Markdown (.md/.markdown) files are served as mime type "text/markdown"
 * - JSON (.json) files are served as mime type "application/json"
 * - Everything else (including .txt) is served as "text/plain"
 *
 * @param server
 */
export const registerFileResources = (server: McpServer) => {
  // Resolve the docs directory relative to this module file.
  const moduleDir = dirname(fileURLToPath(import.meta.url));
  const docsDir = join(moduleDir, "..", "docs");
  let entries: string[];
  try {
    entries = readdirSync(docsDir);
  } catch {
    // If docs/ folder is missing or unreadable, just skip registration
    return;
  }
  for (const name of entries) {
    const fullPath = join(docsDir, name);
    // Only process regular files; skip directories and unstat-able entries.
    let isFile = false;
    try {
      isFile = statSync(fullPath).isFile();
    } catch {
      isFile = false;
    }
    if (!isFile) continue;
    // Prepare file resource info
    const uri = `demo://resource/static/document/${encodeURIComponent(name)}`;
    const mimeType = getMimeType(name);
    const description = `Static document file exposed from /docs: ${name}`;
    // Register the file as a static resource; contents are read lazily on
    // each read request so edits on disk are picked up.
    server.registerResource(
      name,
      uri,
      { mimeType, description },
      async (resourceUri) => ({
        contents: [
          {
            uri: resourceUri.toString(),
            mimeType,
            text: readFileSafe(fullPath),
          },
        ],
      })
    );
  }
};
/**
 * Map a filename to its served mime type based on its extension.
 * Unknown extensions fall back to "text/plain".
 * @param fileName
 */
function getMimeType(fileName: string): string {
  const lower = fileName.toLowerCase();
  const dot = lower.lastIndexOf(".");
  const ext = dot === -1 ? "" : lower.slice(dot);
  switch (ext) {
    case ".md":
    case ".markdown":
      return "text/markdown";
    case ".json":
      return "application/json";
    case ".txt":
    default:
      return "text/plain";
  }
}
/**
 * Read a file as UTF-8 text, returning a descriptive error string instead of
 * throwing if the read fails.
 * @param path
 */
function readFileSafe(path: string): string {
  let content: string;
  try {
    content = readFileSync(path, "utf-8");
  } catch (e) {
    content = `Error reading file: ${path}. ${e}`;
  }
  return content;
}

View File

@@ -0,0 +1,36 @@
import { McpServer } from "@modelcontextprotocol/sdk/server/mcp.js";
import { registerResourceTemplates } from "./templates.js";
import { registerFileResources } from "./files.js";
import { fileURLToPath } from "url";
import { dirname, join } from "path";
import { readFileSync } from "fs";
/**
 * Register all resources with the MCP server: the dynamic resource templates
 * first, then the static files from docs/.
 * @param server
 */
export const registerResources = (server: McpServer) => {
  for (const register of [registerResourceTemplates, registerFileResources]) {
    register(server);
  }
};
/**
 * Reads the server instructions from docs/instructions.md, resolved relative
 * to this module. If the file cannot be read, an error message string is
 * returned instead of throwing.
 *
 * @return {string} The instructions file content, or an error message.
 */
export function readInstructions(): string {
  const moduleDir = dirname(fileURLToPath(import.meta.url));
  const filePath = join(moduleDir, "..", "docs", "instructions.md");
  try {
    return readFileSync(filePath, "utf-8");
  } catch (e) {
    return "Server instructions not loaded: " + e;
  }
}

View File

@@ -0,0 +1,63 @@
import { McpServer } from "@modelcontextprotocol/sdk/server/mcp.js";
import { Resource, ResourceLink } from "@modelcontextprotocol/sdk/types.js";
/**
 * Build the session-scoped resource URI for a resource name.
 *
 * @param {string} name - The name of the resource to create a URI for.
 * @returns {string} The formatted session resource URI.
 */
export const getSessionResourceURI = (name: string): string => {
  const base = "demo://resource/session";
  return `${base}/${name}`;
};
/**
 * Register a session-scoped resource with the server and return a resource
 * link for it.
 *
 * The registered resource lives only for the duration of the session; it is
 * not otherwise persisted. The payload is captured at registration time and
 * served unchanged on every read.
 *
 * @param {McpServer} server - The server handling the registration.
 * @param {Resource} resource - Resource metadata (uri, name, mimeType, ...).
 * @param {"text"|"blob"} type - Whether the payload is text or base64 blob data.
 * @param payload - The text content or base64-encoded blob content.
 * @returns {ResourceLink} A resource_link object carrying the resource metadata.
 */
export const registerSessionResource = (
  server: McpServer,
  resource: Resource,
  type: "text" | "blob",
  payload: string
): ResourceLink => {
  const { uri, name, mimeType, description, title, annotations, icons, _meta } =
    resource;
  // Shape the contents entry per the spec:
  // https://modelcontextprotocol.io/specification/2025-11-25/server/resources#resource-contents
  const base = { uri: uri.toString(), mimeType };
  const resourceContent =
    type === "text" ? { ...base, text: payload } : { ...base, blob: payload };
  // Register the resource; the read handler always returns the same content.
  server.registerResource(
    name,
    uri,
    { mimeType, description, title, annotations, icons, _meta },
    async () => ({ contents: [resourceContent] })
  );
  return { type: "resource_link", ...resource };
};

View File

@@ -0,0 +1,171 @@
import { McpServer } from "@modelcontextprotocol/sdk/server/mcp.js";
import {
SubscribeRequestSchema,
UnsubscribeRequestSchema,
} from "@modelcontextprotocol/sdk/types.js";
// Subscriber session ids per resource URI. A value of `undefined` inside a
// set represents a stdio client, which has no session id.
const subscriptions: Map<string, Set<string | undefined>> = new Map<
  string,
  Set<string | undefined>
>();
// Per-session interval timers that periodically push simulated
// resources/updated notifications; keyed by session id (undefined for stdio).
const subsUpdateIntervals: Map<string | undefined, NodeJS.Timeout | undefined> =
  new Map<string | undefined, NodeJS.Timeout | undefined>();
/**
 * Attach resources/subscribe and resources/unsubscribe request handlers to
 * the given server.
 *
 * Subscribe: logs an acknowledgement, then records the caller's session id in
 * the per-URI subscriber set.
 *
 * Unsubscribe: logs an acknowledgement, then removes the caller's session id
 * from the per-URI subscriber set.
 *
 * The session id may be undefined for stdio transports; that value is stored
 * and matched like any other key.
 *
 * @param {McpServer} server - The server instance to which subscription handlers will be attached.
 */
export const setSubscriptionHandlers = (server: McpServer) => {
  // resources/subscribe
  server.server.setRequestHandler(
    SubscribeRequestSchema,
    async (request, extra) => {
      const { uri } = request.params;
      // Session id can be undefined for stdio.
      const sessionId = extra.sessionId as string;
      // Acknowledge the subscribe request via a log message.
      await server.sendLoggingMessage(
        {
          level: "info",
          data: `Received Subscribe Resource request for URI: ${uri} ${
            sessionId ? `from session ${sessionId}` : ""
          }`,
        },
        sessionId
      );
      // Add this session to the URI's subscriber set, creating it if needed.
      const subscribers =
        subscriptions.get(uri) ?? new Set<string | undefined>();
      subscribers.add(sessionId);
      subscriptions.set(uri, subscribers);
      return {};
    }
  );
  // resources/unsubscribe
  server.server.setRequestHandler(
    UnsubscribeRequestSchema,
    async (request, extra) => {
      const { uri } = request.params;
      // Session id can be undefined for stdio.
      const sessionId = extra.sessionId as string;
      // Acknowledge the unsubscribe request via a log message.
      await server.sendLoggingMessage(
        {
          level: "info",
          data: `Received Unsubscribe Resource request: ${uri} ${
            sessionId ? `from session ${sessionId}` : ""
          }`,
        },
        sessionId
      );
      // Drop this session from the URI's subscriber set, if present.
      subscriptions.get(uri)?.delete(sessionId);
      return {};
    }
  );
};
/**
 * Sends simulated resource update notifications to the subscribed client.
 *
 * Iterates all tracked resource URIs and, for each one the given session is
 * subscribed to, emits a `notifications/resources/updated` notification.
 *
 * Unsubscription cleanup is handled by the unsubscribe request handler and
 * by stopSimulatedResourceUpdates; this function only notifies. (The previous
 * implementation "deleted" the session from sets it was not a member of — a
 * no-op that has been removed.)
 *
 * @param {McpServer} server - The server instance used to send notifications.
 * @param {string | undefined} sessionId - The session to notify (undefined for stdio).
 * @returns {Promise<void>} Resolves once all applicable notifications are sent.
 */
const sendSimulatedResourceUpdates = async (
  server: McpServer,
  sessionId: string | undefined
): Promise<void> => {
  for (const [uri, subscribers] of subscriptions) {
    // Only notify for URIs this client is actually subscribed to.
    if (subscribers.has(sessionId)) {
      await server.server.notification({
        method: "notifications/resources/updated",
        params: { uri },
      });
    }
  }
};
/**
 * Start sending simulated resource-updated notifications to a client for the
 * resources it is subscribed to: once immediately, then every 5 seconds.
 * Calling this again while an interval is already active for the session is
 * a no-op.
 *
 * @param server
 * @param sessionId
 */
export const beginSimulatedResourceUpdates = (
  server: McpServer,
  sessionId: string | undefined
) => {
  // Already streaming updates to this session.
  if (subsUpdateIntervals.has(sessionId)) {
    return;
  }
  // Send once immediately (fire-and-forget).
  void sendSimulatedResourceUpdates(server, sessionId);
  // Then repeat on a fixed 5-second cadence.
  const interval = setInterval(
    () => sendSimulatedResourceUpdates(server, sessionId),
    5000
  );
  subsUpdateIntervals.set(sessionId, interval);
};
/**
 * Stops simulated resource updates for a given session.
 *
 * Clears any active interval for the session and removes its tracking entry.
 * Session ID can be undefined for stdio.
 *
 * @param {string} [sessionId]
 */
export const stopSimulatedResourceUpdates = (sessionId?: string) => {
  const interval = subsUpdateIntervals.get(sessionId);
  if (interval !== undefined) {
    clearInterval(interval);
  }
  subsUpdateIntervals.delete(sessionId);
};

View File

@@ -0,0 +1,211 @@
import { z } from "zod";
import {
CompleteResourceTemplateCallback,
McpServer,
ResourceTemplate,
} from "@modelcontextprotocol/sdk/server/mcp.js";
import { completable } from "@modelcontextprotocol/sdk/server/completable.js";
// Resource types exposed by the dynamic resource templates.
export const RESOURCE_TYPE_TEXT = "Text" as const;
export const RESOURCE_TYPE_BLOB = "Blob" as const;
// All valid resource type values; used for argument validation and completion.
export const RESOURCE_TYPES: string[] = [
  RESOURCE_TYPE_TEXT,
  RESOURCE_TYPE_BLOB,
];
/**
 * Completer for the resource-type prompt argument.
 *
 * Suggests the resource types whose names start with the user's current
 * input (a prefix match against RESOURCE_TYPES). An empty input matches all
 * types.
 */
export const resourceTypeCompleter = completable(
  z.string().describe("Type of resource to fetch"),
  (value: string) => {
    return RESOURCE_TYPES.filter((t) => t.startsWith(value));
  }
);
/**
 * Completer for resource IDs supplied as prompt-argument strings.
 *
 * NOTE: Prompt arguments can only be strings since type is not a field of
 * `PromptArgument`, so the id must be defined as a string and converted to a
 * number before use:
 * https://modelcontextprotocol.io/specification/2025-11-25/schema#promptargument
 *
 * The input is echoed back as the single completion when it parses to a
 * positive integer; otherwise no completions are offered.
 */
export const resourceIdForPromptCompleter = completable(
  z.string().describe("ID of the text resource to fetch"),
  (value: string) => {
    // Number.isInteger rejects NaN/±Infinity, so only valid integer strings pass.
    const resourceId = Number(value);
    return Number.isInteger(resourceId) && resourceId > 0 ? [value] : [];
  }
);
/**
 * Completer for the {resourceId} variable of the resource templates.
 *
 * @typedef {CompleteResourceTemplateCallback}
 * @param {string} value - The input string value to be evaluated as a resource ID.
 * @returns {string[]} The input value echoed back when it parses to a
 * positive integer, otherwise an empty array.
 */
export const resourceIdForResourceTemplateCompleter: CompleteResourceTemplateCallback =
  (value: string) => {
    const candidate = Number(value);
    if (Number.isInteger(candidate) && candidate > 0) {
      return [value];
    }
    return [];
  };
// URI roots for the dynamically fabricated resources.
const uriBase: string = "demo://resource/dynamic";
const textUriBase: string = `${uriBase}/text`;
const blobUriBase: string = `${uriBase}/blob`;
// URI templates; {resourceId} must expand to a positive integer.
const textUriTemplate: string = `${textUriBase}/{resourceId}`;
const blobUriTemplate: string = `${blobUriBase}/{resourceId}`;
/**
 * Create a dynamic text resource whose body embeds the id and a creation
 * timestamp.
 * - Exposed for use by embedded resource prompt example
 * @param uri
 * @param resourceId
 */
export const textResource = (uri: URL, resourceId: number) => {
  const createdAt = new Date().toLocaleTimeString();
  const text = `Resource ${resourceId}: This is a plaintext resource created at ${createdAt}`;
  return {
    uri: uri.toString(),
    mimeType: "text/plain",
    text,
  };
};
/**
 * Create a dynamic blob resource whose content is the base64 encoding of a
 * timestamped message.
 * - Exposed for use by embedded resource prompt example
 * - mimeType is "application/octet-stream" to match the metadata declared by
 *   the blob resource template registration (previously "text/plain", which
 *   mislabeled the base64 blob content)
 * @param uri
 * @param resourceId
 */
export const blobResource = (uri: URL, resourceId: number) => {
  const timestamp = new Date().toLocaleTimeString();
  const resourceText = Buffer.from(
    `Resource ${resourceId}: This is a base64 blob created at ${timestamp}`
  ).toString("base64");
  return {
    uri: uri.toString(),
    mimeType: "application/octet-stream",
    blob: resourceText,
  };
};
/**
 * Build the URI for a dynamic text resource with the given id.
 * - Exposed for use by embedded resource prompt example
 * @param resourceId
 */
export const textResourceUri = (resourceId: number) => {
  return new URL(`${textUriBase}/${resourceId}`);
};
/**
 * Build the URI for a dynamic blob resource with the given id.
 * - Exposed for use by embedded resource prompt example
 * @param resourceId
 */
export const blobResourceUri = (resourceId: number) => {
  return new URL(`${blobUriBase}/${resourceId}`);
};
/**
 * Parses the resource identifier from the provided URI and validates it
 * against the given variables. Throws an error if the URI does not belong to
 * either dynamic resource template, or if the resourceId is not a positive
 * integer.
 *
 * @param {URL} uri - The URI of the resource to be parsed.
 * @param {Record<string, unknown>} variables - Template variables, including resourceId.
 * @returns {number} The parsed and validated resource identifier as an integer.
 * @throws {Error} If the URI matches neither base or the resourceId is invalid.
 */
const parseResourceId = (uri: URL, variables: Record<string, unknown>) => {
  const uriString = uri.toString();
  const uriError = `Unknown resource: ${uriString}`;
  // Bug fix: the original condition used `&&` (URI starts with BOTH bases),
  // which can never be true, so unknown URIs were never rejected. Reject any
  // URI that matches neither the text base nor the blob base.
  if (
    !uriString.startsWith(textUriBase) &&
    !uriString.startsWith(blobUriBase)
  ) {
    throw new Error(uriError);
  }
  const idxStr = String((variables as any).resourceId ?? "");
  const idx = Number(idxStr);
  // Number.isInteger already implies finiteness; require a positive integer.
  if (!Number.isInteger(idx) || idx < 1) {
    throw new Error(uriError);
  }
  return idx;
};
/**
 * Register resource templates with the MCP server.
 * - Text and blob resources, dynamically generated from the URI {resourceId} variable
 * - Any finite positive integer is acceptable for the resourceId variable
 * - List resources method will not return these resources
 * - These are only accessible via template URIs
 * - Both blob and text resources:
 *   - have content that is dynamically generated, including a timestamp
 *   - have different template URIs
 *     - Blob: "demo://resource/dynamic/blob/{resourceId}"
 *     - Text: "demo://resource/dynamic/text/{resourceId}"
 *
 * @param server
 */
export const registerResourceTemplates = (server: McpServer) => {
  // Both templates share identical wiring; only the name, URI template,
  // metadata, and content factory differ.
  const registerDynamicResource = (
    name: string,
    uriTemplate: string,
    metadata: { mimeType: string; description: string },
    makeContent: (
      uri: URL,
      resourceId: number
    ) => ReturnType<typeof textResource> | ReturnType<typeof blobResource>
  ) => {
    server.registerResource(
      name,
      new ResourceTemplate(uriTemplate, {
        list: undefined,
        complete: { resourceId: resourceIdForResourceTemplateCompleter },
      }),
      metadata,
      async (uri, variables) => {
        // parseResourceId throws for unknown URIs or non-positive-integer ids.
        const resourceId = parseResourceId(uri, variables);
        return { contents: [makeContent(uri, resourceId)] };
      }
    );
  };
  registerDynamicResource(
    "Dynamic Text Resource",
    textUriTemplate,
    {
      mimeType: "text/plain",
      description:
        "Plaintext dynamic resource fabricated from the {resourceId} variable, which must be an integer.",
    },
    textResource
  );
  registerDynamicResource(
    "Dynamic Blob Resource",
    blobUriTemplate,
    {
      mimeType: "application/octet-stream",
      description:
        "Binary (base64) dynamic resource fabricated from the {resourceId} variable, which must be an integer.",
    },
    blobResource
  );
};

View File

@@ -0,0 +1,118 @@
import { McpServer } from "@modelcontextprotocol/sdk/server/mcp.js";
import {
InMemoryTaskStore,
InMemoryTaskMessageQueue,
} from "@modelcontextprotocol/sdk/experimental/tasks";
import {
setSubscriptionHandlers,
stopSimulatedResourceUpdates,
} from "../resources/subscriptions.js";
import { registerConditionalTools, registerTools } from "../tools/index.js";
import { registerResources, readInstructions } from "../resources/index.js";
import { registerPrompts } from "../prompts/index.js";
import { stopSimulatedLogging } from "./logging.js";
import { syncRoots } from "./roots.js";
// Server Factory response
export type ServerFactoryResponse = {
server: McpServer;
cleanup: (sessionId?: string) => void;
};
/**
 * Server Factory
 *
 * Initializes a `McpServer` with its capabilities and instructions, registers
 * tools, resources, and prompts, configures resource subscription handlers,
 * and wires post-initialization work (conditional tool registration and a
 * deferred roots sync).
 *
 * @returns {ServerFactoryResponse} An object containing the server instance, and a `cleanup`
 * function for handling server-side cleanup when a session ends.
 *
 * Properties of the returned object:
 * - `server` {Object}: The initialized server instance.
 * - `cleanup` {Function}: Function to perform cleanup operations for a closing session.
 */
export const createServer: () => ServerFactoryResponse = () => {
  // Read the server instructions (falls back to an error string on failure)
  const instructions = readInstructions();
  // Create task store and message queue for experimental task support
  const taskStore = new InMemoryTaskStore();
  const taskMessageQueue = new InMemoryTaskMessageQueue();
  // Pending deferred roots-sync timer; cleared during session cleanup
  let initializeTimeout: NodeJS.Timeout | null = null;
  // Create the server with its advertised capabilities
  const server = new McpServer(
    {
      name: "mcp-servers/everything",
      title: "Everything Reference Server",
      version: "2.0.0",
    },
    {
      capabilities: {
        tools: {
          listChanged: true,
        },
        prompts: {
          listChanged: true,
        },
        resources: {
          subscribe: true,
          listChanged: true,
        },
        logging: {},
        tasks: {
          list: {},
          cancel: {},
          requests: {
            tools: {
              call: {},
            },
          },
        },
      },
      instructions,
      taskStore,
      taskMessageQueue,
    }
  );
  // Register the tools
  registerTools(server);
  // Register the resources
  registerResources(server);
  // Register the prompts
  registerPrompts(server);
  // Set resource subscription handlers
  setSubscriptionHandlers(server);
  // Perform post-initialization operations
  server.server.oninitialized = async () => {
    // Register conditional tools now that client capabilities are known.
    // This finishes before the `notifications/initialized` handler finishes.
    registerConditionalTools(server);
    // Sync roots if the client supports them.
    // This is delayed until after the `notifications/initialized` handler finishes,
    // otherwise, the request gets lost.
    const sessionId = server.server.transport?.sessionId;
    initializeTimeout = setTimeout(() => syncRoots(server, sessionId), 350);
  };
  // Return the ServerFactoryResponse
  return {
    server,
    cleanup: (sessionId?: string) => {
      // Stop any simulated logging or resource updates that may have been initiated.
      stopSimulatedLogging(sessionId);
      stopSimulatedResourceUpdates(sessionId);
      // Clean up task store timers
      taskStore.cleanup();
      // Cancel the deferred roots sync if the session ends before it fires
      if (initializeTimeout) clearTimeout(initializeTimeout);
    },
  } satisfies ServerFactoryResponse;
};

View File

@@ -0,0 +1,82 @@
import { LoggingLevel } from "@modelcontextprotocol/sdk/types.js";
import { McpServer } from "@modelcontextprotocol/sdk/server/mcp.js";
// Per-session interval timers for the simulated log stream; keyed by session
// id (undefined for stdio clients).
const logsUpdateIntervals: Map<string | undefined, NodeJS.Timeout | undefined> =
  new Map<string | undefined, NodeJS.Timeout | undefined>();
/**
 * Begin emitting simulated log messages to a client.
 *
 * A message with a random logging level is sent immediately and then every
 * five seconds. Calling this again while an interval is already active for
 * the session is a no-op.
 *
 * @param {McpServer} server - The server instance responsible for handling the logging messages.
 * @param {string | undefined} sessionId - Optional session id; when present it is appended to each message.
 */
export const beginSimulatedLogging = (
  server: McpServer,
  sessionId: string | undefined
) => {
  // Already streaming log messages to this session.
  if (logsUpdateIntervals.has(sessionId)) {
    return;
  }
  const maybeAppendSessionId = sessionId ? ` - SessionId ${sessionId}` : "";
  // One canned message per logging level.
  const messages: { level: LoggingLevel; data: string }[] = [
    { level: "debug", data: `Debug-level message${maybeAppendSessionId}` },
    { level: "info", data: `Info-level message${maybeAppendSessionId}` },
    { level: "notice", data: `Notice-level message${maybeAppendSessionId}` },
    {
      level: "warning",
      data: `Warning-level message${maybeAppendSessionId}`,
    },
    { level: "error", data: `Error-level message${maybeAppendSessionId}` },
    {
      level: "critical",
      data: `Critical-level message${maybeAppendSessionId}`,
    },
    { level: "alert", data: `Alert level-message${maybeAppendSessionId}` },
    {
      level: "emergency",
      data: `Emergency-level message${maybeAppendSessionId}`,
    },
  ];
  /**
   * Send one randomly-leveled message. Using `sendLoggingMessage` ensures
   * the client's chosen logging level is respected.
   */
  const sendOne = async (target: string | undefined) => {
    const message = messages[Math.floor(Math.random() * messages.length)];
    await server.sendLoggingMessage(message, target);
  };
  // Send once immediately (fire-and-forget), then every 5 seconds.
  void sendOne(sessionId);
  logsUpdateIntervals.set(
    sessionId,
    setInterval(() => sendOne(sessionId), 5000)
  );
};
/**
 * Stops the simulated logging process for a given session.
 *
 * Clears the session's logging interval (if any) and removes its tracking
 * entry. Session ID can be undefined for stdio.
 *
 * @param {string} [sessionId] - The optional unique identifier of the session.
 */
export const stopSimulatedLogging = (sessionId?: string) => {
  const interval = logsUpdateIntervals.get(sessionId);
  if (interval !== undefined) {
    clearInterval(interval);
  }
  logsUpdateIntervals.delete(sessionId);
};

View File

@@ -0,0 +1,90 @@
import { McpServer } from "@modelcontextprotocol/sdk/server/mcp.js";
import {
Root,
RootsListChangedNotificationSchema,
} from "@modelcontextprotocol/sdk/types.js";
// Cached client roots lists, keyed by session id (undefined for stdio).
export const roots: Map<string | undefined, Root[]> = new Map<
  string | undefined,
  Root[]
>();
/**
 * Get the latest client roots list for the session.
 *
 * - Requests and caches the roots list the first time it is called for a
 *   session, also installing a `roots/list_changed` notification handler so
 *   later changes are fetched automatically.
 * - Returns the cached roots list on subsequent calls.
 * - Returns undefined when the client does not advertise roots support.
 *
 * This function is idempotent: it only requests roots from the client once
 * per session, returning the cached version thereafter.
 *
 * @param {McpServer} server - An instance of the MCP server used to communicate with the client.
 * @param {string} [sessionId] - An optional session id used to associate the roots list with a specific client session.
 *
 * @throws {Error} In case of a failure to request the roots from the client, an error is logged (not rethrown).
 */
export const syncRoots = async (server: McpServer, sessionId?: string) => {
  const clientCapabilities = server.server.getClientCapabilities() || {};
  // Roots support is signalled by the presence of the `roots` capability.
  const clientSupportsRoots: boolean = clientCapabilities?.roots !== undefined;
  // Fetch the roots list for this client
  if (clientSupportsRoots) {
    // Function to request the updated roots list from the client
    const requestRoots = async () => {
      try {
        // Request the updated roots list from the client
        const response = await server.server.listRoots();
        if (response && "roots" in response) {
          // Store the roots list for this client
          roots.set(sessionId, response.roots);
          // Notify the client of roots received
          await server.sendLoggingMessage(
            {
              level: "info",
              logger: "everything-server",
              data: `Roots updated: ${response?.roots?.length} root(s) received from client`,
            },
            sessionId
          );
        } else {
          await server.sendLoggingMessage(
            {
              level: "info",
              logger: "everything-server",
              data: "Client returned no roots set",
            },
            sessionId
          );
        }
      } catch (error) {
        // Best effort: log the failure locally rather than failing the caller.
        console.error(
          `Failed to request roots from client ${sessionId}: ${
            error instanceof Error ? error.message : String(error)
          }`
        );
      }
    };
    // If the roots have not been synced for this client,
    // set notification handler and request initial roots
    if (!roots.has(sessionId)) {
      // Set the list changed notification handler
      server.server.setNotificationHandler(
        RootsListChangedNotificationSchema,
        requestRoots
      );
      // Request the initial roots list immediately
      await requestRoots();
    }
    // Return the roots list for this client
    return roots.get(sessionId);
  }
  // NOTE: falls through and returns undefined when roots are unsupported.
};

View File

@@ -1,56 +0,0 @@
import { SSEServerTransport } from "@modelcontextprotocol/sdk/server/sse.js";
import express from "express";
import { createServer } from "./everything.js";
console.error('Starting SSE server...');
// Express app exposing the SSE endpoints.
const app = express();
// Active SSE transports, keyed by session id.
const transports: Map<string, SSEServerTransport> = new Map<string, SSEServerTransport>();
// GET /sse establishes the SSE stream and creates a per-connection server.
app.get("/sse", async (req, res) => {
  let transport: SSEServerTransport;
  const { server, cleanup, startNotificationIntervals } = createServer();
  if (req?.query?.sessionId) {
    const sessionId = (req?.query?.sessionId as string);
    // NOTE(review): transports.get may return undefined here; the cast hides
    // a potential crash on transport.sessionId below — confirm intended.
    transport = transports.get(sessionId) as SSEServerTransport;
    console.error("Client Reconnecting? This shouldn't happen; when client has a sessionId, GET /sse should not be called again.", transport.sessionId);
  } else {
    // Create and store transport for new session
    transport = new SSEServerTransport("/message", res);
    transports.set(transport.sessionId, transport);
    // Connect server to transport
    await server.connect(transport);
    console.error("Client Connected: ", transport.sessionId);
    // Start notification intervals after client connects
    startNotificationIntervals();
    // Handle close of connection: forget the transport and run session cleanup
    server.onclose = async () => {
      console.error("Client Disconnected: ", transport.sessionId);
      transports.delete(transport.sessionId);
      await cleanup();
    };
  }
});
// POST /message routes client JSON-RPC messages to the session's transport.
app.post("/message", async (req, res) => {
  const sessionId = (req?.query?.sessionId as string);
  const transport = transports.get(sessionId);
  if (transport) {
    console.error("Client Message from", sessionId);
    await transport.handlePostMessage(req, res);
  } else {
    console.error(`No transport found for sessionId ${sessionId}`)
  }
});
// Port is configurable via the PORT environment variable; defaults to 3001.
const PORT = process.env.PORT || 3001;
app.listen(PORT, () => {
  console.error(`Server is running on port ${PORT}`);
});

View File

@@ -1,26 +0,0 @@
#!/usr/bin/env node
import { StdioServerTransport } from "@modelcontextprotocol/sdk/server/stdio.js";
import { createServer } from "./everything.js";

console.error('Starting default (STDIO) server...');

/**
 * Boot the "everything" server over a stdio transport and install a SIGINT
 * handler that tears the server down before exiting.
 */
const main = async () => {
  const transport = new StdioServerTransport();
  const { server, cleanup } = createServer();
  await server.connect(transport);

  // Cleanup on exit
  process.on("SIGINT", async () => {
    await cleanup();
    await server.close();
    process.exit(0);
  });
};

main().catch((error) => {
  console.error("Server error:", error);
  process.exit(1);
});

View File

@@ -1,176 +0,0 @@
import { StreamableHTTPServerTransport } from "@modelcontextprotocol/sdk/server/streamableHttp.js";
import { InMemoryEventStore } from '@modelcontextprotocol/sdk/examples/shared/inMemoryEventStore.js';
import express, { Request, Response } from "express";
import { createServer } from "./everything.js";
import { randomUUID } from 'node:crypto';
console.error('Starting Streamable HTTP server...');
const app = express();
// Active transports keyed by MCP session ID. Entries are added by the POST
// handler's onsessioninitialized callback and removed when a transport closes.
const transports: Map<string, StreamableHTTPServerTransport> = new Map<string, StreamableHTTPServerTransport>();
// POST /mcp carries all client->server JSON-RPC traffic. Requests with a
// known mcp-session-id header are routed to that session's transport; requests
// with no session header are treated as new-session initialization; anything
// else is rejected with a JSON-RPC error.
app.post('/mcp', async (req: Request, res: Response) => {
  console.error('Received MCP POST request');
  try {
    // Check for existing session ID
    const sessionId = req.headers['mcp-session-id'] as string | undefined;
    let transport: StreamableHTTPServerTransport;
    if (sessionId && transports.has(sessionId)) {
      // Reuse existing transport
      transport = transports.get(sessionId)!;
    } else if (!sessionId) {
      const { server, cleanup } = createServer();
      // New initialization request
      const eventStore = new InMemoryEventStore();
      transport = new StreamableHTTPServerTransport({
        sessionIdGenerator: () => randomUUID(),
        eventStore, // Enable resumability
        onsessioninitialized: (sessionId: string) => {
          // Store the transport by session ID when session is initialized
          // This avoids race conditions where requests might come in before the session is stored
          console.error(`Session initialized with ID: ${sessionId}`);
          transports.set(sessionId, transport);
        }
      });
      // Set up onclose handler to clean up transport when closed
      server.onclose = async () => {
        const sid = transport.sessionId;
        if (sid && transports.has(sid)) {
          console.error(`Transport closed for session ${sid}, removing from transports map`);
          transports.delete(sid);
          await cleanup();
        }
      };
      // Connect the transport to the MCP server BEFORE handling the request
      // so responses can flow back through the same transport
      await server.connect(transport);
      await transport.handleRequest(req, res);
      return; // Already handled
    } else {
      // Invalid request - no session ID or not initialization request
      // (a session ID was supplied but is unknown to this server)
      res.status(400).json({
        jsonrpc: '2.0',
        error: {
          code: -32000,
          message: 'Bad Request: No valid session ID provided',
        },
        // NOTE(review): no body-parsing middleware is visible in this file,
        // so req.body is likely undefined here — confirm intended behavior.
        id: req?.body?.id,
      });
      return;
    }
    // Handle the request with existing transport - no need to reconnect
    // The existing transport is already connected to the server
    await transport.handleRequest(req, res);
  } catch (error) {
    console.error('Error handling MCP request:', error);
    if (!res.headersSent) {
      res.status(500).json({
        jsonrpc: '2.0',
        error: {
          code: -32603,
          message: 'Internal server error',
        },
        id: req?.body?.id,
      });
      return;
    }
  }
});
// Handle GET requests for SSE streams (using built-in support from StreamableHTTP)
app.get('/mcp', async (req: Request, res: Response) => {
  console.error('Received MCP GET request');

  // Resolve the transport for the session named in the header, if any.
  const sessionId = req.headers['mcp-session-id'] as string | undefined;
  const transport = sessionId ? transports.get(sessionId) : undefined;
  if (!transport) {
    res.status(400).json({
      jsonrpc: '2.0',
      error: {
        code: -32000,
        message: 'Bad Request: No valid session ID provided',
      },
      id: req?.body?.id,
    });
    return;
  }

  // A Last-Event-ID header indicates the client is resuming an earlier stream.
  const lastEventId = req.headers['last-event-id'] as string | undefined;
  if (lastEventId) {
    console.error(`Client reconnecting with Last-Event-ID: ${lastEventId}`);
  } else {
    console.error(`Establishing new SSE stream for session ${sessionId}`);
  }

  await transport.handleRequest(req, res);
});
// Handle DELETE requests for session termination (according to MCP spec)
app.delete('/mcp', async (req: Request, res: Response) => {
  // Resolve the transport for the session named in the header, if any.
  const sessionId = req.headers['mcp-session-id'] as string | undefined;
  const transport = sessionId ? transports.get(sessionId) : undefined;
  if (!transport) {
    res.status(400).json({
      jsonrpc: '2.0',
      error: {
        code: -32000,
        message: 'Bad Request: No valid session ID provided',
      },
      id: req?.body?.id,
    });
    return;
  }

  console.error(`Received session termination request for session ${sessionId}`);
  try {
    // The transport implements the spec-defined DELETE semantics itself.
    await transport.handleRequest(req, res);
  } catch (error) {
    console.error('Error handling session termination:', error);
    if (!res.headersSent) {
      res.status(500).json({
        jsonrpc: '2.0',
        error: {
          code: -32603,
          message: 'Error handling session termination',
        },
        id: req?.body?.id,
      });
      return;
    }
  }
});
// Start the server
const PORT = process.env.PORT || 3001;
app.listen(PORT, () => {
console.error(`MCP Streamable HTTP Server listening on port ${PORT}`);
});
// Handle server shutdown
process.on('SIGINT', async () => {
console.error('Shutting down server...');
// Close all active transports to properly clean up resources
for (const sessionId in transports) {
try {
console.error(`Closing transport for session ${sessionId}`);
await transports.get(sessionId)!.close();
transports.delete(sessionId);
} catch (error) {
console.error(`Error closing transport for session ${sessionId}:`, error);
}
}
console.error('Server shutdown complete');
process.exit(0);
});

View File

@@ -0,0 +1,34 @@
import { McpServer } from "@modelcontextprotocol/sdk/server/mcp.js";
import { CallToolResult } from "@modelcontextprotocol/sdk/types.js";
import { z } from "zod";

// Tool input schema
export const EchoSchema = z.object({
  message: z.string().describe("Message to echo"),
});

// Tool configuration
const name = "echo";
const config = {
  title: "Echo Tool",
  description: "Echoes back the input string",
  inputSchema: EchoSchema,
};

/**
 * Registers the 'echo' tool.
 *
 * The handler validates its arguments against EchoSchema and replies with a
 * single text content block repeating the supplied message.
 *
 * @param {McpServer} server - The McpServer instance where the tool will be registered.
 * @returns {void}
 */
export const registerEchoTool = (server: McpServer) => {
  server.registerTool(name, config, async (args): Promise<CallToolResult> => {
    const { message } = EchoSchema.parse(args);
    return {
      content: [{ type: "text", text: `Echo: ${message}` }],
    };
  });
};

View File

@@ -0,0 +1,89 @@
import { McpServer } from "@modelcontextprotocol/sdk/server/mcp.js";
import { CallToolResult } from "@modelcontextprotocol/sdk/types.js";
import { z } from "zod";
import { MCP_TINY_IMAGE } from "./get-tiny-image.js";

// Tool input schema
const GetAnnotatedMessageSchema = z.object({
  messageType: z
    .enum(["error", "success", "debug"])
    .describe("Type of message to demonstrate different annotation patterns"),
  includeImage: z
    .boolean()
    .default(false)
    .describe("Whether to include an example image"),
});

// Tool configuration
const name = "get-annotated-message";
const config = {
  title: "Get Annotated Message Tool",
  description:
    "Demonstrates how annotations can be used to provide metadata about content.",
  inputSchema: GetAnnotatedMessageSchema,
};

/**
 * Registers the 'get-annotated-message' tool.
 *
 * The handler emits a text block whose annotations (priority level and
 * intended audience) depend on the requested message type, and optionally an
 * annotated example image.
 *
 * @function
 * @param {McpServer} server - The McpServer instance where the tool will be registered.
 */
export const registerGetAnnotatedMessageTool = (server: McpServer) => {
  server.registerTool(name, config, async (args): Promise<CallToolResult> => {
    const { messageType, includeImage } = GetAnnotatedMessageSchema.parse(args);
    const content: CallToolResult["content"] = [];

    // Main message; priority and audience vary with the message type.
    switch (messageType) {
      case "error":
        // Errors are highest priority and relevant to user and assistant alike.
        content.push({
          type: "text",
          text: "Error: Operation failed",
          annotations: { priority: 1.0, audience: ["user", "assistant"] },
        });
        break;
      case "success":
        // Important but not critical; mainly for user consumption.
        content.push({
          type: "text",
          text: "Operation completed successfully",
          annotations: { priority: 0.7, audience: ["user"] },
        });
        break;
      case "debug":
        // Low-priority technical detail aimed at the assistant.
        content.push({
          type: "text",
          text: "Debug: Cache hit ratio 0.95, latency 150ms",
          annotations: { priority: 0.3, audience: ["assistant"] },
        });
        break;
    }

    // Optional image with its own annotations.
    if (includeImage) {
      content.push({
        type: "image",
        data: MCP_TINY_IMAGE,
        mimeType: "image/png",
        annotations: { priority: 0.5, audience: ["user"] },
      });
    }

    return { content };
  });
};

View File

@@ -0,0 +1,33 @@
import { McpServer } from "@modelcontextprotocol/sdk/server/mcp.js";
import { CallToolResult } from "@modelcontextprotocol/sdk/types.js";

// Tool configuration
const name = "get-env";
const config = {
  title: "Print Environment Tool",
  description:
    "Returns all environment variables, helpful for debugging MCP server configuration",
  inputSchema: {},
};

/**
 * Registers the 'get-env' tool.
 *
 * The handler takes no arguments and responds with the current process
 * environment serialized as pretty-printed JSON in a single text block.
 *
 * @param {McpServer} server - The McpServer instance where the tool will be registered.
 * @returns {void}
 */
export const registerGetEnvTool = (server: McpServer) => {
  server.registerTool(name, config, async (): Promise<CallToolResult> => {
    const env = JSON.stringify(process.env, null, 2);
    return {
      content: [{ type: "text", text: env }],
    };
  });
};

View File

@@ -0,0 +1,80 @@
import { z } from "zod";
import { McpServer } from "@modelcontextprotocol/sdk/server/mcp.js";
import { CallToolResult } from "@modelcontextprotocol/sdk/types.js";
import {
  textResource,
  textResourceUri,
  blobResourceUri,
  blobResource,
} from "../resources/templates.js";

// Tool input schema
const GetResourceLinksSchema = z.object({
  count: z
    .number()
    .min(1)
    .max(10)
    .default(3)
    .describe("Number of resource links to return (1-10)"),
});

// Tool configuration
const name = "get-resource-links";
const config = {
  title: "Get Resource Links Tool",
  description:
    "Returns up to ten resource links that reference different types of resources",
  inputSchema: GetResourceLinksSchema,
};

/**
 * Registers the 'get-resource-links' tool.
 *
 * The registered tool retrieves a specified number of resource links and their
 * metadata. Even-numbered IDs yield text resources and odd-numbered IDs yield
 * binary blob resources. The response contains a "text" introductory block
 * followed by one "resource_link" block per resource.
 *
 * @param {McpServer} server - The McpServer instance where the tool will be registered.
 */
export const registerGetResourceLinksTool = (server: McpServer) => {
  server.registerTool(name, config, async (args): Promise<CallToolResult> => {
    const { count } = GetResourceLinksSchema.parse(args);

    // Add intro text content block
    const content: CallToolResult["content"] = [
      {
        type: "text",
        text: `Here are ${count} resource links to resources available in this server:`,
      },
    ];

    // Create resource link content blocks
    for (let resourceId = 1; resourceId <= count; resourceId++) {
      // FIX: the previous local was named `isOdd` but `id % 2 === 0` is true
      // for EVEN ids. Renamed to `isEven`; behavior is unchanged: even ids map
      // to text resources, odd ids to blob resources.
      const isEven = resourceId % 2 === 0;
      const uri = isEven
        ? textResourceUri(resourceId)
        : blobResourceUri(resourceId);
      const resource = isEven
        ? textResource(uri, resourceId)
        : blobResource(uri, resourceId);
      content.push({
        type: "resource_link",
        uri: resource.uri,
        name: `${isEven ? "Text" : "Blob"} Resource ${resourceId}`,
        description: `Resource ${resourceId}: ${
          resource.mimeType === "text/plain"
            ? "plaintext resource"
            : "binary blob resource"
        }`,
        mimeType: resource.mimeType,
      });
    }
    return { content };
  });
};

View File

@@ -0,0 +1,98 @@
import { z } from "zod";
import { McpServer } from "@modelcontextprotocol/sdk/server/mcp.js";
import { CallToolResult } from "@modelcontextprotocol/sdk/types.js";
import {
  textResource,
  textResourceUri,
  blobResourceUri,
  blobResource,
  RESOURCE_TYPE_BLOB,
  RESOURCE_TYPE_TEXT,
  RESOURCE_TYPES,
} from "../resources/templates.js";

// Tool input schema
const GetResourceReferenceSchema = z.object({
  resourceType: z
    .enum([RESOURCE_TYPE_TEXT, RESOURCE_TYPE_BLOB])
    .default(RESOURCE_TYPE_TEXT),
  resourceId: z
    .number()
    .default(1)
    .describe("ID of the text resource to fetch"),
});

// Tool configuration
const name = "get-resource-reference";
const config = {
  title: "Get Resource Reference Tool",
  description: "Returns a resource reference that can be used by MCP clients",
  inputSchema: GetResourceReferenceSchema,
};

/**
 * Registers the 'get-resource-reference' tool.
 *
 * The handler validates its arguments against GetResourceReferenceSchema
 * (which applies the declared defaults), additionally requires resourceId to
 * be a positive integer, builds the matching text or blob resource URI, and
 * returns the resource embedded in a content block alongside explanatory text.
 *
 * FIX: the previous implementation read raw `args` without calling
 * `Schema.parse`, so the declared `resourceId` default of 1 was never applied
 * (a missing id became `Number(undefined)` → NaN → spurious error), and the
 * validation style was inconsistent with every sibling tool.
 *
 * @param {McpServer} server - The McpServer instance where the tool will be registered.
 */
export const registerGetResourceReferenceTool = (server: McpServer) => {
  server.registerTool(name, config, async (args): Promise<CallToolResult> => {
    // Parse applies defaults and enforces the resourceType enum.
    const { resourceType, resourceId } = GetResourceReferenceSchema.parse(args);

    // Defense in depth: parse() already enforces the enum, but keep an
    // explicit membership check so a schema edit can't silently widen the
    // accepted types.
    if (!RESOURCE_TYPES.includes(resourceType)) {
      throw new Error(
        `Invalid resourceType: ${args?.resourceType}. Must be ${RESOURCE_TYPE_TEXT} or ${RESOURCE_TYPE_BLOB}.`
      );
    }

    // parse() guarantees a number, but not integrality or sign.
    if (!Number.isInteger(resourceId) || resourceId < 1) {
      throw new Error(
        `Invalid resourceId: ${args?.resourceId}. Must be a finite positive integer.`
      );
    }

    // Get resource based on the resource type
    const uri =
      resourceType === RESOURCE_TYPE_TEXT
        ? textResourceUri(resourceId)
        : blobResourceUri(resourceId);
    const resource =
      resourceType === RESOURCE_TYPE_TEXT
        ? textResource(uri, resourceId)
        : blobResource(uri, resourceId);

    return {
      content: [
        {
          type: "text",
          text: `Returning resource reference for Resource ${resourceId}:`,
        },
        {
          type: "resource",
          resource: resource,
        },
        {
          type: "text",
          text: `You can access this resource using the URI: ${resource.uri}`,
        },
      ],
    };
  });
};

View File

@@ -0,0 +1,92 @@
import { McpServer } from "@modelcontextprotocol/sdk/server/mcp.js";
import { CallToolResult } from "@modelcontextprotocol/sdk/types.js";
import { syncRoots } from "../server/roots.js";

// Tool configuration
const name = "get-roots-list";
const config = {
  title: "Get Roots List Tool",
  description:
    "Lists the current MCP roots provided by the client. Demonstrates the roots protocol capability even though this server doesn't access files.",
  inputSchema: {},
};

/**
 * Registers the 'get-roots-list' tool.
 *
 * If the connected client does not advertise the roots capability, this is a
 * no-op and the tool is not registered.
 *
 * When it is registered, the handler reports the roots the server currently
 * knows about for this session, re-requesting the list from the client via
 * syncRoots if it was never received.
 *
 * @param {McpServer} server - The McpServer instance where the tool will be registered.
 */
export const registerGetRootsListTool = (server: McpServer) => {
  // Guard clause: only register when the client supports roots.
  const clientCapabilities = server.server.getClientCapabilities() || {};
  if (clientCapabilities.roots === undefined) {
    return;
  }

  server.registerTool(
    name,
    config,
    async (args, extra): Promise<CallToolResult> => {
      // Fetch the current roots list from the client if need be.
      const currentRoots = await syncRoots(server, extra.sessionId);

      // The client supports roots, but none are configured (or the list
      // hasn't arrived yet).
      if (!currentRoots || currentRoots.length === 0) {
        return {
          content: [
            {
              type: "text",
              text:
                "The client supports roots but no roots are currently configured.\n\n" +
                "This could mean:\n" +
                "1. The client hasn't provided any roots yet\n" +
                "2. The client provided an empty roots list\n" +
                "3. The roots configuration is still being loaded",
            },
          ],
        };
      }

      // One numbered entry per root: name on the first line, URI beneath it.
      const rootsList = currentRoots
        .map(
          (root, index) =>
            `${index + 1}. ${root.name || "Unnamed Root"}\n   URI: ${root.uri}`
        )
        .join("\n\n");

      return {
        content: [
          {
            type: "text",
            text:
              `Current MCP Roots (${currentRoots.length} total):\n\n${rootsList}\n\n` +
              "Note: This server demonstrates the roots protocol capability but doesn't actually access files. " +
              "The roots are provided by the MCP client and can be used by servers that need file system access.",
          },
        ],
      };
    }
  );
};

View File

@@ -0,0 +1,86 @@
import { z } from "zod";
import { McpServer } from "@modelcontextprotocol/sdk/server/mcp.js";
import {
  CallToolResult,
  ContentBlock,
} from "@modelcontextprotocol/sdk/types.js";

// Tool input schema
const GetStructuredContentInputSchema = {
  location: z
    .enum(["New York", "Chicago", "Los Angeles"])
    .describe("Choose city"),
};

// Tool output schema
const GetStructuredContentOutputSchema = z.object({
  temperature: z.number().describe("Temperature in celsius"),
  conditions: z.string().describe("Weather conditions description"),
  humidity: z.number().describe("Humidity percentage"),
});

// Tool configuration
const name = "get-structured-content";
const config = {
  title: "Get Structured Content Tool",
  description:
    "Returns structured content along with an output schema for client data validation",
  inputSchema: GetStructuredContentInputSchema,
  outputSchema: GetStructuredContentOutputSchema,
};

/**
 * Registers the 'get-structured-content' tool.
 *
 * The handler looks up simulated weather data (temperature, conditions,
 * humidity) for the requested city and returns it twice:
 * - `content`: a JSON-stringified text block for backward compatibility.
 * - `structuredContent`: the same data as a structured object matching the
 *   declared output schema.
 *
 * @param {McpServer} server - The McpServer instance where the tool will be registered.
 */
export const registerGetStructuredContentTool = (server: McpServer) => {
  server.registerTool(name, config, async (args): Promise<CallToolResult> => {
    // Simulated weather readings for each supported city.
    const weatherByCity = {
      "New York": { temperature: 33, conditions: "Cloudy", humidity: 82 },
      "Chicago": {
        temperature: 36,
        conditions: "Light rain / drizzle",
        humidity: 82,
      },
      "Los Angeles": {
        temperature: 73,
        conditions: "Sunny / Clear",
        humidity: 48,
      },
    };
    const weather = weatherByCity[args.location];

    // Plain text block for clients that don't understand structuredContent.
    const backwardCompatibleContentBlock: ContentBlock = {
      type: "text",
      text: JSON.stringify(weather),
    };

    return {
      content: [backwardCompatibleContentBlock],
      structuredContent: weather,
    };
  });
};

View File

@@ -0,0 +1,45 @@
import { z } from "zod";
import { McpServer } from "@modelcontextprotocol/sdk/server/mcp.js";
import { CallToolResult } from "@modelcontextprotocol/sdk/types.js";

// Tool input schema
const GetSumSchema = z.object({
  a: z.number().describe("First number"),
  b: z.number().describe("Second number"),
});

// Tool configuration
const name = "get-sum";
const config = {
  title: "Get Sum Tool",
  description: "Returns the sum of two numbers",
  inputSchema: GetSumSchema,
};

/**
 * Registers the 'get-sum' tool.
 *
 * The handler validates its arguments against GetSumSchema (two numeric
 * properties, `a` and `b`), adds them, and reports the result in a single
 * text content block.
 *
 * @param {McpServer} server - The McpServer instance where the tool will be registered.
 */
export const registerGetSumTool = (server: McpServer) => {
  server.registerTool(name, config, async (args): Promise<CallToolResult> => {
    const { a, b } = GetSumSchema.parse(args);
    const sum = a + b;
    return {
      content: [
        {
          type: "text",
          text: `The sum of ${a} and ${b} is ${sum}.`,
        },
      ],
    };
  });
};

File diff suppressed because one or more lines are too long

View File

@@ -0,0 +1,243 @@
import { z } from "zod";
import { McpServer } from "@modelcontextprotocol/sdk/server/mcp.js";
import { CallToolResult, Resource } from "@modelcontextprotocol/sdk/types.js";
import { gzipSync } from "node:zlib";
import {
  getSessionResourceURI,
  registerSessionResource,
} from "../resources/session.js";
// Maximum input file size - 10 MB default (overridable via env var)
const GZIP_MAX_FETCH_SIZE = Number(
  process.env.GZIP_MAX_FETCH_SIZE ?? String(10 * 1024 * 1024)
);
// Maximum fetch time - 30 seconds default (overridable via env var).
const GZIP_MAX_FETCH_TIME_MILLIS = Number(
  process.env.GZIP_MAX_FETCH_TIME_MILLIS ?? String(30 * 1000)
);
// Comma-separated list of allowed domains. Empty means all domains are allowed.
// Entries are normalized to lowercase; matching includes subdomains (see validateDataURI).
const GZIP_ALLOWED_DOMAINS = (process.env.GZIP_ALLOWED_DOMAINS ?? "")
  .split(",")
  .map((d) => d.trim().toLowerCase())
  .filter((d) => d.length > 0);
// Tool input schema
const GZipFileAsResourceSchema = z.object({
  name: z.string().describe("Name of the output file").default("README.md.gz"),
  data: z
    .string()
    .url()
    .describe("URL or data URI of the file content to compress")
    .default(
      "https://raw.githubusercontent.com/modelcontextprotocol/servers/refs/heads/main/README.md"
    ),
  outputType: z
    .enum(["resourceLink", "resource"])
    .default("resourceLink")
    .describe(
      "How the resulting gzipped file should be returned. 'resourceLink' returns a link to a resource that can be read later, 'resource' returns a full resource object."
    ),
});
// Tool configuration
const name = "gzip-file-as-resource";
const config = {
  title: "GZip File as Resource Tool",
  description:
    "Compresses a single file using gzip compression. Depending upon the selected output type, returns either the compressed data as a gzipped resource or a resource link, allowing it to be downloaded in a subsequent request during the current session.",
  inputSchema: GZipFileAsResourceSchema,
};
/**
 * Registers the `gzip-file-as-resource` tool.
 *
 * The registered tool compresses input data using gzip, and makes the resulting file accessible
 * as a resource for the duration of the session.
 *
 * The tool supports two output types:
 * - "resource": Returns the resource directly, including its URI, MIME type, and base64-encoded content.
 * - "resourceLink": Returns a link to access the resource later.
 *
 * If an unrecognized `outputType` is provided, the tool throws an error.
 *
 * @param {McpServer} server - The McpServer instance where the tool will be registered.
 * @throws {Error} Throws an error if an unknown output type is specified.
 */
export const registerGZipFileAsResourceTool = (server: McpServer) => {
  server.registerTool(name, config, async (args): Promise<CallToolResult> => {
    // NOTE: this destructured `name` shadows the module-level tool name for
    // the remainder of the handler; here it is the output file name.
    const {
      name,
      data: dataUri,
      outputType,
    } = GZipFileAsResourceSchema.parse(args);
    // Validate data uri (protocol and, when configured, allowed-domain checks)
    const url = validateDataURI(dataUri);
    // Fetch the data, bounded in size and duration by the env-configured limits
    const response = await fetchSafely(url, {
      maxBytes: GZIP_MAX_FETCH_SIZE,
      timeoutMillis: GZIP_MAX_FETCH_TIME_MILLIS,
    });
    // Compress the data using gzip
    const inputBuffer = Buffer.from(response);
    const compressedBuffer = gzipSync(inputBuffer);
    // Create resource (URI is scoped to the current session)
    const uri = getSessionResourceURI(name);
    const blob = compressedBuffer.toString("base64");
    const mimeType = "application/gzip";
    const resource = <Resource>{ uri, name, mimeType };
    // Register resource, get resource link in return
    const resourceLink = registerSessionResource(
      server,
      resource,
      "blob",
      blob
    );
    // Return the resource or a resource link that can be used to access this resource later
    if (outputType === "resource") {
      return {
        content: [
          {
            type: "resource",
            resource: { uri, mimeType, blob },
          },
        ],
      };
    } else if (outputType === "resourceLink") {
      return {
        content: [resourceLink],
      };
    } else {
      // Unreachable while the schema enum stays in sync with these branches.
      throw new Error(`Unknown outputType: ${outputType}`);
    }
  });
};
/**
 * Validates a given data URI to ensure it follows the appropriate protocols and rules.
 *
 * @param {string} dataUri - The data URI to validate. Must be an HTTP, HTTPS, or data protocol URL. If a domain is provided, it must match the allowed domains list if applicable.
 * @return {URL} The validated and parsed URL object.
 * @throws {Error} If the data URI is malformed, does not use a supported protocol, or does not meet allowed domains criteria.
 */
function validateDataURI(dataUri: string): URL {
  try {
    // FIX: parse inside the try so a malformed URI gets the same wrapped
    // "Error processing file ..." message as the checks below; previously
    // `new URL` sat outside the try and threw a raw TypeError.
    const url = new URL(dataUri);
    if (
      url.protocol !== "http:" &&
      url.protocol !== "https:" &&
      url.protocol !== "data:"
    ) {
      throw new Error(
        `Unsupported URL protocol for ${dataUri}. Only http, https, and data URLs are supported.`
      );
    }
    // Enforce the allow-list only for network protocols; data: URIs carry
    // their payload inline and have no host to check.
    if (
      GZIP_ALLOWED_DOMAINS.length > 0 &&
      (url.protocol === "http:" || url.protocol === "https:")
    ) {
      const domain = url.hostname;
      // Exact match or any subdomain of an allowed entry.
      const domainAllowed = GZIP_ALLOWED_DOMAINS.some((allowedDomain) => {
        return domain === allowedDomain || domain.endsWith(`.${allowedDomain}`);
      });
      if (!domainAllowed) {
        throw new Error(`Domain ${domain} is not in the allowed domains list.`);
      }
    }
    return url;
  } catch (error) {
    throw new Error(
      `Error processing file ${dataUri}: ${
        error instanceof Error ? error.message : String(error)
      }`
    );
  }
}
/**
 * Fetches data safely from a given URL while ensuring constraints on maximum byte size and timeout duration.
 *
 * @param {URL} url The URL to fetch data from.
 * @param {Object} options An object containing options for the fetch operation.
 * @param {number} options.maxBytes The maximum allowed size (in bytes) of the response. If the response exceeds this size, the operation will be aborted.
 * @param {number} options.timeoutMillis The timeout duration (in milliseconds) for the fetch operation. If the fetch takes longer, it will be aborted.
 * @return {Promise<ArrayBuffer>} A promise that resolves with the response as an ArrayBuffer if successful.
 * @throws {Error} Throws an error if the request fails (non-2xx status), the response size exceeds the defined limit, the fetch times out, or the response is otherwise invalid.
 */
async function fetchSafely(
  url: URL,
  { maxBytes, timeoutMillis }: { maxBytes: number; timeoutMillis: number }
): Promise<ArrayBuffer> {
  const controller = new AbortController();
  // Abort both the request and the body read once the time budget elapses.
  const timeout = setTimeout(
    () =>
      controller.abort(
        `Fetching ${url} took more than ${timeoutMillis} ms and was aborted.`
      ),
    timeoutMillis
  );
  try {
    // Fetch the data
    const response = await fetch(url, { signal: controller.signal });
    // FIX: fail fast on HTTP error statuses; previously a 404/500 body was
    // read and compressed as though it were the requested file.
    if (!response.ok) {
      throw new Error(
        `Request to ${url} failed with status ${response.status}`
      );
    }
    if (!response.body) {
      throw new Error("No response body");
    }
    // Note: we can't trust the Content-Length header: a malicious or clumsy server could return much more data than advertised.
    // We check it here for early bail-out, but we still need to monitor actual bytes read below.
    const contentLengthHeader = response.headers.get("content-length");
    if (contentLengthHeader != null) {
      const contentLength = parseInt(contentLengthHeader, 10);
      if (contentLength > maxBytes) {
        throw new Error(
          `Content-Length for ${url} exceeds max of ${maxBytes}: ${contentLength}`
        );
      }
    }
    // Read the fetched data from the response body
    const reader = response.body.getReader();
    const chunks: Uint8Array[] = [];
    let totalSize = 0;
    // Read chunks until done, enforcing the byte budget on actual bytes read
    try {
      while (true) {
        const { done, value } = await reader.read();
        if (done) break;
        totalSize += value.length;
        if (totalSize > maxBytes) {
          // cancel() returns a promise; we're bailing out anyway, so
          // explicitly ignore it rather than leaving it floating.
          void reader.cancel();
          throw new Error(`Response from ${url} exceeds ${maxBytes} bytes`);
        }
        chunks.push(value);
      }
    } finally {
      reader.releaseLock();
    }
    // Combine chunks into a single contiguous buffer
    const buffer = new Uint8Array(totalSize);
    let offset = 0;
    for (const chunk of chunks) {
      buffer.set(chunk, offset);
      offset += chunk.length;
    }
    return buffer.buffer;
  } finally {
    clearTimeout(timeout);
  }
}

View File

@@ -0,0 +1,53 @@
import { McpServer } from "@modelcontextprotocol/sdk/server/mcp.js";
import { registerGetAnnotatedMessageTool } from "./get-annotated-message.js";
import { registerEchoTool } from "./echo.js";
import { registerGetEnvTool } from "./get-env.js";
import { registerGetResourceLinksTool } from "./get-resource-links.js";
import { registerGetResourceReferenceTool } from "./get-resource-reference.js";
import { registerGetRootsListTool } from "./get-roots-list.js";
import { registerGetStructuredContentTool } from "./get-structured-content.js";
import { registerGetSumTool } from "./get-sum.js";
import { registerGetTinyImageTool } from "./get-tiny-image.js";
import { registerGZipFileAsResourceTool } from "./gzip-file-as-resource.js";
import { registerToggleSimulatedLoggingTool } from "./toggle-simulated-logging.js";
import { registerToggleSubscriberUpdatesTool } from "./toggle-subscriber-updates.js";
import { registerTriggerElicitationRequestTool } from "./trigger-elicitation-request.js";
import { registerTriggerLongRunningOperationTool } from "./trigger-long-running-operation.js";
import { registerTriggerSamplingRequestTool } from "./trigger-sampling-request.js";
import { registerTriggerSamplingRequestAsyncTool } from "./trigger-sampling-request-async.js";
import { registerTriggerElicitationRequestAsyncTool } from "./trigger-elicitation-request-async.js";
import { registerSimulateResearchQueryTool } from "./simulate-research-query.js";

/**
 * Register the tools with the MCP server.
 *
 * These tools have no dependency on client capabilities, so they can be
 * registered unconditionally.
 *
 * @param server
 */
export const registerTools = (server: McpServer) => {
  const registrars = [
    registerEchoTool,
    registerGetAnnotatedMessageTool,
    registerGetEnvTool,
    registerGetResourceLinksTool,
    registerGetResourceReferenceTool,
    registerGetStructuredContentTool,
    registerGetSumTool,
    registerGetTinyImageTool,
    registerGZipFileAsResourceTool,
    registerToggleSimulatedLoggingTool,
    registerToggleSubscriberUpdatesTool,
    registerTriggerLongRunningOperationTool,
  ];
  // Registration happens in listing order.
  for (const register of registrars) {
    register(server);
  }
};
/**
 * Register the tools that are conditional upon client capabilities.
 * These must be registered conditionally, after initialization.
 */
export const registerConditionalTools = (server: McpServer) => {
  const registrars = [
    registerGetRootsListTool,
    registerTriggerElicitationRequestTool,
    registerTriggerSamplingRequestTool,
    // Task-based research tool (uses experimental tasks API)
    registerSimulateResearchQueryTool,
    // Bidirectional task tools - server sends requests that client executes as tasks
    registerTriggerSamplingRequestAsyncTool,
    registerTriggerElicitationRequestAsyncTool,
  ];
  registrars.forEach((register) => register(server));
};

View File

@@ -0,0 +1,345 @@
import { z } from "zod";
import { McpServer } from "@modelcontextprotocol/sdk/server/mcp.js";
import {
CallToolResult,
GetTaskResult,
Task,
ElicitResult,
ElicitResultSchema,
} from "@modelcontextprotocol/sdk/types.js";
import { CreateTaskResult } from "@modelcontextprotocol/sdk/experimental/tasks";
// Tool input schema, validated when the task is created
const SimulateResearchQuerySchema = z.object({
  topic: z.string().describe("The research topic to investigate"),
  ambiguous: z
    .boolean()
    .default(false)
    .describe(
      "Simulate an ambiguous query that requires clarification (triggers input_required status)"
    ),
});
// Research stages, processed in order by runResearchProcess
const STAGES = [
  "Gathering sources",
  "Analyzing content",
  "Synthesizing findings",
  "Generating report",
];
// Duration per stage in milliseconds
const STAGE_DURATION = 1000;
// Internal state for tracking research tasks
interface ResearchState {
  // Topic supplied by the caller
  topic: string;
  // Whether to simulate an ambiguous query (forced false at registration
  // time when the client lacks elicitation support)
  ambiguous: boolean;
  // Index into STAGES of the stage currently being processed
  currentStage: number;
  // User-provided (or defaulted) interpretation of an ambiguous topic
  clarification?: string;
  // Set once all stages have finished; also checked as an early-exit flag
  completed: boolean;
  // Final report, populated when the task completes
  result?: CallToolResult;
}
// Map to store research state per task, keyed by taskId;
// entries are removed in getTaskResult
const researchStates = new Map<string, ResearchState>();
/**
 * Runs the background research process.
 * Updates task status as it progresses through stages.
 * If clarification is needed, attempts elicitation via sendRequest.
 *
 * Note: Elicitation only works on STDIO transport. On HTTP transport,
 * sendRequest will fail and the task will use a default interpretation.
 * Full HTTP support requires SDK PR #1210's elicitInputStream API.
 *
 * @param taskId - Id of the task whose state (in researchStates) is advanced.
 * @param args - Validated tool arguments; currently unused here because the
 *   per-task state in researchStates already carries the same data.
 * @param taskStore - Subset of the task store used to publish status updates
 *   and the final result.
 * @param sendRequest - Server-to-client request function used for the
 *   optional elicitation round-trip.
 */
async function runResearchProcess(
  taskId: string,
  args: z.infer<typeof SimulateResearchQuerySchema>,
  taskStore: {
    updateTaskStatus: (
      taskId: string,
      status: Task["status"],
      message?: string
    ) => Promise<void>;
    storeTaskResult: (
      taskId: string,
      status: "completed" | "failed",
      result: CallToolResult
    ) => Promise<void>;
  },
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
  sendRequest: any
): Promise<void> {
  // State is created by createTask before this runs; missing state means the
  // task was never initialized (or was already cleaned up), so do nothing.
  const state = researchStates.get(taskId);
  if (!state) return;
  // Process each stage
  for (let i = state.currentStage; i < STAGES.length; i++) {
    state.currentStage = i;
    // Check if task was cancelled externally
    if (state.completed) return;
    // Update status message for current stage
    await taskStore.updateTaskStatus(taskId, "working", `${STAGES[i]}...`);
    // At synthesis stage (index 2), check if clarification is needed
    if (i === 2 && state.ambiguous && !state.clarification) {
      // Update status to show we're requesting input (spec SHOULD)
      await taskStore.updateTaskStatus(
        taskId,
        "input_required",
        `Found multiple interpretations for "${state.topic}". Requesting clarification...`
      );
      try {
        // Try elicitation via sendRequest (works on STDIO, fails on HTTP)
        const elicitResult: ElicitResult = await sendRequest(
          {
            method: "elicitation/create",
            params: {
              message: `The research query "${state.topic}" could have multiple interpretations. Please clarify what you're looking for:`,
              requestedSchema: {
                type: "object",
                properties: {
                  interpretation: {
                    type: "string",
                    title: "Clarification",
                    description:
                      "Which interpretation of the topic do you mean?",
                    oneOf: getInterpretationsForTopic(state.topic),
                  },
                },
                required: ["interpretation"],
              },
            },
          },
          ElicitResultSchema
        );
        // Process elicitation response: accept/decline/cancel all resolve to
        // some clarification string so the loop can resume unconditionally.
        if (elicitResult.action === "accept" && elicitResult.content) {
          state.clarification =
            (elicitResult.content as { interpretation?: string })
              .interpretation || "User accepted without selection";
        } else if (elicitResult.action === "decline") {
          state.clarification = "User declined - using default interpretation";
        } else {
          state.clarification = "User cancelled - using default interpretation";
        }
      } catch (error) {
        // Elicitation failed (likely HTTP transport without streaming support)
        // Use default interpretation and continue - task should still complete
        console.warn(
          `Elicitation failed for task ${taskId} (HTTP transport?):`,
          error instanceof Error ? error.message : String(error)
        );
        state.clarification =
          "technical (default - elicitation unavailable on HTTP)";
      }
      // Resume with working status (spec SHOULD)
      await taskStore.updateTaskStatus(
        taskId,
        "working",
        `Continuing with interpretation: "${state.clarification}"...`
      );
      // Continue processing (no return - just keep going through the loop)
    }
    // Simulate work for this stage
    await new Promise((resolve) => setTimeout(resolve, STAGE_DURATION));
  }
  // All stages complete - generate result and publish it to the task store
  state.completed = true;
  const result = generateResearchReport(state);
  state.result = result;
  await taskStore.storeTaskResult(taskId, "completed", result);
}
/**
 * Generates the final research report with educational content about tasks.
 *
 * The report adapts to the run: clarification (when present) is folded into
 * the title and an extra "Elicitation Flow" section is emitted.
 *
 * @param state - Final research state (topic plus optional clarification).
 * @returns A CallToolResult whose single text item is the markdown report.
 */
function generateResearchReport(state: ResearchState): CallToolResult {
  // Fold the clarification (if any) into the displayed topic.
  const topic = state.clarification
    ? `${state.topic} (${state.clarification})`
    : state.topic;
  const report = `# Research Report: ${topic}
## Research Parameters
- **Topic**: ${state.topic}
${state.clarification ? `- **Clarification**: ${state.clarification}` : ""}
## Synthesis
This research query was processed through ${STAGES.length} stages:
${STAGES.map((s, i) => `- Stage ${i + 1}: ${s}`).join("\n")}
---
## About This Demo (SEP-1686: Tasks)
This tool demonstrates MCP's task-based execution pattern for long-running operations:
**Task Lifecycle Demonstrated:**
1. \`tools/call\` with \`task\` parameter → Server returns \`CreateTaskResult\` (not the final result)
2. Client polls \`tasks/get\` → Server returns current status and \`statusMessage\`
3. Status progressed: \`working\`${
    state.clarification ? `\`input_required\`\`working\`` : ""
  }\`completed\`
4. Client calls \`tasks/result\` → Server returns this final result
${
  state.clarification
    ? `**Elicitation Flow:**
When the query was ambiguous, the server sent an \`elicitation/create\` request
to the client. The task status changed to \`input_required\` while awaiting user input.
${
  state.clarification.includes("unavailable on HTTP")
    ? `
**Note:** Elicitation was skipped because this server is running over HTTP transport.
The current SDK's \`sendRequest\` only works over STDIO. Full HTTP elicitation support
requires SDK PR #1210's streaming \`elicitInputStream\` API.
`
    : `After receiving clarification ("${state.clarification}"), the task resumed processing and completed.`
}
`
    : ""
}
**Key Concepts:**
- Tasks enable "call now, fetch later" patterns
- \`statusMessage\` provides human-readable progress updates
- Tasks have TTL (time-to-live) for automatic cleanup
- \`pollInterval\` suggests how often to check status
- Elicitation requests can be sent directly during task execution
*This is a simulated research report from the Everything MCP Server.*
`;
  return {
    content: [
      {
        type: "text",
        text: report,
      },
    ],
  };
}
/**
 * Registers the 'simulate-research-query' tool as a task-based tool.
 *
 * This tool demonstrates the MCP Tasks feature (SEP-1686) with a real-world scenario:
 * a research tool that gathers and synthesizes information from multiple sources.
 * If the query is ambiguous, it pauses to ask for clarification before completing.
 *
 * @param {McpServer} server - The McpServer instance where the tool will be registered.
 */
export const registerSimulateResearchQueryTool = (server: McpServer) => {
  // Check if client supports elicitation (needed for input_required flow)
  // NOTE(review): capabilities are read once, at registration time, so this
  // must be called after initialization (it is, via registerConditionalTools).
  const clientCapabilities = server.server.getClientCapabilities() || {};
  const clientSupportsElicitation: boolean =
    clientCapabilities.elicitation !== undefined;
  server.experimental.tasks.registerToolTask(
    "simulate-research-query",
    {
      title: "Simulate Research Query",
      description:
        "Simulates a deep research operation that gathers, analyzes, and synthesizes information. " +
        "Demonstrates MCP task-based operations with progress through multiple stages. " +
        "If 'ambiguous' is true and client supports elicitation, sends an elicitation request for clarification.",
      inputSchema: SimulateResearchQuerySchema,
      execution: { taskSupport: "required" },
    },
    {
      /**
       * Creates a new research task and starts background processing.
       * Returns immediately with the task handle; the work itself runs
       * un-awaited in runResearchProcess.
       */
      createTask: async (args, extra): Promise<CreateTaskResult> => {
        const validatedArgs = SimulateResearchQuerySchema.parse(args);
        // Create the task in the store
        const task = await extra.taskStore.createTask({
          ttl: 300000, // 5 minutes
          pollInterval: 1000,
        });
        // Initialize research state; 'ambiguous' is honored only when the
        // client can actually answer an elicitation request.
        const state: ResearchState = {
          topic: validatedArgs.topic,
          ambiguous: validatedArgs.ambiguous && clientSupportsElicitation,
          currentStage: 0,
          completed: false,
        };
        researchStates.set(task.taskId, state);
        // Start background research (don't await - runs asynchronously)
        // Pass sendRequest for elicitation (works on STDIO, gracefully degrades on HTTP)
        runResearchProcess(
          task.taskId,
          validatedArgs,
          extra.taskStore,
          extra.sendRequest
        ).catch((error) => {
          // Any uncaught failure in the background process marks the task failed.
          console.error(`Research task ${task.taskId} failed:`, error);
          extra.taskStore
            .updateTaskStatus(task.taskId, "failed", String(error))
            .catch(console.error);
        });
        return { task };
      },
      /**
       * Returns the current status of the research task.
       */
      getTask: async (args, extra): Promise<GetTaskResult> => {
        return await extra.taskStore.getTask(extra.taskId);
      },
      /**
       * Returns the task result.
       * Elicitation is now handled directly in the background process.
       */
      getTaskResult: async (args, extra): Promise<CallToolResult> => {
        // Return the stored result
        const result = await extra.taskStore.getTaskResult(extra.taskId);
        // Clean up the per-task state entry now that the result was delivered
        researchStates.delete(extra.taskId);
        return result as CallToolResult;
      },
    }
  );
};
/**
 * Builds the interpretation options offered during elicitation.
 *
 * Topics mentioning "python" (case-insensitive) get Python-specific
 * choices; every other topic falls back to generic perspectives.
 *
 * @param topic - The raw research topic supplied by the caller.
 * @returns Options in `oneOf` form: a machine value plus a display title.
 */
function getInterpretationsForTopic(
  topic: string
): Array<{ const: string; title: string }> {
  // Example: contextual interpretations for "python"
  const pythonOptions = [
    { const: "programming", title: "Python programming language" },
    { const: "snake", title: "Python snake species" },
    { const: "comedy", title: "Monty Python comedy group" },
  ];
  // Default generic interpretations
  const genericOptions = [
    { const: "technical", title: "Technical/scientific perspective" },
    { const: "historical", title: "Historical perspective" },
    { const: "current", title: "Current events/news perspective" },
  ];
  return topic.toLowerCase().includes("python")
    ? pythonOptions
    : genericOptions;
}

View File

@@ -0,0 +1,54 @@
import { McpServer } from "@modelcontextprotocol/sdk/server/mcp.js";
import { CallToolResult } from "@modelcontextprotocol/sdk/types.js";
import {
beginSimulatedLogging,
stopSimulatedLogging,
} from "../server/logging.js";
// Tool configuration (no input arguments; invocation simply toggles state)
const name = "toggle-simulated-logging";
const config = {
  title: "Toggle Simulated Logging",
  description: "Toggles simulated, random-leveled logging on or off.",
  inputSchema: {},
};
// Track enabled clients by session id; `undefined` is a valid key for
// transports that do not provide a session id
const clients: Set<string | undefined> = new Set<string | undefined>();
/**
 * Registers the `toggle-simulated-logging` tool.
 *
 * Each invocation flips the simulated-logging state for the calling
 * session: when periodic, random-leveled log messages are currently being
 * sent to the client they are stopped, otherwise they are started.
 *
 * @param {McpServer} server - The McpServer instance where the tool will be registered.
 */
export const registerToggleSimulatedLoggingTool = (server: McpServer) => {
  server.registerTool(
    name,
    config,
    async (_args, extra): Promise<CallToolResult> => {
      const sessionId = extra?.sessionId;
      const isActive = clients.has(sessionId);
      let response: string;
      if (isActive) {
        // Logging is running for this session: shut it down.
        stopSimulatedLogging(sessionId);
        clients.delete(sessionId);
        response = `Stopped simulated logging for session ${sessionId}`;
      } else {
        // Logging is not running yet: start emitting simulated messages.
        beginSimulatedLogging(server, sessionId);
        clients.add(sessionId);
        response = `Started simulated, random-leveled logging for session ${sessionId} at a 5 second pace. Client's selected logging level will be respected. If an interval elapses and the message to be sent is below the selected level, it will not be sent. Thus at higher chosen logging levels, messages should arrive further apart. `;
      }
      return {
        content: [{ type: "text", text: response }],
      };
    }
  );
};

View File

@@ -0,0 +1,57 @@
import { McpServer } from "@modelcontextprotocol/sdk/server/mcp.js";
import { CallToolResult } from "@modelcontextprotocol/sdk/types.js";
import {
beginSimulatedResourceUpdates,
stopSimulatedResourceUpdates,
} from "../resources/subscriptions.js";
// Tool configuration (no input arguments; invocation simply toggles state)
const name = "toggle-subscriber-updates";
const config = {
  title: "Toggle Subscriber Updates",
  description: "Toggles simulated resource subscription updates on or off.",
  inputSchema: {},
};
// Track enabled clients by session id; `undefined` is a valid key for
// transports that do not provide a session id
const clients: Set<string | undefined> = new Set<string | undefined>();
/**
 * Registers the `toggle-subscriber-updates` tool.
 *
 * The registered tool enables or disables the sending of periodic, simulated resource
 * update messages to the connected client for any subscriptions it has made.
 *
 * When invoked, it either starts or stops simulated resource updates based on the session's
 * current state. If simulated updates for the specified session are active, they will be
 * stopped; if they are inactive, simulated updates will be started.
 *
 * The response provides feedback indicating whether simulated updates were started or stopped,
 * including the session ID.
 *
 * @param {McpServer} server - The McpServer instance where the tool will be registered.
 */
export const registerToggleSubscriberUpdatesTool = (server: McpServer) => {
  server.registerTool(
    name,
    config,
    async (_args, extra): Promise<CallToolResult> => {
      const sessionId = extra?.sessionId;
      let response: string;
      if (clients.has(sessionId)) {
        stopSimulatedResourceUpdates(sessionId);
        clients.delete(sessionId);
        response = `Stopped simulated resource updates for session ${sessionId}`;
      } else {
        beginSimulatedResourceUpdates(server, sessionId);
        clients.add(sessionId);
        // Fixed message wording: "update notifications" (was "updated
        // notifications") and "resources it is subscribed to" (was "the it").
        response = `Started simulated resource update notifications for session ${sessionId} at a 5 second pace. Client will receive updates for any resources it is subscribed to.`;
      }
      return {
        content: [{ type: "text", text: `${response}` }],
      };
    }
  );
};

View File

@@ -0,0 +1,265 @@
import { McpServer } from "@modelcontextprotocol/sdk/server/mcp.js";
import { CallToolResult } from "@modelcontextprotocol/sdk/types.js";
import { z } from "zod";
// Tool configuration
const name = "trigger-elicitation-request-async";
const config = {
  title: "Trigger Async Elicitation Request Tool",
  description:
    "Trigger an async elicitation request that the CLIENT executes as a background task. " +
    "Demonstrates bidirectional MCP tasks where the server sends an elicitation request and " +
    "the client handles user input asynchronously, allowing the server to poll for completion.",
  inputSchema: {},
};
// Poll interval in milliseconds
const POLL_INTERVAL = 1000;
// Maximum poll attempts before timeout (10 minutes for user input)
// i.e. MAX_POLL_ATTEMPTS * POLL_INTERVAL = 600s of waiting
const MAX_POLL_ATTEMPTS = 600;
/**
 * Registers the 'trigger-elicitation-request-async' tool.
 *
 * This tool demonstrates bidirectional MCP tasks for elicitation:
 * - Server sends elicitation request to client with task metadata
 * - Client creates a task and returns CreateTaskResult
 * - Client prompts user for input (task status: input_required)
 * - Server polls client's tasks/get endpoint for status
 * - Server fetches final result from client's tasks/result endpoint
 *
 * The tool is registered only when the client declares both the
 * `elicitation` capability and `tasks.requests.elicitation.create`.
 *
 * @param {McpServer} server - The McpServer instance where the tool will be registered.
 */
export const registerTriggerElicitationRequestAsyncTool = (
  server: McpServer
) => {
  // Check client capabilities
  const clientCapabilities = server.server.getClientCapabilities() || {};
  // Client must support elicitation AND tasks.requests.elicitation
  const clientSupportsElicitation =
    clientCapabilities.elicitation !== undefined;
  const clientTasksCapability = clientCapabilities.tasks as
    | {
        requests?: { elicitation?: { create?: object } };
      }
    | undefined;
  const clientSupportsAsyncElicitation =
    clientTasksCapability?.requests?.elicitation?.create !== undefined;
  if (clientSupportsElicitation && clientSupportsAsyncElicitation) {
    server.registerTool(
      name,
      config,
      async (_args, extra): Promise<CallToolResult> => {
        // Create the elicitation request WITH task metadata.
        // The params.task field signals to the client that this request may
        // be executed as a background task.
        const request = {
          method: "elicitation/create" as const,
          params: {
            task: {
              ttl: 600000, // 10 minutes (user input may take a while)
            },
            message:
              "Please provide inputs for the following fields (async task demo):",
            requestedSchema: {
              type: "object" as const,
              properties: {
                name: {
                  title: "Your Name",
                  type: "string" as const,
                  description: "Your full name",
                },
                favoriteColor: {
                  title: "Favorite Color",
                  type: "string" as const,
                  description: "What is your favorite color?",
                  enum: ["Red", "Blue", "Green", "Yellow", "Purple"],
                },
                agreeToTerms: {
                  title: "Terms Agreement",
                  type: "boolean" as const,
                  description: "Do you agree to the terms and conditions?",
                },
              },
              required: ["name"],
            },
          },
        };
        // Send the elicitation request
        // Client may return either:
        // - ElicitResult (synchronous execution)
        // - CreateTaskResult (task-based execution with { task } object)
        const elicitResponse = await extra.sendRequest(
          request as Parameters<typeof extra.sendRequest>[0],
          z.union([
            // CreateTaskResult - client created a task
            z.object({
              task: z.object({
                taskId: z.string(),
                status: z.string(),
                pollInterval: z.number().optional(),
                statusMessage: z.string().optional(),
              }),
            }),
            // ElicitResult - synchronous execution
            z.object({
              action: z.string(),
              content: z.any().optional(),
            }),
          ])
        );
        // Check if client returned CreateTaskResult (has task object)
        const isTaskResult = "task" in elicitResponse && elicitResponse.task;
        if (!isTaskResult) {
          // Client executed synchronously - return the direct response
          return {
            content: [
              {
                type: "text",
                text: `[SYNC] Client executed synchronously:\n${JSON.stringify(
                  elicitResponse,
                  null,
                  2
                )}`,
              },
            ],
          };
        }
        const taskId = elicitResponse.task.taskId;
        const statusMessages: string[] = [];
        statusMessages.push(`Task created: ${taskId}`);
        // Poll for task completion
        let attempts = 0;
        let taskStatus = elicitResponse.task.status;
        let taskStatusMessage: string | undefined;
        while (
          taskStatus !== "completed" &&
          taskStatus !== "failed" &&
          taskStatus !== "cancelled" &&
          attempts < MAX_POLL_ATTEMPTS
        ) {
          // Wait before polling
          await new Promise((resolve) => setTimeout(resolve, POLL_INTERVAL));
          attempts++;
          // Get task status from client
          const pollResult = await extra.sendRequest(
            {
              method: "tasks/get",
              params: { taskId },
            },
            z
              .object({
                status: z.string(),
                statusMessage: z.string().optional(),
              })
              .passthrough()
          );
          taskStatus = pollResult.status;
          taskStatusMessage = pollResult.statusMessage;
          // Only log status changes or every 10 polls to avoid spam
          if (
            attempts === 1 ||
            attempts % 10 === 0 ||
            taskStatus !== "input_required"
          ) {
            statusMessages.push(
              `Poll ${attempts}: ${taskStatus}${
                taskStatusMessage ? ` - ${taskStatusMessage}` : ""
              }`
            );
          }
        }
        // Check for failure/cancellation BEFORE the timeout check so a task
        // that reaches a terminal state on the final poll attempt
        // (attempts === MAX_POLL_ATTEMPTS) is not misreported as a timeout.
        if (taskStatus === "failed" || taskStatus === "cancelled") {
          return {
            content: [
              {
                type: "text",
                text: `[${taskStatus.toUpperCase()}] ${
                  taskStatusMessage || "No message"
                }\n\nProgress:\n${statusMessages.join("\n")}`,
              },
            ],
          };
        }
        // Timed out: the polling loop ended without a terminal status
        if (taskStatus !== "completed") {
          return {
            content: [
              {
                type: "text",
                text: `[TIMEOUT] Task timed out after ${MAX_POLL_ATTEMPTS} poll attempts\n\nProgress:\n${statusMessages.join(
                  "\n"
                )}`,
              },
            ],
          };
        }
        // Fetch the final result
        const result = await extra.sendRequest(
          {
            method: "tasks/result",
            params: { taskId },
          },
          z.any()
        );
        // Format the elicitation result
        const content: CallToolResult["content"] = [];
        if (result.action === "accept" && result.content) {
          content.push({
            type: "text",
            text: `[COMPLETED] User provided the requested information!`,
          });
          const userData = result.content as Record<string, unknown>;
          const lines = [];
          if (userData.name) lines.push(`- Name: ${userData.name}`);
          if (userData.favoriteColor)
            lines.push(`- Favorite Color: ${userData.favoriteColor}`);
          if (userData.agreeToTerms !== undefined)
            lines.push(`- Agreed to terms: ${userData.agreeToTerms}`);
          content.push({
            type: "text",
            text: `User inputs:\n${lines.join("\n")}`,
          });
        } else if (result.action === "decline") {
          content.push({
            type: "text",
            text: `[DECLINED] User declined to provide the requested information.`,
          });
        } else if (result.action === "cancel") {
          content.push({
            type: "text",
            text: `[CANCELLED] User cancelled the elicitation dialog.`,
          });
        }
        // Include progress and raw result for debugging
        content.push({
          type: "text",
          text: `\nProgress:\n${statusMessages.join(
            "\n"
          )}\n\nRaw result: ${JSON.stringify(result, null, 2)}`,
        });
        return { content };
      }
    );
  }
};

View File

@@ -0,0 +1,229 @@
import { McpServer } from "@modelcontextprotocol/sdk/server/mcp.js";
import {
ElicitResultSchema,
CallToolResult,
} from "@modelcontextprotocol/sdk/types.js";
// Tool configuration (no input arguments; the requested schema is fixed)
const name = "trigger-elicitation-request";
const config = {
  title: "Trigger Elicitation Request Tool",
  description: "Trigger a Request from the Server for User Elicitation",
  inputSchema: {},
};
/**
 * Registers the 'trigger-elicitation-request' tool.
 *
 * If the client does not support the elicitation capability, the tool is not registered.
 *
 * The registered tool sends an elicitation request for the user to provide information
 * based on a pre-defined schema of fields including text inputs, booleans, numbers,
 * email, dates, enums of various types, etc. It uses validation and handles multiple
 * possible outcomes from the user's response, such as acceptance with content, decline,
 * or cancellation of the dialog. The process also ensures parsing and validating
 * the elicitation input arguments at runtime.
 *
 * The elicitation dialog response is returned, formatted into a structured result,
 * which contains both user-submitted input data (if provided) and debugging information,
 * including raw results.
 *
 * @param {McpServer} server - The McpServer instance where the tool will be registered.
 */
export const registerTriggerElicitationRequestTool = (server: McpServer) => {
  // Does the client support elicitation?
  const clientCapabilities = server.server.getClientCapabilities() || {};
  const clientSupportsElicitation: boolean =
    clientCapabilities.elicitation !== undefined;
  // If so, register tool
  if (clientSupportsElicitation) {
    server.registerTool(
      name,
      config,
      async (_args, extra): Promise<CallToolResult> => {
        const elicitationResult = await extra.sendRequest(
          {
            method: "elicitation/create",
            params: {
              message: "Please provide inputs for the following fields:",
              requestedSchema: {
                type: "object",
                properties: {
                  name: {
                    title: "String",
                    type: "string",
                    description: "Your full, legal name",
                  },
                  check: {
                    title: "Boolean",
                    type: "boolean",
                    description: "Agree to the terms and conditions",
                  },
                  firstLine: {
                    title: "String with default",
                    type: "string",
                    description: "Favorite first line of a story",
                    default: "It was a dark and stormy night.",
                  },
                  email: {
                    title: "String with email format",
                    type: "string",
                    format: "email",
                    description:
                      "Your email address (will be verified, and never shared with anyone else)",
                  },
                  homepage: {
                    type: "string",
                    format: "uri",
                    title: "String with uri format",
                    description: "Portfolio / personal website",
                  },
                  birthdate: {
                    title: "String with date format",
                    type: "string",
                    format: "date",
                    description: "Your date of birth",
                  },
                  integer: {
                    title: "Integer",
                    type: "integer",
                    description:
                      "Your favorite integer (do not give us your phone number, pin, or other sensitive info)",
                    minimum: 1,
                    maximum: 100,
                    default: 42,
                  },
                  number: {
                    title: "Number in range 1-1000",
                    type: "number",
                    description: "Favorite number (there are no wrong answers)",
                    minimum: 0,
                    maximum: 1000,
                    default: 3.14,
                  },
                  untitledSingleSelectEnum: {
                    type: "string",
                    title: "Untitled Single Select Enum",
                    description: "Choose your favorite friend",
                    enum: [
                      "Monica",
                      "Rachel",
                      "Joey",
                      "Chandler",
                      "Ross",
                      "Phoebe",
                    ],
                    default: "Monica",
                  },
                  untitledMultipleSelectEnum: {
                    type: "array",
                    title: "Untitled Multiple Select Enum",
                    description: "Choose your favorite instruments",
                    minItems: 1,
                    maxItems: 3,
                    items: {
                      type: "string",
                      enum: ["Guitar", "Piano", "Violin", "Drums", "Bass"],
                    },
                    default: ["Guitar"],
                  },
                  titledSingleSelectEnum: {
                    type: "string",
                    title: "Titled Single Select Enum",
                    description: "Choose your favorite hero",
                    oneOf: [
                      { const: "hero-1", title: "Superman" },
                      { const: "hero-2", title: "Green Lantern" },
                      { const: "hero-3", title: "Wonder Woman" },
                    ],
                    default: "hero-1",
                  },
                  titledMultipleSelectEnum: {
                    type: "array",
                    title: "Titled Multiple Select Enum",
                    description: "Choose your favorite types of fish",
                    minItems: 1,
                    maxItems: 3,
                    items: {
                      anyOf: [
                        { const: "fish-1", title: "Tuna" },
                        { const: "fish-2", title: "Salmon" },
                        { const: "fish-3", title: "Trout" },
                      ],
                    },
                    default: ["fish-1"],
                  },
                  legacyTitledEnum: {
                    type: "string",
                    title: "Legacy Titled Single Select Enum",
                    description: "Choose your favorite type of pet",
                    enum: ["pet-1", "pet-2", "pet-3", "pet-4", "pet-5"],
                    enumNames: ["Cats", "Dogs", "Birds", "Fish", "Reptiles"],
                    default: "pet-1",
                  },
                },
                required: ["name"],
              },
            },
          },
          ElicitResultSchema,
          { timeout: 10 * 60 * 1000 /* 10 minutes */ }
        );
        // Handle different response actions
        const content: CallToolResult["content"] = [];
        if (
          elicitationResult.action === "accept" &&
          elicitationResult.content
        ) {
          content.push({
            type: "text",
            text: `✅ User provided the requested information!`,
          });
          // Only access elicitationResult.content when action is accept.
          // The keys reported below mirror the requestedSchema properties
          // above (previously this checked nonexistent keys like `color`).
          const userData = elicitationResult.content;
          const lines = [];
          if (userData.name) lines.push(`- Name: ${userData.name}`);
          if (userData.check !== undefined)
            lines.push(`- Agreed to terms: ${userData.check}`);
          if (userData.firstLine)
            lines.push(`- First Line: ${userData.firstLine}`);
          if (userData.email) lines.push(`- Email: ${userData.email}`);
          if (userData.homepage) lines.push(`- Homepage: ${userData.homepage}`);
          if (userData.birthdate)
            lines.push(`- Birthdate: ${userData.birthdate}`);
          if (userData.integer !== undefined)
            lines.push(`- Favorite Integer: ${userData.integer}`);
          if (userData.number !== undefined)
            lines.push(`- Favorite Number: ${userData.number}`);
          if (userData.untitledSingleSelectEnum)
            lines.push(
              `- Favorite Friend: ${userData.untitledSingleSelectEnum}`
            );
          if (userData.untitledMultipleSelectEnum)
            lines.push(
              `- Favorite Instruments: ${userData.untitledMultipleSelectEnum}`
            );
          if (userData.titledSingleSelectEnum)
            lines.push(`- Favorite Hero: ${userData.titledSingleSelectEnum}`);
          if (userData.titledMultipleSelectEnum)
            lines.push(
              `- Favorite Fish: ${userData.titledMultipleSelectEnum}`
            );
          if (userData.legacyTitledEnum)
            lines.push(`- Pet Type: ${userData.legacyTitledEnum}`);
          content.push({
            type: "text",
            text: `User inputs:\n${lines.join("\n")}`,
          });
        } else if (elicitationResult.action === "decline") {
          content.push({
            type: "text",
            text: `❌ User declined to provide the requested information.`,
          });
        } else if (elicitationResult.action === "cancel") {
          content.push({
            type: "text",
            text: `⚠️ User cancelled the elicitation dialog.`,
          });
        }
        // Include raw result for debugging
        content.push({
          type: "text",
          text: `\nRaw result: ${JSON.stringify(elicitationResult, null, 2)}`,
        });
        return { content };
      }
    );
  }
};

View File

@@ -0,0 +1,76 @@
import { z } from "zod";
import { McpServer } from "@modelcontextprotocol/sdk/server/mcp.js";
import { CallToolResult } from "@modelcontextprotocol/sdk/types.js";
// Tool input schema; the total duration is divided evenly across `steps`
const TriggerLongRunningOperationSchema = z.object({
  duration: z
    .number()
    .default(10)
    .describe("Duration of the operation in seconds"),
  steps: z.number().default(5).describe("Number of steps in the operation"),
});
// Tool configuration
const name = "trigger-long-running-operation";
const config = {
  title: "Trigger Long Running Operation Tool",
  description: "Demonstrates a long running operation with progress updates.",
  inputSchema: TriggerLongRunningOperationSchema,
};
/**
 * Registers the 'trigger-long-running-operation' tool.
 *
 * The tool simulates a long-running operation split into a fixed number of
 * steps over a requested duration. After each step it emits a
 * `notifications/progress` notification, but only when the caller supplied
 * a `progressToken` in the request metadata.
 *
 * On completion it returns a message summarizing the total duration and
 * number of steps.
 *
 * @param {McpServer} server - The McpServer instance where the tool will be registered.
 */
export const registerTriggerLongRunningOperationTool = (server: McpServer) => {
  server.registerTool(
    name,
    config,
    async (args, extra): Promise<CallToolResult> => {
      const { duration, steps } =
        TriggerLongRunningOperationSchema.parse(args);
      // Milliseconds to sleep between progress updates.
      const stepMillis = (duration / steps) * 1000;
      const progressToken = extra._meta?.progressToken;
      let completedSteps = 0;
      while (completedSteps < steps) {
        await new Promise((resolve) => setTimeout(resolve, stepMillis));
        completedSteps += 1;
        // Report progress only when the client asked for it.
        if (progressToken !== undefined) {
          await server.server.notification(
            {
              method: "notifications/progress",
              params: {
                progress: completedSteps,
                total: steps,
                progressToken,
              },
            },
            { relatedRequestId: extra.requestId }
          );
        }
      }
      return {
        content: [
          {
            type: "text",
            text: `Long running operation completed. Duration: ${duration} seconds, Steps: ${steps}.`,
          },
        ],
      };
    }
  );
};

View File

@@ -0,0 +1,230 @@
import { McpServer } from "@modelcontextprotocol/sdk/server/mcp.js";
import {
CallToolResult,
CreateMessageRequest,
} from "@modelcontextprotocol/sdk/types.js";
import { z } from "zod";
// Tool input schema
const TriggerSamplingRequestAsyncSchema = z.object({
  prompt: z.string().describe("The prompt to send to the LLM"),
  maxTokens: z
    .number()
    .default(100)
    .describe("Maximum number of tokens to generate"),
});
// Tool configuration
const name = "trigger-sampling-request-async";
const config = {
  title: "Trigger Async Sampling Request Tool",
  description:
    "Trigger an async sampling request that the CLIENT executes as a background task. " +
    "Demonstrates bidirectional MCP tasks where the server sends a request and the client " +
    "executes it asynchronously, allowing the server to poll for progress and results.",
  inputSchema: TriggerSamplingRequestAsyncSchema,
};
// Poll interval in milliseconds
const POLL_INTERVAL = 1000;
// Maximum poll attempts before timeout
// i.e. MAX_POLL_ATTEMPTS * POLL_INTERVAL = 60s of waiting
const MAX_POLL_ATTEMPTS = 60;
/**
* Registers the 'trigger-sampling-request-async' tool.
*
* This tool demonstrates bidirectional MCP tasks:
* - Server sends sampling request to client with task metadata
* - Client creates a task and returns CreateTaskResult
* - Server polls client's tasks/get endpoint for status
* - Server fetches final result from client's tasks/result endpoint
*
* @param {McpServer} server - The McpServer instance where the tool will be registered.
*/
export const registerTriggerSamplingRequestAsyncTool = (server: McpServer) => {
// Check client capabilities
const clientCapabilities = server.server.getClientCapabilities() || {};
// Client must support sampling AND tasks.requests.sampling
const clientSupportsSampling = clientCapabilities.sampling !== undefined;
const clientTasksCapability = clientCapabilities.tasks as
| {
requests?: { sampling?: { createMessage?: object } };
}
| undefined;
const clientSupportsAsyncSampling =
clientTasksCapability?.requests?.sampling?.createMessage !== undefined;
if (clientSupportsSampling && clientSupportsAsyncSampling) {
server.registerTool(
name,
config,
async (args, extra): Promise<CallToolResult> => {
const validatedArgs = TriggerSamplingRequestAsyncSchema.parse(args);
const { prompt, maxTokens } = validatedArgs;
// Create the sampling request WITH task metadata
// The params.task field signals to the client that this should be executed as a task
const request: CreateMessageRequest & {
params: { task?: { ttl: number } };
} = {
method: "sampling/createMessage",
params: {
task: {
ttl: 300000, // 5 minutes
},
messages: [
{
role: "user",
content: {
type: "text",
text: `Resource ${name} context: ${prompt}`,
},
},
],
systemPrompt: "You are a helpful test server.",
maxTokens,
temperature: 0.7,
},
};
// Send the sampling request
// Client may return either:
// - CreateMessageResult (synchronous execution)
// - CreateTaskResult (task-based execution with { task } object)
const samplingResponse = await extra.sendRequest(
request,
z.union([
// CreateTaskResult - client created a task
z.object({
task: z.object({
taskId: z.string(),
status: z.string(),
pollInterval: z.number().optional(),
statusMessage: z.string().optional(),
}),
}),
// CreateMessageResult - synchronous execution
z.object({
role: z.string(),
content: z.any(),
model: z.string(),
stopReason: z.string().optional(),
}),
])
);
// Check if client returned CreateTaskResult (has task object)
const isTaskResult =
"task" in samplingResponse && samplingResponse.task;
if (!isTaskResult) {
// Client executed synchronously - return the direct response
return {
content: [
{
type: "text",
text: `[SYNC] Client executed synchronously:\n${JSON.stringify(
samplingResponse,
null,
2
)}`,
},
],
};
}
const taskId = samplingResponse.task.taskId;
const statusMessages: string[] = [];
statusMessages.push(`Task created: ${taskId}`);
// Poll for task completion
let attempts = 0;
let taskStatus = samplingResponse.task.status;
let taskStatusMessage: string | undefined;
while (
taskStatus !== "completed" &&
taskStatus !== "failed" &&
taskStatus !== "cancelled" &&
attempts < MAX_POLL_ATTEMPTS
) {
// Wait before polling
await new Promise((resolve) => setTimeout(resolve, POLL_INTERVAL));
attempts++;
// Get task status from client
const pollResult = await extra.sendRequest(
{
method: "tasks/get",
params: { taskId },
},
z
.object({
status: z.string(),
statusMessage: z.string().optional(),
})
.passthrough()
);
taskStatus = pollResult.status;
taskStatusMessage = pollResult.statusMessage;
statusMessages.push(
`Poll ${attempts}: ${taskStatus}${
taskStatusMessage ? ` - ${taskStatusMessage}` : ""
}`
);
}
// Check for timeout
if (attempts >= MAX_POLL_ATTEMPTS) {
return {
content: [
{
type: "text",
text: `[TIMEOUT] Task timed out after ${MAX_POLL_ATTEMPTS} poll attempts\n\nProgress:\n${statusMessages.join(
"\n"
)}`,
},
],
};
}
// Check for failure/cancellation
if (taskStatus === "failed" || taskStatus === "cancelled") {
return {
content: [
{
type: "text",
text: `[${taskStatus.toUpperCase()}] ${
taskStatusMessage || "No message"
}\n\nProgress:\n${statusMessages.join("\n")}`,
},
],
};
}
// Fetch the final result
const result = await extra.sendRequest(
{
method: "tasks/result",
params: { taskId },
},
z.any()
);
// Return the result with status history
return {
content: [
{
type: "text",
text: `[COMPLETED] Async sampling completed!\n\n**Progress:**\n${statusMessages.join(
"\n"
)}\n\n**Result:**\n${JSON.stringify(result, null, 2)}`,
},
],
};
}
);
}
};

View File

@@ -0,0 +1,91 @@
import { McpServer } from "@modelcontextprotocol/sdk/server/mcp.js";
import {
CallToolResult,
CreateMessageRequest,
CreateMessageResultSchema,
} from "@modelcontextprotocol/sdk/types.js";
import { z } from "zod";
// Tool input schema
// Parsed at call time in the tool handler; `maxTokens` falls back to 100
// when the caller omits it.
const TriggerSamplingRequestSchema = z.object({
  prompt: z.string().describe("The prompt to send to the LLM"),
  maxTokens: z
    .number()
    .default(100)
    .describe("Maximum number of tokens to generate"),
});
// Tool configuration
// `name` is both the registered tool identifier and interpolated into the
// prompt text sent to the client in the sampling request below.
const name = "trigger-sampling-request";
const config = {
  title: "Trigger Sampling Request Tool",
  description: "Trigger a Request from the Server for LLM Sampling",
  inputSchema: TriggerSamplingRequestSchema,
};
/**
 * Registers the 'trigger-sampling-request' tool.
 *
 * Registration is skipped entirely when the connected client does not
 * advertise the sampling capability.
 *
 * The registered tool handler:
 * - Validates incoming arguments against `TriggerSamplingRequestSchema`.
 * - Builds a `sampling/createMessage` request from the prompt and token limit.
 * - Sends the request to the client and awaits the sampling result.
 * - Returns the formatted result as tool output.
 *
 * @param {McpServer} server - The McpServer instance where the tool will be registered.
 */
export const registerTriggerSamplingRequestTool = (server: McpServer) => {
  // Bail out early when the client cannot service sampling requests.
  const capabilities = server.server.getClientCapabilities() || {};
  if (capabilities.sampling === undefined) {
    return;
  }
  server.registerTool(
    name,
    config,
    async (args, extra): Promise<CallToolResult> => {
      const { prompt, maxTokens } = TriggerSamplingRequestSchema.parse(args);
      // Assemble the sampling request that will be forwarded to the client's LLM.
      const request: CreateMessageRequest = {
        method: "sampling/createMessage",
        params: {
          messages: [
            {
              role: "user",
              content: {
                type: "text",
                text: `Resource ${name} context: ${prompt}`,
              },
            },
          ],
          systemPrompt: "You are a helpful test server.",
          maxTokens,
          temperature: 0.7,
        },
      };
      // Round-trip through the client, validating the response shape.
      const samplingResult = await extra.sendRequest(
        request,
        CreateMessageResultSchema
      );
      // Surface the raw sampling result as pretty-printed JSON text.
      return {
        content: [
          {
            type: "text",
            text: `LLM sampling result: \n${JSON.stringify(samplingResult, null, 2)}`,
          },
        ],
      };
    }
  );
};

View File

@@ -0,0 +1,77 @@
import { SSEServerTransport } from "@modelcontextprotocol/sdk/server/sse.js";
import express from "express";
import { createServer } from "../server/index.js";
import cors from "cors";

console.error("Starting SSE server...");

// Express app with permissive CORS for testing with Inspector direct connect mode
const app = express();
app.use(
  cors({
    origin: "*", // use "*" with caution in production
    methods: "GET,POST",
    preflightContinue: false,
    optionsSuccessStatus: 204,
  })
);

// Map sessionId to transport for each client
const transports: Map<string, SSEServerTransport> = new Map<
  string,
  SSEServerTransport
>();

// Handle GET requests for new SSE streams
app.get("/sse", async (req, res) => {
  // Session Id should not exist for GET /sse requests
  if (req?.query?.sessionId) {
    const sessionId = req.query.sessionId as string;
    // Look the transport up defensively: the previous version cast the
    // (possibly missing) Map entry and dereferenced it, crashing the process
    // when an unknown sessionId was supplied.
    console.error(
      "Client Reconnecting? This shouldn't happen; when client has a sessionId, GET /sse should not be called again.",
      transports.get(sessionId)?.sessionId ?? sessionId
    );
    // Reject instead of leaving the request open with no stream attached.
    res.status(400).send("GET /sse must not include a sessionId");
    return;
  }
  // Only create a server once we know this is a new session; creating one
  // unconditionally leaked an unconnected server instance on the path above.
  const { server, cleanup } = createServer();
  // Create and store transport for the new session
  const transport = new SSEServerTransport("/message", res);
  transports.set(transport.sessionId, transport);
  // Connect server to transport
  await server.connect(transport);
  const sessionId = transport.sessionId;
  console.error("Client Connected: ", sessionId);
  // Handle close of connection
  server.server.onclose = async () => {
    console.error("Client Disconnected: ", sessionId);
    transports.delete(sessionId);
    cleanup(sessionId);
  };
});

// Handle POST requests for client messages
app.post("/message", async (req, res) => {
  // Session Id should exist for POST /message requests
  const sessionId = req?.query?.sessionId as string;
  // Get the transport for this session and use it to handle the request
  const transport = transports.get(sessionId);
  if (transport) {
    console.error("Client Message from", sessionId);
    await transport.handlePostMessage(req, res);
  } else {
    console.error(`No transport found for sessionId ${sessionId}`);
    // Respond instead of leaving the HTTP request hanging forever.
    res.status(404).send(`No transport found for sessionId ${sessionId}`);
  }
});

// Start the express server
const PORT = process.env.PORT || 3001;
app.listen(PORT, () => {
  console.error(`Server is running on port ${PORT}`);
});

View File

@@ -0,0 +1,33 @@
#!/usr/bin/env node
import { StdioServerTransport } from "@modelcontextprotocol/sdk/server/stdio.js";
import { createServer } from "../server/index.js";

console.error("Starting default (STDIO) server...");

/**
 * Entry point.
 * - Wires the MCP server to a stdio transport.
 * - Installs a SIGINT handler that shuts the server down and exits cleanly.
 *
 * @return {Promise<void>} A promise that resolves once setup has completed.
 */
async function main(): Promise<void> {
  const stdioTransport = new StdioServerTransport();
  const { server, cleanup } = createServer();

  // Attach the server to stdin/stdout.
  await server.connect(stdioTransport);

  // Tear everything down on Ctrl-C.
  process.on("SIGINT", async () => {
    await server.close();
    cleanup();
    process.exit(0);
  });
}

main().catch((error) => {
  console.error("Server error:", error);
  process.exit(1);
});

View File

@@ -0,0 +1,240 @@
import {
StreamableHTTPServerTransport,
EventStore,
} from "@modelcontextprotocol/sdk/server/streamableHttp.js";
import express, { Request, Response } from "express";
import { createServer } from "../server/index.js";
import { randomUUID } from "node:crypto";
import cors from "cors";
// Simple in-memory event store for SSE resumability
class InMemoryEventStore implements EventStore {
  // Insertion-ordered map of eventId -> { stream it belongs to, payload }.
  private events: Map<string, { streamId: string; message: unknown }> =
    new Map();

  /**
   * Persists a message for later replay and returns its generated event id.
   */
  async storeEvent(streamId: string, message: unknown): Promise<string> {
    const eventId = randomUUID();
    this.events.set(eventId, { streamId, message });
    return eventId;
  }

  /**
   * Replays every event recorded after `lastEventId` on the same stream,
   * returning the id of the last event sent (or `lastEventId` if unknown).
   */
  async replayEventsAfter(
    lastEventId: string,
    { send }: { send: (eventId: string, message: unknown) => Promise<void> }
  ): Promise<string> {
    const entries = Array.from(this.events.entries());
    const startIndex = entries.findIndex(([id]) => id === lastEventId);
    if (startIndex === -1) return lastEventId;
    // Only replay events belonging to the same stream as the last-seen event.
    // The previous version replayed events from ALL streams, which would
    // inject other sessions' messages into a resumed SSE stream.
    const { streamId } = entries[startIndex][1];
    let lastId: string = lastEventId;
    for (let i = startIndex + 1; i < entries.length; i++) {
      const [eventId, event] = entries[i];
      if (event.streamId !== streamId) continue;
      await send(eventId, event.message);
      lastId = eventId;
    }
    return lastId;
  }
}
console.log("Starting Streamable HTTP server...");
// Express app with permissive CORS for testing with Inspector direct connect mode
const app = express();
app.use(
  cors({
    origin: "*", // use "*" with caution in production
    methods: "GET,POST,DELETE",
    preflightContinue: false,
    optionsSuccessStatus: 204,
    // Expose MCP-specific headers so browser-based clients can read them
    exposedHeaders: ["mcp-session-id", "last-event-id", "mcp-protocol-version"],
  })
);
// Map sessionId to server transport for each client.
// Entries are added by the POST /mcp initialization path and removed by the
// transport's onclose handler and the SIGINT shutdown hook below.
const transports: Map<string, StreamableHTTPServerTransport> = new Map<
  string,
  StreamableHTTPServerTransport
>();
// Handle POST requests for client messages.
// Covers both the initial session-creating request (no mcp-session-id header)
// and all follow-up requests that carry an existing session id.
app.post("/mcp", async (req: Request, res: Response) => {
  console.log("Received MCP POST request");
  try {
    // Check for existing session ID
    const sessionId = req.headers["mcp-session-id"] as string | undefined;
    let transport: StreamableHTTPServerTransport;
    if (sessionId && transports.has(sessionId)) {
      // Reuse existing transport
      transport = transports.get(sessionId)!;
    } else if (!sessionId) {
      const { server, cleanup } = createServer();
      // New initialization request
      // NOTE(review): any session-less POST takes this path; it is not checked
      // here that the body is actually an `initialize` request — confirm.
      const eventStore = new InMemoryEventStore();
      transport = new StreamableHTTPServerTransport({
        sessionIdGenerator: () => randomUUID(),
        eventStore, // Enable resumability
        onsessioninitialized: (sessionId: string) => {
          // Store the transport by session ID when a session is initialized
          // This avoids race conditions where requests might come in before the session is stored
          console.log(`Session initialized with ID: ${sessionId}`);
          transports.set(sessionId, transport);
        },
      });
      // Set up onclose handler to clean up transport when closed
      server.server.onclose = async () => {
        const sid = transport.sessionId;
        if (sid && transports.has(sid)) {
          console.log(
            `Transport closed for session ${sid}, removing from transports map`
          );
          transports.delete(sid);
          cleanup(sid);
        }
      };
      // Connect the transport to the MCP server BEFORE handling the request
      // so responses can flow back through the same transport
      await server.connect(transport);
      await transport.handleRequest(req, res);
      return;
    } else {
      // Invalid request - no session ID or not initialization request
      // (a session id was supplied but is unknown: expired or already closed)
      res.status(400).json({
        jsonrpc: "2.0",
        error: {
          code: -32000,
          message: "Bad Request: No valid session ID provided",
        },
        id: req?.body?.id,
      });
      return;
    }
    // Handle the request with existing transport - no need to reconnect
    // The existing transport is already connected to the server
    await transport.handleRequest(req, res);
  } catch (error) {
    console.log("Error handling MCP request:", error);
    if (!res.headersSent) {
      res.status(500).json({
        jsonrpc: "2.0",
        error: {
          code: -32603,
          message: "Internal server error",
        },
        id: req?.body?.id,
      });
      return;
    }
  }
});
// Handle GET requests for SSE streams.
// Requires an existing session; supports resumption via Last-Event-ID.
app.get("/mcp", async (req: Request, res: Response) => {
  console.log("Received MCP GET request");
  const sessionId = req.headers["mcp-session-id"] as string | undefined;
  const transport = sessionId ? transports.get(sessionId) : undefined;
  // Guard clause: reject requests without a known session up front.
  if (!transport) {
    res.status(400).json({
      jsonrpc: "2.0",
      error: {
        code: -32000,
        message: "Bad Request: No valid session ID provided",
      },
      id: req?.body?.id,
    });
    return;
  }
  // Check for Last-Event-ID header for resumability
  const lastEventId = req.headers["last-event-id"] as string | undefined;
  console.log(
    lastEventId
      ? `Client reconnecting with Last-Event-ID: ${lastEventId}`
      : `Establishing new SSE stream for session ${sessionId}`
  );
  await transport.handleRequest(req, res);
});
// Handle DELETE requests for session termination.
// The transport itself performs the actual teardown; its onclose handler
// (installed at session creation) removes it from the transports map.
app.delete("/mcp", async (req: Request, res: Response) => {
  const sessionId = req.headers["mcp-session-id"] as string | undefined;
  const transport = sessionId ? transports.get(sessionId) : undefined;
  // Guard clause: an unknown or missing session cannot be terminated.
  if (!transport) {
    res.status(400).json({
      jsonrpc: "2.0",
      error: {
        code: -32000,
        message: "Bad Request: No valid session ID provided",
      },
      id: req?.body?.id,
    });
    return;
  }
  console.log(`Received session termination request for session ${sessionId}`);
  try {
    await transport.handleRequest(req, res);
  } catch (error) {
    console.log("Error handling session termination:", error);
    if (!res.headersSent) {
      res.status(500).json({
        jsonrpc: "2.0",
        error: {
          code: -32603,
          message: "Error handling session termination",
        },
        id: req?.body?.id,
      });
    }
  }
});
// Start the server
const PORT = process.env.PORT || 3001;
const server = app.listen(PORT, () => {
  console.error(`MCP Streamable HTTP Server listening on port ${PORT}`);
});

// Handle server errors
server.on("error", (err: unknown) => {
  // Safely extract an error code from the unknown error value, if present.
  let code: unknown;
  if (typeof err === "object" && err !== null && "code" in err) {
    code = (err as { code?: unknown }).code;
  }
  if (code === "EADDRINUSE") {
    console.error(
      `Failed to start: Port ${PORT} is already in use. Set PORT to a free port or stop the conflicting process.`
    );
  } else {
    console.error("HTTP server encountered an error while starting:", err);
  }
  // Ensure a non-zero exit so npm reports the failure instead of silently exiting
  process.exit(1);
});
// Handle server shutdown
process.on("SIGINT", async () => {
console.log("Shutting down server...");
// Close all active transports to properly clean up resources
for (const sessionId in transports) {
try {
console.log(`Closing transport for session ${sessionId}`);
await transports.get(sessionId)!.close();
transports.delete(sessionId);
} catch (error) {
console.log(`Error closing transport for session ${sessionId}:`, error);
}
}
console.log("Server shutdown complete");
process.exit(0);
});

View File

@@ -4,7 +4,5 @@
"outDir": "./dist",
"rootDir": "."
},
"include": [
"./**/*.ts"
]
"include": ["./**/*.ts"]
}

View File

@@ -14,13 +14,13 @@ ENV UV_LINK_MODE=copy
RUN --mount=type=cache,target=/root/.cache/uv \
--mount=type=bind,source=uv.lock,target=uv.lock \
--mount=type=bind,source=pyproject.toml,target=pyproject.toml \
uv sync --frozen --no-install-project --no-dev --no-editable
uv sync --locked --no-install-project --no-dev --no-editable
# Then, add the rest of the project source code and install it
# Installing separately from its dependencies allows optimal layer caching
ADD . /app
RUN --mount=type=cache,target=/root/.cache/uv \
uv sync --frozen --no-dev --no-editable
uv sync --locked --no-dev --no-editable
FROM python:3.12-slim-bookworm

View File

@@ -1,5 +1,7 @@
# Fetch MCP Server
<!-- mcp-name: io.github.modelcontextprotocol/server-fetch -->
A Model Context Protocol server that provides web content fetching capabilities. This server enables LLMs to retrieve and process content from web pages, converting HTML to markdown for easier consumption.
> [!CAUTION]
@@ -168,6 +170,48 @@ This can be customized by adding the argument `--user-agent=YourUserAgent` to th
The server can be configured to use a proxy by using the `--proxy-url` argument.
## Windows Configuration
If you're experiencing timeout issues on Windows, you may need to set the `PYTHONIOENCODING` environment variable to ensure proper character encoding:
<details>
<summary>Windows configuration (uvx)</summary>
```json
{
"mcpServers": {
"fetch": {
"command": "uvx",
"args": ["mcp-server-fetch"],
"env": {
"PYTHONIOENCODING": "utf-8"
}
}
}
}
```
</details>
<details>
<summary>Windows configuration (pip)</summary>
```json
{
"mcpServers": {
"fetch": {
"command": "python",
"args": ["-m", "mcp_server_fetch"],
"env": {
"PYTHONIOENCODING": "utf-8"
}
}
}
}
```
</details>
This addresses character encoding issues that can cause the server to timeout on Windows systems.
## Debugging
You can use the MCP inspector to debug the server. For uvx installations:

1139
src/fetch/uv.lock generated

File diff suppressed because it is too large Load Diff

View File

@@ -128,15 +128,6 @@ The server's directory access control follows this flow:
- Returns detailed listing with file sizes and summary statistics
- Shows total files, directories, and combined size
- **directory_tree**
- Get a recursive tree view of files and directories as a JSON structure
- Input: `path` (string): Starting directory path
- Returns JSON structure with:
- `name`: File/directory name
- `type`: "file" or "directory"
- `children`: Array of child entries (for directories only)
- Output is formatted with 2-space indentation for readability
- **move_file**
- Move or rename files and directories
- Inputs:
@@ -165,6 +156,7 @@ The server's directory access control follows this flow:
- `children` (array): Present only for directories
- Empty array for empty directories
- Omitted for files
- Output is formatted with 2-space indentation for readability
- **get_file_info**
- Get detailed file/directory metadata
@@ -183,6 +175,35 @@ The server's directory access control follows this flow:
- Returns:
- Directories that this server can read/write from
### Tool annotations (MCP hints)
This server sets [MCP ToolAnnotations](https://modelcontextprotocol.io/specification/2025-03-26/server/tools#toolannotations)
on each tool so clients can:
- Distinguish **read-only** tools from write-capable tools.
- Understand which write operations are **idempotent** (safe to retry with the same arguments).
- Highlight operations that may be **destructive** (overwriting or heavily mutating data).
The mapping for filesystem tools is:
| Tool | readOnlyHint | idempotentHint | destructiveHint | Notes |
|-----------------------------|--------------|----------------|-----------------|--------------------------------------------------|
| `read_text_file` | `true` | | | Pure read |
| `read_media_file` | `true` | | | Pure read |
| `read_multiple_files` | `true` | | | Pure read |
| `list_directory` | `true` | | | Pure read |
| `list_directory_with_sizes` | `true` | | | Pure read |
| `directory_tree` | `true` | | | Pure read |
| `search_files` | `true` | | | Pure read |
| `get_file_info` | `true` | | | Pure read |
| `list_allowed_directories` | `true` | | | Pure read |
| `create_directory`          | `false`      | `true`         | `false`         | Recreating the same dir is a no-op               |
| `write_file`                | `false`      | `true`         | `true`          | Overwrites existing files                        |
| `edit_file`                 | `false`      | `false`        | `true`          | Re-applying edits can fail or double-apply       |
| `move_file`                 | `false`      | `false`        | `false`         | Move/rename only; repeat usually errors          |
> Note: `idempotentHint` and `destructiveHint` are meaningful only when `readOnlyHint` is `false`, as defined by the MCP spec.
## Usage with Claude Desktop
Add this to your `claude_desktop_config.json`:
@@ -245,7 +266,7 @@ Add the configuration to your user-level MCP configuration file. Open the Comman
**Method 2: Workspace Configuration**
Alternatively, you can add the configuration to a file called `.vscode/mcp.json` in your workspace. This will allow you to share the configuration with others.
> For more details about MCP configuration in VS Code, see the [official VS Code MCP documentation](https://code.visualstudio.com/docs/copilot/mcp).
> For more details about MCP configuration in VS Code, see the [official VS Code MCP documentation](https://code.visualstudio.com/docs/copilot/customization/mcp-servers).
You can provide sandboxed directories to the server by mounting them to `/projects`. Adding the `ro` flag will make the directory readonly by the server.

View File

@@ -1,4 +1,4 @@
import { describe, it, expect, beforeEach, afterEach } from '@jest/globals';
import { describe, it, expect, beforeEach, afterEach } from 'vitest';
import * as fs from 'fs/promises';
import * as path from 'path';
import * as os from 'os';

View File

@@ -1,4 +1,4 @@
import { describe, it, expect, beforeEach, afterEach, jest } from '@jest/globals';
import { describe, it, expect, beforeEach, afterEach, vi } from 'vitest';
import fs from 'fs/promises';
import path from 'path';
import os from 'os';
@@ -23,19 +23,19 @@ import {
} from '../lib.js';
// Mock fs module
jest.mock('fs/promises');
const mockFs = fs as jest.Mocked<typeof fs>;
vi.mock('fs/promises');
const mockFs = fs as any;
describe('Lib Functions', () => {
beforeEach(() => {
jest.clearAllMocks();
vi.clearAllMocks();
// Set up allowed directories for tests
const allowedDirs = process.platform === 'win32' ? ['C:\\Users\\test', 'C:\\temp', 'C:\\allowed'] : ['/home/user', '/tmp', '/allowed'];
setAllowedDirectories(allowedDirs);
});
afterEach(() => {
jest.restoreAllMocks();
vi.restoreAllMocks();
// Clear allowed directories after tests
setAllowedDirectories([]);
});
@@ -591,8 +591,8 @@ describe('Lib Functions', () => {
// Mock file handle with proper typing
const mockFileHandle = {
read: jest.fn(),
close: jest.fn()
read: vi.fn(),
close: vi.fn()
} as any;
mockFileHandle.read.mockResolvedValue({ bytesRead: 0 });
@@ -610,8 +610,8 @@ describe('Lib Functions', () => {
mockFs.stat.mockResolvedValue({ size: 50 } as any);
const mockFileHandle = {
read: jest.fn(),
close: jest.fn()
read: vi.fn(),
close: vi.fn()
} as any;
// Simulate reading file content in chunks
@@ -631,8 +631,8 @@ describe('Lib Functions', () => {
mockFs.stat.mockResolvedValue({ size: 100 } as any);
const mockFileHandle = {
read: jest.fn(),
close: jest.fn()
read: vi.fn(),
close: vi.fn()
} as any;
mockFileHandle.read.mockResolvedValue({ bytesRead: 0 });
@@ -650,8 +650,8 @@ describe('Lib Functions', () => {
it('opens file for reading', async () => {
// Mock file handle with proper typing
const mockFileHandle = {
read: jest.fn(),
close: jest.fn()
read: vi.fn(),
close: vi.fn()
} as any;
mockFileHandle.read.mockResolvedValue({ bytesRead: 0 });
@@ -666,8 +666,8 @@ describe('Lib Functions', () => {
it('handles files with content and returns first lines', async () => {
const mockFileHandle = {
read: jest.fn(),
close: jest.fn()
read: vi.fn(),
close: vi.fn()
} as any;
// Simulate reading file content with newlines
@@ -685,8 +685,8 @@ describe('Lib Functions', () => {
it('handles files with leftover content', async () => {
const mockFileHandle = {
read: jest.fn(),
close: jest.fn()
read: vi.fn(),
close: vi.fn()
} as any;
// Simulate reading file content without final newline
@@ -704,8 +704,8 @@ describe('Lib Functions', () => {
it('handles reaching requested line count', async () => {
const mockFileHandle = {
read: jest.fn(),
close: jest.fn()
read: vi.fn(),
close: vi.fn()
} as any;
// Simulate reading exactly the requested number of lines

View File

@@ -1,4 +1,4 @@
import { describe, it, expect } from '@jest/globals';
import { describe, it, expect, afterEach } from 'vitest';
import { normalizePath, expandHome, convertToWindowsPath } from '../path-utils.js';
describe('Path Utilities', () => {
@@ -10,14 +10,25 @@ describe('Path Utilities', () => {
.toBe('/home/user/some path');
});
it('converts WSL paths to Windows format', () => {
it('never converts WSL paths (they work correctly in WSL with Node.js fs)', () => {
// WSL paths should NEVER be converted, regardless of platform
// They are valid Linux paths that work with Node.js fs operations inside WSL
expect(convertToWindowsPath('/mnt/c/NS/MyKindleContent'))
.toBe('C:\\NS\\MyKindleContent');
.toBe('/mnt/c/NS/MyKindleContent');
expect(convertToWindowsPath('/mnt/d/Documents'))
.toBe('/mnt/d/Documents');
});
it('converts Unix-style Windows paths to Windows format', () => {
expect(convertToWindowsPath('/c/NS/MyKindleContent'))
.toBe('C:\\NS\\MyKindleContent');
it('converts Unix-style Windows paths only on Windows platform', () => {
// On Windows, /c/ style paths should be converted
if (process.platform === 'win32') {
expect(convertToWindowsPath('/c/NS/MyKindleContent'))
.toBe('C:\\NS\\MyKindleContent');
} else {
// On Linux, leave them unchanged
expect(convertToWindowsPath('/c/NS/MyKindleContent'))
.toBe('/c/NS/MyKindleContent');
}
});
it('leaves Windows paths unchanged but ensures backslashes', () => {
@@ -34,11 +45,20 @@ describe('Path Utilities', () => {
.toBe('C:\\Program Files\\Some App');
});
it('handles uppercase and lowercase drive letters', () => {
it('handles drive letter paths based on platform', () => {
// WSL paths should never be converted
expect(convertToWindowsPath('/mnt/d/some/path'))
.toBe('D:\\some\\path');
expect(convertToWindowsPath('/d/some/path'))
.toBe('D:\\some\\path');
.toBe('/mnt/d/some/path');
if (process.platform === 'win32') {
// On Windows, Unix-style paths like /d/ should be converted
expect(convertToWindowsPath('/d/some/path'))
.toBe('D:\\some\\path');
} else {
// On Linux, /d/ is just a regular Unix path
expect(convertToWindowsPath('/d/some/path'))
.toBe('/d/some/path');
}
});
});
@@ -50,6 +70,12 @@ describe('Path Utilities', () => {
.toBe('/home/user/some path');
expect(normalizePath('"/usr/local/some app/"'))
.toBe('/usr/local/some app');
expect(normalizePath('/usr/local//bin/app///'))
.toBe('/usr/local/bin/app');
expect(normalizePath('/'))
.toBe('/');
expect(normalizePath('///'))
.toBe('/');
});
it('removes surrounding quotes', () => {
@@ -67,21 +93,33 @@ describe('Path Utilities', () => {
.toBe('C:\\NS\\MyKindleContent');
});
it('handles WSL paths', () => {
it('always preserves WSL paths (they work correctly in WSL)', () => {
// WSL paths should ALWAYS be preserved, regardless of platform
// This is the fix for issue #2795
expect(normalizePath('/mnt/c/NS/MyKindleContent'))
.toBe('C:\\NS\\MyKindleContent');
.toBe('/mnt/c/NS/MyKindleContent');
expect(normalizePath('/mnt/d/Documents'))
.toBe('/mnt/d/Documents');
});
it('handles Unix-style Windows paths', () => {
expect(normalizePath('/c/NS/MyKindleContent'))
.toBe('C:\\NS\\MyKindleContent');
// On Windows, /c/ paths should be converted
if (process.platform === 'win32') {
expect(normalizePath('/c/NS/MyKindleContent'))
.toBe('C:\\NS\\MyKindleContent');
} else if (process.platform === 'linux') {
// On Linux, /c/ is just a regular Unix path
expect(normalizePath('/c/NS/MyKindleContent'))
.toBe('/c/NS/MyKindleContent');
}
});
it('handles paths with spaces and mixed slashes', () => {
expect(normalizePath('C:/NS/My Kindle Content'))
.toBe('C:\\NS\\My Kindle Content');
// WSL paths should always be preserved
expect(normalizePath('/mnt/c/NS/My Kindle Content'))
.toBe('C:\\NS\\My Kindle Content');
.toBe('/mnt/c/NS/My Kindle Content');
expect(normalizePath('C:\\Program Files (x86)\\App Name'))
.toBe('C:\\Program Files (x86)\\App Name');
expect(normalizePath('"C:\\Program Files\\App Name"'))
@@ -91,10 +129,19 @@ describe('Path Utilities', () => {
});
it('preserves spaces in all path formats', () => {
// WSL paths should always be preserved
expect(normalizePath('/mnt/c/Program Files/App Name'))
.toBe('C:\\Program Files\\App Name');
expect(normalizePath('/c/Program Files/App Name'))
.toBe('C:\\Program Files\\App Name');
.toBe('/mnt/c/Program Files/App Name');
if (process.platform === 'win32') {
// On Windows, Unix-style paths like /c/ should be converted
expect(normalizePath('/c/Program Files/App Name'))
.toBe('C:\\Program Files\\App Name');
} else {
// On Linux, /c/ is just a regular Unix path
expect(normalizePath('/c/Program Files/App Name'))
.toBe('/c/Program Files/App Name');
}
expect(normalizePath('C:/Program Files/App Name'))
.toBe('C:\\Program Files\\App Name');
});
@@ -105,15 +152,16 @@ describe('Path Utilities', () => {
.toBe('C:\\NS\\Sub&Folder');
expect(normalizePath('C:/NS/Sub&Folder'))
.toBe('C:\\NS\\Sub&Folder');
// WSL paths should always be preserved
expect(normalizePath('/mnt/c/NS/Sub&Folder'))
.toBe('C:\\NS\\Sub&Folder');
.toBe('/mnt/c/NS/Sub&Folder');
// Test tilde in path (short names in Windows)
expect(normalizePath('C:\\NS\\MYKIND~1'))
.toBe('C:\\NS\\MYKIND~1');
expect(normalizePath('/Users/NEMANS~1/FOLDER~2/SUBFO~1/Public/P12PST~1'))
.toBe('/Users/NEMANS~1/FOLDER~2/SUBFO~1/Public/P12PST~1');
// Test other special characters
expect(normalizePath('C:\\Path with #hash'))
.toBe('C:\\Path with #hash');
@@ -128,10 +176,19 @@ describe('Path Utilities', () => {
it('capitalizes lowercase drive letters for Windows paths', () => {
expect(normalizePath('c:/windows/system32'))
.toBe('C:\\windows\\system32');
expect(normalizePath('/mnt/d/my/folder')) // WSL path with lowercase drive
.toBe('D:\\my\\folder');
expect(normalizePath('/e/another/folder')) // Unix-style Windows path with lowercase drive
.toBe('E:\\another\\folder');
// WSL paths should always be preserved
expect(normalizePath('/mnt/d/my/folder'))
.toBe('/mnt/d/my/folder');
if (process.platform === 'win32') {
// On Windows, Unix-style paths should be converted and capitalized
expect(normalizePath('/e/another/folder'))
.toBe('E:\\another\\folder');
} else {
// On Linux, /e/ is just a regular Unix path
expect(normalizePath('/e/another/folder'))
.toBe('/e/another/folder');
}
});
it('handles UNC paths correctly', () => {
@@ -145,11 +202,8 @@ describe('Path Utilities', () => {
});
it('returns normalized non-Windows/WSL/Unix-style Windows paths as is after basic normalization', () => {
// Relative path
const relativePath = 'some/relative/path';
expect(normalizePath(relativePath)).toBe(relativePath.replace(/\//g, '\\'));
// A path that looks somewhat absolute but isn't a drive or recognized Unix root for Windows conversion
// These paths should be preserved as-is (not converted to Windows C:\ format or WSL format)
const otherAbsolutePath = '\\someserver\\share\\file';
expect(normalizePath(otherAbsolutePath)).toBe(otherAbsolutePath);
});
@@ -172,4 +226,146 @@ describe('Path Utilities', () => {
expect(expandHome('C:/test')).toBe('C:/test');
});
});
describe('WSL path handling (issue #2795 fix)', () => {
  // Save original platform so each test can restore it.
  const originalPlatform = process.platform;

  // Helper: override process.platform for the current test. Declared
  // writable/configurable so afterEach can put the real value back.
  const setPlatform = (value: string) =>
    Object.defineProperty(process, 'platform', {
      value,
      writable: true,
      configurable: true
    });

  afterEach(() => {
    // Restore platform after each test
    setPlatform(originalPlatform);
  });

  it('should NEVER convert WSL paths - they work correctly in WSL with Node.js fs', () => {
    // The key insight: When running `wsl npx ...`, Node.js runs INSIDE WSL (process.platform === 'linux')
    // and /mnt/c/ paths work correctly with Node.js fs operations in that environment.
    // Converting them to C:\ format breaks fs operations because Windows paths don't work inside WSL.

    // Mock Linux platform (inside WSL)
    setPlatform('linux');

    // WSL paths should NOT be converted, even inside WSL
    expect(normalizePath('/mnt/c/Users/username/folder'))
      .toBe('/mnt/c/Users/username/folder');
    expect(normalizePath('/mnt/d/Documents/project'))
      .toBe('/mnt/d/Documents/project');
  });

  it('should also preserve WSL paths when running on Windows', () => {
    // Mock Windows platform
    setPlatform('win32');

    // WSL paths should still be preserved (though they wouldn't be accessible from Windows Node.js)
    expect(normalizePath('/mnt/c/Users/username/folder'))
      .toBe('/mnt/c/Users/username/folder');
    expect(normalizePath('/mnt/d/Documents/project'))
      .toBe('/mnt/d/Documents/project');
  });

  it('should convert Unix-style Windows paths (/c/) only when running on Windows (win32)', () => {
    // Mock process.platform to be 'win32' (Windows)
    setPlatform('win32');

    // Unix-style Windows paths like /c/ should be converted on Windows
    expect(normalizePath('/c/Users/username/folder'))
      .toBe('C:\\Users\\username\\folder');
    expect(normalizePath('/d/Documents/project'))
      .toBe('D:\\Documents\\project');
  });

  it('should NOT convert Unix-style paths (/c/) when running inside WSL (linux)', () => {
    // Mock process.platform to be 'linux' (WSL/Linux)
    setPlatform('linux');

    // When on Linux, /c/ is just a regular Unix directory, not a drive letter
    expect(normalizePath('/c/some/path'))
      .toBe('/c/some/path');
    expect(normalizePath('/d/another/path'))
      .toBe('/d/another/path');
  });

  it('should preserve regular Unix paths on all platforms', () => {
    // Test on Linux
    setPlatform('linux');
    expect(normalizePath('/home/user/documents'))
      .toBe('/home/user/documents');
    expect(normalizePath('/var/log/app'))
      .toBe('/var/log/app');

    // Test on Windows (though these paths wouldn't work on Windows)
    setPlatform('win32');
    expect(normalizePath('/home/user/documents'))
      .toBe('/home/user/documents');
    expect(normalizePath('/var/log/app'))
      .toBe('/var/log/app');
  });

  it('reproduces exact scenario from issue #2795', () => {
    // Simulate running inside WSL: wsl npx @modelcontextprotocol/server-filesystem /mnt/c/Users/username/folder
    setPlatform('linux');

    // This is the exact path from the issue
    const inputPath = '/mnt/c/Users/username/folder';
    const result = normalizePath(inputPath);

    // Should NOT convert to C:\Users\username\folder
    expect(result).toBe('/mnt/c/Users/username/folder');
    expect(result).not.toContain('C:');
    expect(result).not.toContain('\\');
  });

  it('should handle relative path slash conversion based on platform', () => {
    // This test verifies platform-specific behavior naturally without mocking
    // On Windows: forward slashes converted to backslashes
    // On Linux/Unix: forward slashes preserved
    const relativePath = 'some/relative/path';
    const result = normalizePath(relativePath);

    if (originalPlatform === 'win32') {
      expect(result).toBe('some\\relative\\path');
    } else {
      expect(result).toBe('some/relative/path');
    }
  });
});
});

View File

@@ -1,4 +1,4 @@
import { describe, it, expect, beforeEach, afterEach } from '@jest/globals';
import { describe, it, expect, beforeEach, afterEach } from 'vitest';
import * as path from 'path';
import * as fs from 'fs/promises';
import * as os from 'os';

View File

@@ -1,4 +1,4 @@
import { describe, it, expect, beforeEach, afterEach } from '@jest/globals';
import { describe, it, expect, beforeEach, afterEach } from 'vitest';
import { getValidRootDirectories } from '../roots-utils.js';
import { mkdtempSync, rmSync, mkdirSync, writeFileSync, realpathSync } from 'fs';
import { tmpdir } from 'os';

View File

@@ -0,0 +1,158 @@
import { describe, it, expect, beforeEach, afterEach } from 'vitest';
import * as fs from 'fs/promises';
import * as path from 'path';
import * as os from 'os';
import { Client } from '@modelcontextprotocol/sdk/client/index.js';
import { StdioClientTransport } from '@modelcontextprotocol/sdk/client/stdio.js';
import { spawn } from 'child_process';
/**
* Integration tests to verify that tool handlers return structuredContent
* that matches the declared outputSchema.
*
* These tests address issues #3110, #3106, #3093 where tools were returning
* structuredContent: { content: [contentBlock] } (array) instead of
* structuredContent: { content: string } as declared in outputSchema.
*/
describe('structuredContent schema compliance', () => {
  let client: Client;
  let transport: StdioClientTransport;
  let testDir: string;

  /**
   * Shared assertion for issues #3110/#3106/#3093: structuredContent.content
   * must be a plain string (matching outputSchema `{ content: z.string() }`),
   * never an array of content blocks. Returns the string for further checks.
   */
  function expectStringContent(result: { structuredContent?: unknown }): string {
    // The result should have structuredContent
    expect(result.structuredContent).toBeDefined();
    const structuredContent = result.structuredContent as { content: unknown };
    expect(typeof structuredContent.content).toBe('string');
    // It should NOT be an array
    expect(Array.isArray(structuredContent.content)).toBe(false);
    return structuredContent.content as string;
  }

  beforeEach(async () => {
    // Create a temp directory for testing
    testDir = await fs.mkdtemp(path.join(os.tmpdir(), 'mcp-fs-test-'));

    // Create test files
    await fs.writeFile(path.join(testDir, 'test.txt'), 'test content');
    await fs.mkdir(path.join(testDir, 'subdir'));
    await fs.writeFile(path.join(testDir, 'subdir', 'nested.txt'), 'nested content');

    // Start the MCP server over stdio, rooted at the temp directory
    const serverPath = path.resolve(__dirname, '../dist/index.js');
    transport = new StdioClientTransport({
      command: 'node',
      args: [serverPath, testDir],
    });

    client = new Client({
      name: 'test-client',
      version: '1.0.0',
    }, {
      capabilities: {}
    });

    await client.connect(transport);
  });

  afterEach(async () => {
    await client?.close();
    await fs.rm(testDir, { recursive: true, force: true });
  });

  describe('directory_tree', () => {
    it('should return structuredContent.content as a string, not an array', async () => {
      const result = await client.callTool({
        name: 'directory_tree',
        arguments: { path: testDir }
      });

      // The content should be valid JSON representing the tree
      const treeData = JSON.parse(expectStringContent(result));
      expect(Array.isArray(treeData)).toBe(true);
    });
  });

  describe('list_directory_with_sizes', () => {
    it('should return structuredContent.content as a string, not an array', async () => {
      const result = await client.callTool({
        name: 'list_directory_with_sizes',
        arguments: { path: testDir }
      });

      // The content should contain directory listing info
      expect(expectStringContent(result)).toContain('[FILE]');
    });
  });

  describe('move_file', () => {
    it('should return structuredContent.content as a string, not an array', async () => {
      const sourcePath = path.join(testDir, 'test.txt');
      const destPath = path.join(testDir, 'moved.txt');

      const result = await client.callTool({
        name: 'move_file',
        arguments: {
          source: sourcePath,
          destination: destPath
        }
      });

      // The content should contain success message
      expect(expectStringContent(result)).toContain('Successfully moved');
    });
  });

  describe('list_directory (control - already working)', () => {
    it('should return structuredContent.content as a string', async () => {
      const result = await client.callTool({
        name: 'list_directory',
        arguments: { path: testDir }
      });

      expectStringContent(result);
    });
  });

  describe('search_files (control - already working)', () => {
    it('should return structuredContent.content as a string', async () => {
      const result = await client.callTool({
        name: 'search_files',
        arguments: {
          path: testDir,
          pattern: '*.txt'
        }
      });

      expectStringContent(result);
    });
  });
});

File diff suppressed because it is too large Load Diff

View File

@@ -1,23 +0,0 @@
/** @type {import('ts-jest').JestConfigWithTsJest} */
module.exports = {
preset: 'ts-jest',
testEnvironment: 'node',
extensionsToTreatAsEsm: ['.ts'],
moduleNameMapper: {
'^(\\.{1,2}/.*)\\.js$': '$1',
},
transform: {
'^.+\\.tsx?$': [
'ts-jest',
{
useESM: true,
},
],
},
testMatch: ['**/__tests__/**/*.test.ts'],
collectCoverageFrom: [
'**/*.ts',
'!**/__tests__/**',
'!**/dist/**',
],
}

View File

@@ -3,9 +3,14 @@
"version": "0.6.3",
"description": "MCP server for filesystem access",
"license": "MIT",
"mcpName": "io.github.modelcontextprotocol/server-filesystem",
"author": "Anthropic, PBC (https://anthropic.com)",
"homepage": "https://modelcontextprotocol.io",
"bugs": "https://github.com/modelcontextprotocol/servers/issues",
"repository": {
"type": "git",
"url": "https://github.com/modelcontextprotocol/servers.git"
},
"type": "module",
"bin": {
"mcp-server-filesystem": "dist/index.js"
@@ -17,25 +22,22 @@
"build": "tsc && shx chmod +x dist/*.js",
"prepare": "npm run build",
"watch": "tsc --watch",
"test": "jest --config=jest.config.cjs --coverage"
"test": "vitest run --coverage"
},
"dependencies": {
"@modelcontextprotocol/sdk": "^1.17.0",
"diff": "^5.1.0",
"glob": "^10.3.10",
"@modelcontextprotocol/sdk": "^1.25.2",
"diff": "^8.0.3",
"glob": "^10.5.0",
"minimatch": "^10.0.1",
"zod-to-json-schema": "^3.23.5"
},
"devDependencies": {
"@jest/globals": "^29.7.0",
"@types/diff": "^5.0.9",
"@types/jest": "^29.5.14",
"@types/minimatch": "^5.1.2",
"@types/node": "^22",
"jest": "^29.7.0",
"@vitest/coverage-v8": "^2.1.8",
"shx": "^0.3.4",
"ts-jest": "^29.1.1",
"ts-node": "^10.9.2",
"typescript": "^5.8.2"
"typescript": "^5.8.2",
"vitest": "^2.1.8"
}
}

View File

@@ -8,14 +8,15 @@ import os from 'os';
*/
export function convertToWindowsPath(p: string): string {
// Handle WSL paths (/mnt/c/...)
// NEVER convert WSL paths - they are valid Linux paths that work with Node.js fs operations in WSL
// Converting them to Windows format (C:\...) breaks fs operations inside WSL
if (p.startsWith('/mnt/')) {
const driveLetter = p.charAt(5).toUpperCase();
const pathPart = p.slice(6).replace(/\//g, '\\');
return `${driveLetter}:${pathPart}`;
return p; // Leave WSL paths unchanged
}
// Handle Unix-style Windows paths (/c/...)
if (p.match(/^\/[a-zA-Z]\//)) {
// Only convert when running on Windows
if (p.match(/^\/[a-zA-Z]\//) && process.platform === 'win32') {
const driveLetter = p.charAt(1).toUpperCase();
const pathPart = p.slice(2).replace(/\//g, '\\');
return `${driveLetter}:${pathPart}`;
@@ -38,21 +39,29 @@ export function convertToWindowsPath(p: string): string {
export function normalizePath(p: string): string {
// Remove any surrounding quotes and whitespace
p = p.trim().replace(/^["']|["']$/g, '');
// Check if this is a Unix path (starts with / but not a Windows or WSL path)
const isUnixPath = p.startsWith('/') &&
!p.match(/^\/mnt\/[a-z]\//i) &&
!p.match(/^\/[a-zA-Z]\//);
// Check if this is a Unix path that should not be converted
// WSL paths (/mnt/) should ALWAYS be preserved as they work correctly in WSL with Node.js fs
// Regular Unix paths should also be preserved
const isUnixPath = p.startsWith('/') && (
// Always preserve WSL paths (/mnt/c/, /mnt/d/, etc.)
p.match(/^\/mnt\/[a-z]\//i) ||
// On non-Windows platforms, treat all absolute paths as Unix paths
(process.platform !== 'win32') ||
// On Windows, preserve Unix paths that aren't Unix-style Windows paths (/c/, /d/, etc.)
(process.platform === 'win32' && !p.match(/^\/[a-zA-Z]\//))
);
if (isUnixPath) {
// For Unix paths, just normalize without converting to Windows format
// Replace double slashes with single slashes and remove trailing slashes
return p.replace(/\/+/g, '/').replace(/\/+$/, '');
return p.replace(/\/+/g, '/').replace(/(?<!^)\/$/, '');
}
// Convert WSL or Unix-style Windows paths to Windows format
// Convert Unix-style Windows paths (/c/, /d/) to Windows format if on Windows
// This function will now leave /mnt/ paths unchanged
p = convertToWindowsPath(p);
// Handle double backslashes, preserving leading UNC \\
if (p.startsWith('\\\\')) {
// For UNC paths, first normalize any excessive leading backslashes to exactly \\
@@ -67,15 +76,15 @@ export function normalizePath(p: string): string {
// For non-UNC paths, normalize all double backslashes
p = p.replace(/\\\\/g, '\\');
}
// Use Node's path normalization, which handles . and .. segments
let normalized = path.normalize(p);
// Fix UNC paths after normalization (path.normalize can remove a leading backslash)
if (p.startsWith('\\\\') && !normalized.startsWith('\\\\')) {
normalized = '\\' + normalized;
}
// Handle Windows paths: convert slashes and ensure drive letter is capitalized
if (normalized.match(/^[a-zA-Z]:/)) {
let result = normalized.replace(/\//g, '\\');
@@ -85,10 +94,15 @@ export function normalizePath(p: string): string {
}
return result;
}
// For all other paths (including relative paths), convert forward slashes to backslashes
// This ensures relative paths like "some/relative/path" become "some\\relative\\path"
return normalized.replace(/\//g, '\\');
// On Windows, convert forward slashes to backslashes for relative paths
// On Linux/Unix, preserve forward slashes
if (process.platform === 'win32') {
return normalized.replace(/\//g, '\\');
}
// On non-Windows platforms, keep the normalized path as-is
return normalized;
}
/**

View File

@@ -8,5 +8,11 @@
},
"include": [
"./**/*.ts"
],
"exclude": [
"**/__tests__/**",
"**/*.test.ts",
"**/*.spec.ts",
"vitest.config.ts"
]
}

View File

@@ -0,0 +1,14 @@
import { defineConfig } from 'vitest/config';

// Vitest configuration for this package (replaces the removed jest config).
export default defineConfig({
  test: {
    // Expose describe/it/expect as globals so test files need no imports.
    globals: true,
    environment: 'node',
    // Pick up every *.test.ts file under any __tests__ directory.
    include: ['**/__tests__/**/*.test.ts'],
    coverage: {
      // Use V8's built-in coverage instrumentation.
      provider: 'v8',
      include: ['**/*.ts'],
      // Don't report coverage for the tests themselves or build output.
      exclude: ['**/__tests__/**', '**/dist/**'],
    },
  },
});

View File

@@ -14,13 +14,13 @@ ENV UV_LINK_MODE=copy
RUN --mount=type=cache,target=/root/.cache/uv \
--mount=type=bind,source=uv.lock,target=uv.lock \
--mount=type=bind,source=pyproject.toml,target=pyproject.toml \
uv sync --frozen --no-install-project --no-dev --no-editable
uv sync --locked --no-install-project --no-dev --no-editable
# Then, add the rest of the project source code and install it
# Installing separately from its dependencies allows optimal layer caching
ADD . /app
RUN --mount=type=cache,target=/root/.cache/uv \
uv sync --frozen --no-dev --no-editable
uv sync --locked --no-dev --no-editable
FROM python:3.12-slim-bookworm

View File

@@ -1,5 +1,7 @@
# mcp-server-git: A git MCP server
<!-- mcp-name: io.github.modelcontextprotocol/server-git -->
## Overview
A Model Context Protocol server for Git repository interaction and automation. This server provides tools to read, search, and manipulate Git repositories via Large Language Models.
@@ -57,10 +59,12 @@ Please note that mcp-server-git is currently in early development. The functiona
- Returns: Confirmation of reset operation
8. `git_log`
- Shows the commit logs
- Shows the commit logs with optional date filtering
- Inputs:
- `repo_path` (string): Path to Git repository
- `max_count` (number, optional): Maximum number of commits to show (default: 10)
- `start_timestamp` (string, optional): Start timestamp for filtering commits. Accepts ISO 8601 format (e.g., '2024-01-15T14:30:25'), relative dates (e.g., '2 weeks ago', 'yesterday'), or absolute dates (e.g., '2024-01-15', 'Jan 15 2024')
- `end_timestamp` (string, optional): End timestamp for filtering commits. Accepts ISO 8601 format (e.g., '2024-01-15T14:30:25'), relative dates (e.g., '2 weeks ago', 'yesterday'), or absolute dates (e.g., '2024-01-15', 'Jan 15 2024')
- Returns: Array of commit entries with hash, author, date, and message
9. `git_create_branch`
@@ -68,7 +72,7 @@ Please note that mcp-server-git is currently in early development. The functiona
- Inputs:
- `repo_path` (string): Path to Git repository
- `branch_name` (string): Name of the new branch
- `start_point` (string, optional): Starting point for the new branch
- `base_branch` (string, optional): Base branch to create from (defaults to current branch)
- Returns: Confirmation of branch creation
10. `git_checkout`
- Switches branches
@@ -82,13 +86,8 @@ Please note that mcp-server-git is currently in early development. The functiona
- `repo_path` (string): Path to Git repository
- `revision` (string): The revision (commit hash, branch name, tag) to show
- Returns: Contents of the specified commit
12. `git_init`
- Initializes a Git repository
- Inputs:
- `repo_path` (string): Path to directory to initialize git repo
- Returns: Confirmation of repository initialization
13. `git_branch`
12. `git_branch`
- List Git branches
- Inputs:
- `repo_path` (string): Path to the Git repository.
@@ -181,7 +180,7 @@ Add the configuration to your user-level MCP configuration file. Open the Comman
**Method 2: Workspace Configuration**
Alternatively, you can add the configuration to a file called `.vscode/mcp.json` in your workspace. This will allow you to share the configuration with others.
> For more details about MCP configuration in VS Code, see the [official VS Code MCP documentation](https://code.visualstudio.com/docs/copilot/mcp).
> For more details about MCP configuration in VS Code, see the [official VS Code MCP documentation](https://code.visualstudio.com/docs/copilot/customization/mcp-servers).
```json
{

View File

@@ -17,7 +17,7 @@ classifiers = [
]
dependencies = [
"click>=8.1.7",
"gitpython>=3.1.43",
"gitpython>=3.1.45",
"mcp>=1.0.0",
"pydantic>=2.0.0",
]
@@ -29,8 +29,8 @@ mcp-server-git = "mcp_server_git:main"
requires = ["hatchling"]
build-backend = "hatchling.build"
[tool.uv]
dev-dependencies = ["pyright>=1.1.389", "ruff>=0.7.3", "pytest>=8.0.0"]
[dependency-groups]
dev = ["pyright>=1.1.407", "ruff>=0.7.3", "pytest>=8.0.0"]
[tool.pytest.ini_options]
testpaths = ["tests"]

View File

@@ -13,6 +13,7 @@ from mcp.types import (
)
from enum import Enum
import git
from git.exc import BadName
from pydantic import BaseModel, Field
# Default number of context lines to show in diff output
@@ -48,6 +49,14 @@ class GitReset(BaseModel):
class GitLog(BaseModel):
    # Input schema for the git_log tool. No class docstring on purpose:
    # pydantic would surface it as the JSON-schema description.
    # Path to the Git repository to read.
    repo_path: str
    # Maximum number of commit entries to return.
    max_count: int = 10
    start_timestamp: Optional[str] = Field(
        None,
        description="Start timestamp for filtering commits. Accepts: ISO 8601 format (e.g., '2024-01-15T14:30:25'), relative dates (e.g., '2 weeks ago', 'yesterday'), or absolute dates (e.g., '2024-01-15', 'Jan 15 2024')"
    )
    end_timestamp: Optional[str] = Field(
        None,
        description="End timestamp for filtering commits. Accepts: ISO 8601 format (e.g., '2024-01-15T14:30:25'), relative dates (e.g., '2 weeks ago', 'yesterday'), or absolute dates (e.g., '2024-01-15', 'Jan 15 2024')"
    )
class GitCreateBranch(BaseModel):
repo_path: str
@@ -62,8 +71,7 @@ class GitShow(BaseModel):
repo_path: str
revision: str
class GitInit(BaseModel):
repo_path: str
class GitBranch(BaseModel):
repo_path: str = Field(
@@ -83,6 +91,7 @@ class GitBranch(BaseModel):
description="The commit sha that branch should NOT contain. Do not pass anything to this param if no commit sha is specified",
)
class GitTools(str, Enum):
STATUS = "git_status"
DIFF_UNSTAGED = "git_diff_unstaged"
@@ -95,7 +104,7 @@ class GitTools(str, Enum):
CREATE_BRANCH = "git_create_branch"
CHECKOUT = "git_checkout"
SHOW = "git_show"
INIT = "git_init"
BRANCH = "git_branch"
def git_status(repo: git.Repo) -> str:
@@ -108,6 +117,11 @@ def git_diff_staged(repo: git.Repo, context_lines: int = DEFAULT_CONTEXT_LINES)
return repo.git.diff(f"--unified={context_lines}", "--cached")
def git_diff(repo: git.Repo, target: str, context_lines: int = DEFAULT_CONTEXT_LINES) -> str:
    """Return the diff between the working tree and ``target``.

    Raises BadName when ``target`` is option-like or does not resolve to a
    real git ref.
    """
    # Defense in depth: an option-like target could be parsed as a git flag,
    # even if a ref with that name actually exists (e.g. via filesystem
    # manipulation).
    if target.startswith("-"):
        raise BadName(f"Invalid target: '{target}' - cannot start with '-'")
    # Confirm the target resolves to a real ref before shelling out;
    # raises BadName otherwise.
    repo.rev_parse(target)
    unified_flag = f"--unified={context_lines}"
    return repo.git.diff(unified_flag, target)
def git_commit(repo: git.Repo, message: str) -> str:
@@ -118,24 +132,49 @@ def git_add(repo: git.Repo, files: list[str]) -> str:
if files == ["."]:
repo.git.add(".")
else:
repo.index.add(files)
# Use '--' to prevent files starting with '-' from being interpreted as options
repo.git.add("--", *files)
return "Files staged successfully"
def git_reset(repo: git.Repo) -> str:
    # Unstage everything (mixed reset of the index against HEAD);
    # the working tree is left untouched.
    repo.index.reset()
    return "All staged changes reset"
def git_log(repo: git.Repo, max_count: int = 10) -> list[str]:
commits = list(repo.iter_commits(max_count=max_count))
log = []
for commit in commits:
log.append(
f"Commit: {commit.hexsha!r}\n"
f"Author: {commit.author!r}\n"
f"Date: {commit.authored_datetime}\n"
f"Message: {commit.message!r}\n"
)
return log
def git_log(repo: git.Repo, max_count: int = 10, start_timestamp: Optional[str] = None, end_timestamp: Optional[str] = None) -> list[str]:
    """Return formatted commit entries, optionally filtered by date range.

    :param repo: repository to read commits from
    :param max_count: maximum number of entries to return
    :param start_timestamp: forwarded to ``git log --since`` (ISO 8601,
        relative dates like '2 weeks ago', or absolute dates), optional
    :param end_timestamp: forwarded to ``git log --until``, optional
    :return: list of human-readable "Commit/Author/Date/Message" strings
    """
    if start_timestamp or end_timestamp:
        # Date filtering goes through the git CLI: --since/--until understand
        # relative dates ("2 weeks ago") that iter_commits does not parse.
        args = []
        if start_timestamp:
            args.extend(['--since', start_timestamp])
        if end_timestamp:
            args.extend(['--until', end_timestamp])
        # Limit on the git side too, so we never parse unbounded output.
        args.extend(['--max-count', str(max_count)])
        # One field per line: hash, author name, author date, subject.
        # NOTE: no trailing %n here. Git already terminates each commit's
        # formatted output with a newline, so every record is exactly 4 lines.
        # (The previous '--format=%H%n%an%n%ad%n%s%n' produced 5-line records
        # while this loop stepped by 4, misaligning every entry after the
        # first.)
        args.append('--format=%H%n%an%n%ad%n%s')
        log_output = repo.git.log(*args).split('\n')
        log = []
        # Process commits in groups of 4 (hash, author, date, message)
        for i in range(0, len(log_output), 4):
            if i + 3 < len(log_output) and len(log) < max_count:
                log.append(
                    f"Commit: {log_output[i]}\n"
                    f"Author: {log_output[i+1]}\n"
                    f"Date: {log_output[i+2]}\n"
                    f"Message: {log_output[i+3]}\n"
                )
        return log
    else:
        # No date filtering: iterate commit objects directly via GitPython.
        commits = list(repo.iter_commits(max_count=max_count))
        log = []
        for commit in commits:
            log.append(
                f"Commit: {commit.hexsha!r}\n"
                f"Author: {commit.author!r}\n"
                f"Date: {commit.authored_datetime}\n"
                f"Message: {commit.message!r}\n"
            )
        return log
def git_create_branch(repo: git.Repo, branch_name: str, base_branch: str | None = None) -> str:
if base_branch:
@@ -147,15 +186,15 @@ def git_create_branch(repo: git.Repo, branch_name: str, base_branch: str | None
return f"Created branch '{branch_name}' from '{base.name}'"
def git_checkout(repo: git.Repo, branch_name: str) -> str:
    """Switch the working tree of ``repo`` to ``branch_name``.

    Raises BadName when the name is option-like or does not resolve to an
    existing git ref.
    """
    # Defense in depth: an option-like name could be interpreted as a git
    # flag, even if a ref with that name actually exists (e.g. via filesystem
    # manipulation).
    if branch_name.startswith("-"):
        raise BadName(f"Invalid branch name: '{branch_name}' - cannot start with '-'")
    # Confirm the name resolves to a real ref first; raises BadName otherwise.
    repo.rev_parse(branch_name)
    repo.git.checkout(branch_name)
    return f"Switched to branch '{branch_name}'"
def git_init(repo_path: str) -> str:
try:
repo = git.Repo.init(path=repo_path, mkdir=True)
return f"Initialized empty Git repository in {repo.git_dir}"
except Exception as e:
return f"Error initializing repository: {str(e)}"
def git_show(repo: git.Repo, revision: str) -> str:
commit = repo.commit(revision)
@@ -172,9 +211,35 @@ def git_show(repo: git.Repo, revision: str) -> str:
diff = commit.diff(git.NULL_TREE, create_patch=True)
for d in diff:
output.append(f"\n--- {d.a_path}\n+++ {d.b_path}\n")
output.append(d.diff.decode('utf-8'))
if d.diff is None:
continue
if isinstance(d.diff, bytes):
output.append(d.diff.decode('utf-8'))
else:
output.append(d.diff)
return "".join(output)
def validate_repo_path(repo_path: Path, allowed_repository: Path | None) -> None:
"""Validate that repo_path is within the allowed repository path."""
if allowed_repository is None:
return # No restriction configured
# Resolve both paths to handle symlinks and relative paths
try:
resolved_repo = repo_path.resolve()
resolved_allowed = allowed_repository.resolve()
except (OSError, RuntimeError):
raise ValueError(f"Invalid path: {repo_path}")
# Check if repo_path is the same as or a subdirectory of allowed_repository
try:
resolved_repo.relative_to(resolved_allowed)
except ValueError:
raise ValueError(
f"Repository path '{repo_path}' is outside the allowed repository '{allowed_repository}'"
)
def git_branch(repo: git.Repo, branch_type: str, contains: str | None = None, not_contains: str | None = None) -> str:
match contains:
case None:
@@ -203,6 +268,7 @@ def git_branch(repo: git.Repo, branch_type: str, contains: str | None = None, no
return branch_info
async def serve(repository: Path | None) -> None:
logger = logging.getLogger(__name__)
@@ -274,15 +340,12 @@ async def serve(repository: Path | None) -> None:
description="Shows the contents of a commit",
inputSchema=GitShow.model_json_schema(),
),
Tool(
name=GitTools.INIT,
description="Initialize a new Git repository",
inputSchema=GitInit.model_json_schema(),
),
Tool(
name=GitTools.BRANCH,
description="List Git branches",
inputSchema=GitBranch.model_json_schema(),
)
]
@@ -318,16 +381,11 @@ async def serve(repository: Path | None) -> None:
@server.call_tool()
async def call_tool(name: str, arguments: dict) -> list[TextContent]:
repo_path = Path(arguments["repo_path"])
# Handle git init separately since it doesn't require an existing repo
if name == GitTools.INIT:
result = git_init(str(repo_path))
return [TextContent(
type="text",
text=result
)]
# For all other commands, we need an existing repo
# Validate repo_path is within allowed repository
validate_repo_path(repo_path, repository)
# For all commands, we need an existing repo
repo = git.Repo(repo_path)
match name:
@@ -380,8 +438,14 @@ async def serve(repository: Path | None) -> None:
text=result
)]
# Update the LOG case:
case GitTools.LOG:
log = git_log(repo, arguments.get("max_count", 10))
log = git_log(
repo,
arguments.get("max_count", 10),
arguments.get("start_timestamp"),
arguments.get("end_timestamp")
)
return [TextContent(
type="text",
text="Commit history:\n" + "\n".join(log)

View File

@@ -1,7 +1,22 @@
import pytest
from pathlib import Path
import git
from mcp_server_git.server import git_checkout, git_branch, git_add
from git.exc import BadName
from mcp_server_git.server import (
git_checkout,
git_branch,
git_add,
git_status,
git_diff_unstaged,
git_diff_staged,
git_diff,
git_commit,
git_reset,
git_log,
git_create_branch,
git_show,
validate_repo_path,
)
import shutil
@pytest.fixture
@@ -26,7 +41,7 @@ def test_git_checkout_existing_branch(test_repository):
def test_git_checkout_nonexistent_branch(test_repository):
with pytest.raises(git.GitCommandError):
with pytest.raises(BadName):
git_checkout(test_repository, "nonexistent-branch")
def test_git_branch_local(test_repository):
@@ -35,8 +50,6 @@ def test_git_branch_local(test_repository):
assert "new-branch-local" in result
def test_git_branch_remote(test_repository):
# GitPython does not easily support creating remote branches without a remote.
# This test will check the behavior when 'remote' is specified without actual remotes.
result = git_branch(test_repository, "remote")
assert "" == result.strip() # Should be empty if no remote branches
@@ -46,28 +59,32 @@ def test_git_branch_all(test_repository):
assert "new-branch-all" in result
def test_git_branch_contains(test_repository):
# Get the default branch name (could be "main" or "master")
default_branch = test_repository.active_branch.name
# Create a new branch and commit to it
test_repository.git.checkout("-b", "feature-branch")
Path(test_repository.working_dir / Path("feature.txt")).write_text("feature content")
test_repository.index.add(["feature.txt"])
commit = test_repository.index.commit("feature commit")
test_repository.git.checkout("master")
test_repository.git.checkout(default_branch)
result = git_branch(test_repository, "local", contains=commit.hexsha)
assert "feature-branch" in result
assert "master" not in result
assert default_branch not in result
def test_git_branch_not_contains(test_repository):
# Get the default branch name (could be "main" or "master")
default_branch = test_repository.active_branch.name
# Create a new branch and commit to it
test_repository.git.checkout("-b", "another-feature-branch")
Path(test_repository.working_dir / Path("another_feature.txt")).write_text("another feature content")
test_repository.index.add(["another_feature.txt"])
commit = test_repository.index.commit("another feature commit")
test_repository.git.checkout("master")
test_repository.git.checkout(default_branch)
result = git_branch(test_repository, "local", not_contains=commit.hexsha)
assert "another-feature-branch" not in result
assert "master" in result
assert default_branch in result
def test_git_add_all_files(test_repository):
file_path = Path(test_repository.working_dir) / "all_file.txt"
@@ -91,3 +108,318 @@ def test_git_add_specific_files(test_repository):
assert "file1.txt" in staged_files
assert "file2.txt" not in staged_files
assert result == "Files staged successfully"
def test_git_status(test_repository):
    # Smoke test: status output mentions the current branch.
    result = git_status(test_repository)
    assert result is not None
    assert "On branch" in result or "branch" in result.lower()

def test_git_diff_unstaged(test_repository):
    # An unstaged modification appears in the working-tree diff.
    file_path = Path(test_repository.working_dir) / "test.txt"
    file_path.write_text("modified content")
    result = git_diff_unstaged(test_repository)
    assert "test.txt" in result
    assert "modified content" in result

def test_git_diff_unstaged_empty(test_repository):
    # A clean working tree produces an empty diff.
    result = git_diff_unstaged(test_repository)
    assert result == ""

def test_git_diff_staged(test_repository):
    # A staged (but uncommitted) file appears in the --cached diff.
    file_path = Path(test_repository.working_dir) / "staged_file.txt"
    file_path.write_text("staged content")
    test_repository.index.add(["staged_file.txt"])
    result = git_diff_staged(test_repository)
    assert "staged_file.txt" in result
    assert "staged content" in result

def test_git_diff_staged_empty(test_repository):
    # Nothing staged -> empty --cached diff.
    result = git_diff_staged(test_repository)
    assert result == ""

def test_git_diff(test_repository):
    # Diffing against the default branch shows changes committed on a
    # feature branch.
    # Get the default branch name (could be "main" or "master")
    default_branch = test_repository.active_branch.name
    test_repository.git.checkout("-b", "feature-diff")
    file_path = Path(test_repository.working_dir) / "test.txt"
    file_path.write_text("feature changes")
    test_repository.index.add(["test.txt"])
    test_repository.index.commit("feature commit")
    result = git_diff(test_repository, default_branch)
    assert "test.txt" in result
    assert "feature changes" in result

def test_git_commit(test_repository):
    # Committing staged changes reports success and records the message.
    file_path = Path(test_repository.working_dir) / "commit_test.txt"
    file_path.write_text("content to commit")
    test_repository.index.add(["commit_test.txt"])
    result = git_commit(test_repository, "test commit message")
    assert "Changes committed successfully with hash" in result
    latest_commit = test_repository.head.commit
    assert latest_commit.message.strip() == "test commit message"

def test_git_reset(test_repository):
    # git_reset unstages everything that was previously staged.
    file_path = Path(test_repository.working_dir) / "reset_test.txt"
    file_path.write_text("content to reset")
    test_repository.index.add(["reset_test.txt"])
    staged_before = [item.a_path for item in test_repository.index.diff("HEAD")]
    assert "reset_test.txt" in staged_before
    result = git_reset(test_repository)
    assert result == "All staged changes reset"
    staged_after = [item.a_path for item in test_repository.index.diff("HEAD")]
    assert "reset_test.txt" not in staged_after
def test_git_log(test_repository):
for i in range(3):
file_path = Path(test_repository.working_dir) / f"log_test_{i}.txt"
file_path.write_text(f"content {i}")
test_repository.index.add([f"log_test_{i}.txt"])
test_repository.index.commit(f"commit {i}")
result = git_log(test_repository, max_count=2)
assert isinstance(result, list)
assert len(result) == 2
assert "Commit:" in result[0]
assert "Author:" in result[0]
assert "Date:" in result[0]
assert "Message:" in result[0]
def test_git_log_default(test_repository):
result = git_log(test_repository)
assert isinstance(result, list)
assert len(result) >= 1
assert "initial commit" in result[0]
def test_git_create_branch(test_repository):
    """Creating a branch reports success and the new ref becomes visible."""
    result = git_create_branch(test_repository, "new-feature-branch")
    assert "Created branch 'new-feature-branch'" in result
    ref_names = {ref.name for ref in test_repository.references}
    assert "new-feature-branch" in ref_names
def test_git_create_branch_from_base(test_repository):
    """Creating a branch from an explicit base starts it at the base's head."""
    test_repository.git.checkout("-b", "base-branch")
    file_path = Path(test_repository.working_dir) / "base.txt"
    file_path.write_text("base content")
    test_repository.index.add(["base.txt"])
    test_repository.index.commit("base commit")

    result = git_create_branch(test_repository, "derived-branch", "base-branch")

    assert "Created branch 'derived-branch' from 'base-branch'" in result
    # The original test only checked the message.  Also verify the branch
    # actually exists and points at the base branch's head commit, so a
    # regression that reports success without creating the ref is caught.
    branch_names = {head.name for head in test_repository.heads}
    assert "derived-branch" in branch_names
    assert (
        test_repository.heads["derived-branch"].commit
        == test_repository.heads["base-branch"].commit
    )
def test_git_show(test_repository):
    """git_show renders commit metadata plus the touched paths."""
    (Path(test_repository.working_dir) / "show_test.txt").write_text("show content")
    test_repository.index.add(["show_test.txt"])
    test_repository.index.commit("show test commit")

    output = git_show(test_repository, test_repository.head.commit.hexsha)

    for expected in ("Commit:", "Author:", "show test commit", "show_test.txt"):
        assert expected in output
def test_git_show_initial_commit(test_repository):
    """git_show works for the repository's very first (root) commit."""
    # iter_commits yields newest first, so the root commit is the last one.
    history = list(test_repository.iter_commits())
    root_commit = history[-1]
    output = git_show(test_repository, root_commit.hexsha)
    assert "Commit:" in output
    assert "initial commit" in output
    assert "test.txt" in output
# Tests for validate_repo_path (repository scoping security fix)
def test_validate_repo_path_no_restriction():
    """With no allowed repository configured, every path passes validation."""
    # None means "no restriction": the call must simply return without raising.
    validate_repo_path(Path("/any/path"), None)
def test_validate_repo_path_exact_match(tmp_path: Path):
    """A repo_path identical to the allowed repository is accepted."""
    repo = tmp_path / "repo"
    repo.mkdir()
    validate_repo_path(repo, repo)  # must not raise
def test_validate_repo_path_subdirectory(tmp_path: Path):
    """A repo_path nested under the allowed repository is accepted."""
    repo = tmp_path / "repo"
    nested = repo / "subdir"
    nested.mkdir(parents=True)
    validate_repo_path(nested, repo)  # must not raise
def test_validate_repo_path_outside_allowed(tmp_path: Path):
    """A repo_path that is a sibling of the allowed repository is rejected."""
    allowed_repo = tmp_path / "allowed_repo"
    other_repo = tmp_path / "other_repo"
    allowed_repo.mkdir()
    other_repo.mkdir()
    with pytest.raises(ValueError, match="outside the allowed repository"):
        validate_repo_path(other_repo, allowed_repo)
def test_validate_repo_path_traversal_attempt(tmp_path: Path):
    """A '..'-based path that escapes the allowed repository is rejected."""
    allowed_repo = tmp_path / "allowed_repo"
    allowed_repo.mkdir()
    # Build a path that textually lives under allowed_repo but resolves outside.
    escape_path = allowed_repo / ".." / "other_repo"
    with pytest.raises(ValueError, match="outside the allowed repository"):
        validate_repo_path(escape_path, allowed_repo)
def test_validate_repo_path_symlink_escape(tmp_path: Path):
    """Symlinks pointing outside allowed_repository should be rejected."""
    allowed = tmp_path / "allowed_repo"
    allowed.mkdir()
    outside = tmp_path / "outside"
    outside.mkdir()
    # Create a symlink inside allowed that points outside.  On platforms
    # where the test user cannot create symlinks (e.g. Windows without
    # developer mode / SeCreateSymbolicLinkPrivilege), skip rather than
    # fail with an OSError unrelated to the behavior under test.
    symlink = allowed / "escape_link"
    try:
        symlink.symlink_to(outside)
    except OSError:
        pytest.skip("symlinks not supported in this environment")
    with pytest.raises(ValueError) as exc_info:
        validate_repo_path(symlink, allowed)
    assert "outside the allowed repository" in str(exc_info.value)
# Tests for argument injection protection
def test_git_diff_rejects_flag_injection(test_repository):
    """git_diff should reject flags that could be used for argument injection."""
    # Each of these is an option, not a ref, and must be rejected as a bad name.
    for malicious_target in ("--output=/tmp/evil", "--help", "-p"):
        with pytest.raises(BadName):
            git_diff(test_repository, malicious_target)
def test_git_checkout_rejects_flag_injection(test_repository):
    """git_checkout should reject flags that could be used for argument injection."""
    # Each of these is an option, not a branch name, and must be rejected.
    for malicious_target in ("--help", "--orphan=evil", "-f"):
        with pytest.raises(BadName):
            git_checkout(test_repository, malicious_target)
def test_git_diff_allows_valid_refs(test_repository):
    """git_diff should work normally with valid git refs."""
    base_branch = test_repository.active_branch.name

    # Add a commit on a new branch so there is something to diff against.
    test_repository.git.checkout("-b", "valid-diff-branch")
    (Path(test_repository.working_dir) / "test.txt").write_text("valid diff content")
    test_repository.index.add(["test.txt"])
    test_repository.index.commit("valid diff commit")

    # A plain branch name is accepted.
    assert "test.txt" in git_diff(test_repository, base_branch)
    # A relative ref such as HEAD~1 is accepted.
    assert "test.txt" in git_diff(test_repository, "HEAD~1")
    # A raw commit hash is accepted.
    head_sha = test_repository.head.commit.hexsha
    assert git_diff(test_repository, head_sha) is not None
def test_git_checkout_allows_valid_branches(test_repository):
    """git_checkout should work normally with valid branch names."""
    original_branch = test_repository.active_branch.name
    test_repository.git.branch("valid-checkout-branch")

    # Switch onto the freshly created branch.
    message = git_checkout(test_repository, "valid-checkout-branch")
    assert "Switched to branch 'valid-checkout-branch'" in message
    assert test_repository.active_branch.name == "valid-checkout-branch"

    # And switch back to where we started.
    message = git_checkout(test_repository, original_branch)
    assert "Switched to branch" in message
    assert test_repository.active_branch.name == original_branch
def test_git_diff_rejects_malicious_refs(test_repository):
    """git_diff should reject refs starting with '-' even if they exist.

    This tests defense in depth against an attacker who creates malicious
    refs via filesystem manipulation (e.g. using mcp-filesystem to write
    to .git/refs/heads/--output=...).
    """
    # Manually create a malicious ref by writing directly to .git/refs.
    sha = test_repository.head.commit.hexsha
    refs_dir = Path(test_repository.git_dir) / "refs" / "heads"
    malicious_ref_path = refs_dir / "--output=evil.txt"
    malicious_ref_path.write_text(sha)
    try:
        # Even though the ref exists, it should be rejected.
        with pytest.raises(BadName):
            git_diff(test_repository, "--output=evil.txt")
        # Verify no file was created (the attack was blocked).  git would
        # resolve a relative --output against the repository's working
        # directory, so check there, not only the test process CWD (which
        # is what the original `os.path.exists("evil.txt")` looked at).
        assert not (Path(test_repository.working_dir) / "evil.txt").exists()
        assert not Path("evil.txt").exists()
    finally:
        # Always remove the fake ref so later tests see a clean repository,
        # even when one of the assertions above fails.
        malicious_ref_path.unlink()
def test_git_checkout_rejects_malicious_refs(test_repository):
    """git_checkout should reject refs starting with '-' even if they exist."""
    # Manually create a malicious ref by writing directly to .git/refs.
    sha = test_repository.head.commit.hexsha
    refs_dir = Path(test_repository.git_dir) / "refs" / "heads"
    malicious_ref_path = refs_dir / "--orphan=evil"
    malicious_ref_path.write_text(sha)
    try:
        # Even though the ref exists, it should be rejected.
        with pytest.raises(BadName):
            git_checkout(test_repository, "--orphan=evil")
    finally:
        # Cleanup must run even if the assertion fails, so the fake ref
        # does not leak into subsequent tests.
        malicious_ref_path.unlink()

824
src/git/uv.lock generated

File diff suppressed because it is too large Load Diff

View File

@@ -173,14 +173,14 @@ The server can be configured using the following environment variables:
"@modelcontextprotocol/server-memory"
],
"env": {
"MEMORY_FILE_PATH": "/path/to/custom/memory.json"
"MEMORY_FILE_PATH": "/path/to/custom/memory.jsonl"
}
}
}
}
```
- `MEMORY_FILE_PATH`: Path to the memory storage JSON file (default: `memory.json` in the server directory)
- `MEMORY_FILE_PATH`: Path to the memory storage JSONL file (default: `memory.jsonl` in the server directory)
# VS Code Installation Instructions
@@ -198,7 +198,7 @@ Add the configuration to your user-level MCP configuration file. Open the Comman
**Method 2: Workspace Configuration**
Alternatively, you can add the configuration to a file called `.vscode/mcp.json` in your workspace. This will allow you to share the configuration with others.
> For more details about MCP configuration in VS Code, see the [official VS Code MCP documentation](https://code.visualstudio.com/docs/copilot/mcp).
> For more details about MCP configuration in VS Code, see the [official VS Code MCP documentation](https://code.visualstudio.com/docs/copilot/customization/mcp-servers).
#### NPX

View File

@@ -0,0 +1,156 @@
import { describe, it, expect, beforeEach, afterEach, vi } from 'vitest';
import { promises as fs } from 'fs';
import path from 'path';
import { fileURLToPath } from 'url';
import { ensureMemoryFilePath, defaultMemoryPath } from '../index.js';
// Tests for ensureMemoryFilePath: env-var resolution and the one-time
// migration of the legacy memory.json file to memory.jsonl.
describe('ensureMemoryFilePath', () => {
  // Legacy JSON file and its JSONL successor, both resolved relative to
  // the package root (one level above this test file).
  const testDir = path.dirname(fileURLToPath(import.meta.url));
  const oldMemoryPath = path.join(testDir, '..', 'memory.json');
  const newMemoryPath = path.join(testDir, '..', 'memory.jsonl');
  let originalEnv: string | undefined;

  beforeEach(() => {
    // Save original environment variable
    originalEnv = process.env.MEMORY_FILE_PATH;
    // Delete environment variable so each test exercises the default-path logic
    delete process.env.MEMORY_FILE_PATH;
  });

  afterEach(async () => {
    // Restore original environment variable
    if (originalEnv !== undefined) {
      process.env.MEMORY_FILE_PATH = originalEnv;
    } else {
      delete process.env.MEMORY_FILE_PATH;
    }
    // Clean up test files created by the migration tests
    try {
      await fs.unlink(oldMemoryPath);
    } catch {
      // Ignore if file doesn't exist
    }
    try {
      await fs.unlink(newMemoryPath);
    } catch {
      // Ignore if file doesn't exist
    }
  });

  describe('with MEMORY_FILE_PATH environment variable', () => {
    it('should return absolute path when MEMORY_FILE_PATH is absolute', async () => {
      const absolutePath = '/tmp/custom-memory.jsonl';
      process.env.MEMORY_FILE_PATH = absolutePath;
      const result = await ensureMemoryFilePath();
      expect(result).toBe(absolutePath);
    });

    it('should convert relative path to absolute when MEMORY_FILE_PATH is relative', async () => {
      const relativePath = 'custom-memory.jsonl';
      process.env.MEMORY_FILE_PATH = relativePath;
      const result = await ensureMemoryFilePath();
      expect(path.isAbsolute(result)).toBe(true);
      expect(result).toContain('custom-memory.jsonl');
    });

    it('should handle Windows absolute paths', async () => {
      const windowsPath = 'C:\\temp\\memory.jsonl';
      process.env.MEMORY_FILE_PATH = windowsPath;
      const result = await ensureMemoryFilePath();
      // On Windows, should return as-is; on Unix, will be treated as relative
      if (process.platform === 'win32') {
        expect(result).toBe(windowsPath);
      } else {
        expect(path.isAbsolute(result)).toBe(true);
      }
    });
  });

  describe('without MEMORY_FILE_PATH environment variable', () => {
    it('should return default path when no files exist', async () => {
      const result = await ensureMemoryFilePath();
      expect(result).toBe(defaultMemoryPath);
    });

    it('should migrate from memory.json to memory.jsonl when only old file exists', async () => {
      // Create old memory.json file
      await fs.writeFile(oldMemoryPath, '{"test":"data"}');
      // Migration progress is reported on stderr; capture it silently.
      const consoleErrorSpy = vi.spyOn(console, 'error').mockImplementation(() => {});
      const result = await ensureMemoryFilePath();
      expect(result).toBe(defaultMemoryPath);
      // Verify migration happened
      const newFileExists = await fs.access(newMemoryPath).then(() => true).catch(() => false);
      const oldFileExists = await fs.access(oldMemoryPath).then(() => true).catch(() => false);
      expect(newFileExists).toBe(true);
      expect(oldFileExists).toBe(false);
      // Verify console messages
      expect(consoleErrorSpy).toHaveBeenCalledWith(
        expect.stringContaining('DETECTED: Found legacy memory.json file')
      );
      expect(consoleErrorSpy).toHaveBeenCalledWith(
        expect.stringContaining('COMPLETED: Successfully migrated')
      );
      consoleErrorSpy.mockRestore();
    });

    it('should use new file when both old and new files exist', async () => {
      // Create both files
      await fs.writeFile(oldMemoryPath, '{"old":"data"}');
      await fs.writeFile(newMemoryPath, '{"new":"data"}');
      const consoleErrorSpy = vi.spyOn(console, 'error').mockImplementation(() => {});
      const result = await ensureMemoryFilePath();
      expect(result).toBe(defaultMemoryPath);
      // Verify no migration happened (both files should still exist)
      const newFileExists = await fs.access(newMemoryPath).then(() => true).catch(() => false);
      const oldFileExists = await fs.access(oldMemoryPath).then(() => true).catch(() => false);
      expect(newFileExists).toBe(true);
      expect(oldFileExists).toBe(true);
      // Verify no console messages about migration
      expect(consoleErrorSpy).not.toHaveBeenCalled();
      consoleErrorSpy.mockRestore();
    });

    it('should preserve file content during migration', async () => {
      const testContent = '{"entities": [{"name": "test", "type": "person"}]}';
      await fs.writeFile(oldMemoryPath, testContent);
      await ensureMemoryFilePath();
      // Migration is a rename, so bytes must survive unchanged.
      const migratedContent = await fs.readFile(newMemoryPath, 'utf-8');
      expect(migratedContent).toBe(testContent);
    });
  });

  describe('defaultMemoryPath', () => {
    it('should end with memory.jsonl', () => {
      expect(defaultMemoryPath).toMatch(/memory\.jsonl$/);
    });

    it('should be an absolute path', () => {
      expect(path.isAbsolute(defaultMemoryPath)).toBe(true);
    });
  });
});

View File

@@ -0,0 +1,483 @@
import { describe, it, expect, beforeEach, afterEach } from 'vitest';
import { promises as fs } from 'fs';
import path from 'path';
import { fileURLToPath } from 'url';
import { KnowledgeGraphManager, Entity, Relation, KnowledgeGraph } from '../index.js';
// Tests for KnowledgeGraphManager: CRUD on entities/relations/observations,
// search, and JSONL file persistence.  Each test gets its own unique
// temporary file so tests cannot interfere with one another.
describe('KnowledgeGraphManager', () => {
  let manager: KnowledgeGraphManager;
  let testFilePath: string;

  beforeEach(async () => {
    // Create a temporary test file path (Date.now() keeps it unique per test)
    testFilePath = path.join(
      path.dirname(fileURLToPath(import.meta.url)),
      `test-memory-${Date.now()}.jsonl`
    );
    manager = new KnowledgeGraphManager(testFilePath);
  });

  afterEach(async () => {
    // Clean up test file
    try {
      await fs.unlink(testFilePath);
    } catch (error) {
      // Ignore errors if file doesn't exist
    }
  });

  describe('createEntities', () => {
    it('should create new entities', async () => {
      const entities: Entity[] = [
        { name: 'Alice', entityType: 'person', observations: ['works at Acme Corp'] },
        { name: 'Bob', entityType: 'person', observations: ['likes programming'] },
      ];
      const newEntities = await manager.createEntities(entities);
      expect(newEntities).toHaveLength(2);
      expect(newEntities).toEqual(entities);
      const graph = await manager.readGraph();
      expect(graph.entities).toHaveLength(2);
    });

    it('should not create duplicate entities', async () => {
      const entities: Entity[] = [
        { name: 'Alice', entityType: 'person', observations: ['works at Acme Corp'] },
      ];
      await manager.createEntities(entities);
      // Second call with the same name should be a no-op.
      const newEntities = await manager.createEntities(entities);
      expect(newEntities).toHaveLength(0);
      const graph = await manager.readGraph();
      expect(graph.entities).toHaveLength(1);
    });

    it('should handle empty entity arrays', async () => {
      const newEntities = await manager.createEntities([]);
      expect(newEntities).toHaveLength(0);
    });
  });

  describe('createRelations', () => {
    it('should create new relations', async () => {
      await manager.createEntities([
        { name: 'Alice', entityType: 'person', observations: [] },
        { name: 'Bob', entityType: 'person', observations: [] },
      ]);
      const relations: Relation[] = [
        { from: 'Alice', to: 'Bob', relationType: 'knows' },
      ];
      const newRelations = await manager.createRelations(relations);
      expect(newRelations).toHaveLength(1);
      expect(newRelations).toEqual(relations);
      const graph = await manager.readGraph();
      expect(graph.relations).toHaveLength(1);
    });

    it('should not create duplicate relations', async () => {
      await manager.createEntities([
        { name: 'Alice', entityType: 'person', observations: [] },
        { name: 'Bob', entityType: 'person', observations: [] },
      ]);
      const relations: Relation[] = [
        { from: 'Alice', to: 'Bob', relationType: 'knows' },
      ];
      await manager.createRelations(relations);
      // Same (from, to, relationType) triple again should add nothing.
      const newRelations = await manager.createRelations(relations);
      expect(newRelations).toHaveLength(0);
      const graph = await manager.readGraph();
      expect(graph.relations).toHaveLength(1);
    });

    it('should handle empty relation arrays', async () => {
      const newRelations = await manager.createRelations([]);
      expect(newRelations).toHaveLength(0);
    });
  });

  describe('addObservations', () => {
    it('should add observations to existing entities', async () => {
      await manager.createEntities([
        { name: 'Alice', entityType: 'person', observations: ['works at Acme Corp'] },
      ]);
      const results = await manager.addObservations([
        { entityName: 'Alice', contents: ['likes coffee', 'has a dog'] },
      ]);
      expect(results).toHaveLength(1);
      expect(results[0].entityName).toBe('Alice');
      expect(results[0].addedObservations).toHaveLength(2);
      const graph = await manager.readGraph();
      const alice = graph.entities.find(e => e.name === 'Alice');
      expect(alice?.observations).toHaveLength(3);
    });

    it('should not add duplicate observations', async () => {
      await manager.createEntities([
        { name: 'Alice', entityType: 'person', observations: ['works at Acme Corp'] },
      ]);
      await manager.addObservations([
        { entityName: 'Alice', contents: ['likes coffee'] },
      ]);
      // 'likes coffee' already exists, so only 'has a dog' is reported as added.
      const results = await manager.addObservations([
        { entityName: 'Alice', contents: ['likes coffee', 'has a dog'] },
      ]);
      expect(results[0].addedObservations).toHaveLength(1);
      expect(results[0].addedObservations).toContain('has a dog');
      const graph = await manager.readGraph();
      const alice = graph.entities.find(e => e.name === 'Alice');
      expect(alice?.observations).toHaveLength(3);
    });

    it('should throw error for non-existent entity', async () => {
      await expect(
        manager.addObservations([
          { entityName: 'NonExistent', contents: ['some observation'] },
        ])
      ).rejects.toThrow('Entity with name NonExistent not found');
    });
  });

  describe('deleteEntities', () => {
    it('should delete entities', async () => {
      await manager.createEntities([
        { name: 'Alice', entityType: 'person', observations: [] },
        { name: 'Bob', entityType: 'person', observations: [] },
      ]);
      await manager.deleteEntities(['Alice']);
      const graph = await manager.readGraph();
      expect(graph.entities).toHaveLength(1);
      expect(graph.entities[0].name).toBe('Bob');
    });

    it('should cascade delete relations when deleting entities', async () => {
      await manager.createEntities([
        { name: 'Alice', entityType: 'person', observations: [] },
        { name: 'Bob', entityType: 'person', observations: [] },
        { name: 'Charlie', entityType: 'person', observations: [] },
      ]);
      await manager.createRelations([
        { from: 'Alice', to: 'Bob', relationType: 'knows' },
        { from: 'Bob', to: 'Charlie', relationType: 'knows' },
      ]);
      // Bob appears in both relations, so both must go with him.
      await manager.deleteEntities(['Bob']);
      const graph = await manager.readGraph();
      expect(graph.entities).toHaveLength(2);
      expect(graph.relations).toHaveLength(0);
    });

    it('should handle deleting non-existent entities', async () => {
      await manager.deleteEntities(['NonExistent']);
      const graph = await manager.readGraph();
      expect(graph.entities).toHaveLength(0);
    });
  });

  describe('deleteObservations', () => {
    it('should delete observations from entities', async () => {
      await manager.createEntities([
        { name: 'Alice', entityType: 'person', observations: ['works at Acme Corp', 'likes coffee'] },
      ]);
      await manager.deleteObservations([
        { entityName: 'Alice', observations: ['likes coffee'] },
      ]);
      const graph = await manager.readGraph();
      const alice = graph.entities.find(e => e.name === 'Alice');
      expect(alice?.observations).toHaveLength(1);
      expect(alice?.observations).toContain('works at Acme Corp');
    });

    it('should handle deleting from non-existent entities', async () => {
      await manager.deleteObservations([
        { entityName: 'NonExistent', observations: ['some observation'] },
      ]);
      // Should not throw error
      const graph = await manager.readGraph();
      expect(graph.entities).toHaveLength(0);
    });
  });

  describe('deleteRelations', () => {
    it('should delete specific relations', async () => {
      await manager.createEntities([
        { name: 'Alice', entityType: 'person', observations: [] },
        { name: 'Bob', entityType: 'person', observations: [] },
      ]);
      await manager.createRelations([
        { from: 'Alice', to: 'Bob', relationType: 'knows' },
        { from: 'Alice', to: 'Bob', relationType: 'works_with' },
      ]);
      // Only the exact (from, to, relationType) match is removed.
      await manager.deleteRelations([
        { from: 'Alice', to: 'Bob', relationType: 'knows' },
      ]);
      const graph = await manager.readGraph();
      expect(graph.relations).toHaveLength(1);
      expect(graph.relations[0].relationType).toBe('works_with');
    });
  });

  describe('readGraph', () => {
    it('should return empty graph when file does not exist', async () => {
      const graph = await manager.readGraph();
      expect(graph.entities).toHaveLength(0);
      expect(graph.relations).toHaveLength(0);
    });

    it('should return complete graph with entities and relations', async () => {
      await manager.createEntities([
        { name: 'Alice', entityType: 'person', observations: ['works at Acme Corp'] },
      ]);
      await manager.createRelations([
        { from: 'Alice', to: 'Alice', relationType: 'self' },
      ]);
      const graph = await manager.readGraph();
      expect(graph.entities).toHaveLength(1);
      expect(graph.relations).toHaveLength(1);
    });
  });

  describe('searchNodes', () => {
    beforeEach(async () => {
      // Shared fixture: two people, one company, two relations.
      await manager.createEntities([
        { name: 'Alice', entityType: 'person', observations: ['works at Acme Corp', 'likes programming'] },
        { name: 'Bob', entityType: 'person', observations: ['works at TechCo'] },
        { name: 'Acme Corp', entityType: 'company', observations: ['tech company'] },
      ]);
      await manager.createRelations([
        { from: 'Alice', to: 'Acme Corp', relationType: 'works_at' },
        { from: 'Bob', to: 'Acme Corp', relationType: 'competitor' },
      ]);
    });

    it('should search by entity name', async () => {
      const result = await manager.searchNodes('Alice');
      expect(result.entities).toHaveLength(1);
      expect(result.entities[0].name).toBe('Alice');
    });

    it('should search by entity type', async () => {
      const result = await manager.searchNodes('company');
      expect(result.entities).toHaveLength(1);
      expect(result.entities[0].name).toBe('Acme Corp');
    });

    it('should search by observation content', async () => {
      const result = await manager.searchNodes('programming');
      expect(result.entities).toHaveLength(1);
      expect(result.entities[0].name).toBe('Alice');
    });

    it('should be case insensitive', async () => {
      const result = await manager.searchNodes('ALICE');
      expect(result.entities).toHaveLength(1);
      expect(result.entities[0].name).toBe('Alice');
    });

    it('should include relations between matched entities', async () => {
      // 'Acme' matches Alice (observation) and Acme Corp (name); Bob is not
      // matched, so his relation to Acme Corp must be excluded.
      const result = await manager.searchNodes('Acme');
      expect(result.entities).toHaveLength(2); // Alice and Acme Corp
      expect(result.relations).toHaveLength(1); // Only Alice -> Acme Corp relation
    });

    it('should return empty graph for no matches', async () => {
      const result = await manager.searchNodes('NonExistent');
      expect(result.entities).toHaveLength(0);
      expect(result.relations).toHaveLength(0);
    });
  });

  describe('openNodes', () => {
    beforeEach(async () => {
      // Shared fixture: a three-node chain Alice -> Bob -> Charlie.
      await manager.createEntities([
        { name: 'Alice', entityType: 'person', observations: [] },
        { name: 'Bob', entityType: 'person', observations: [] },
        { name: 'Charlie', entityType: 'person', observations: [] },
      ]);
      await manager.createRelations([
        { from: 'Alice', to: 'Bob', relationType: 'knows' },
        { from: 'Bob', to: 'Charlie', relationType: 'knows' },
      ]);
    });

    it('should open specific nodes by name', async () => {
      const result = await manager.openNodes(['Alice', 'Bob']);
      expect(result.entities).toHaveLength(2);
      expect(result.entities.map(e => e.name)).toContain('Alice');
      expect(result.entities.map(e => e.name)).toContain('Bob');
    });

    it('should include relations between opened nodes', async () => {
      const result = await manager.openNodes(['Alice', 'Bob']);
      expect(result.relations).toHaveLength(1);
      expect(result.relations[0].from).toBe('Alice');
      expect(result.relations[0].to).toBe('Bob');
    });

    it('should exclude relations to unopened nodes', async () => {
      // Bob's relations both involve unopened nodes, so none are returned.
      const result = await manager.openNodes(['Bob']);
      expect(result.relations).toHaveLength(0);
    });

    it('should handle opening non-existent nodes', async () => {
      const result = await manager.openNodes(['NonExistent']);
      expect(result.entities).toHaveLength(0);
    });

    it('should handle empty node list', async () => {
      const result = await manager.openNodes([]);
      expect(result.entities).toHaveLength(0);
      expect(result.relations).toHaveLength(0);
    });
  });

  describe('file persistence', () => {
    it('should persist data across manager instances', async () => {
      await manager.createEntities([
        { name: 'Alice', entityType: 'person', observations: ['persistent data'] },
      ]);
      // Create new manager instance with same file path
      const manager2 = new KnowledgeGraphManager(testFilePath);
      const graph = await manager2.readGraph();
      expect(graph.entities).toHaveLength(1);
      expect(graph.entities[0].name).toBe('Alice');
    });

    it('should handle JSONL format correctly', async () => {
      await manager.createEntities([
        { name: 'Alice', entityType: 'person', observations: [] },
      ]);
      await manager.createRelations([
        { from: 'Alice', to: 'Alice', relationType: 'self' },
      ]);
      // Read file directly: one JSON object per line, tagged with "type".
      const fileContent = await fs.readFile(testFilePath, 'utf-8');
      const lines = fileContent.split('\n').filter(line => line.trim());
      expect(lines).toHaveLength(2);
      expect(JSON.parse(lines[0])).toHaveProperty('type', 'entity');
      expect(JSON.parse(lines[1])).toHaveProperty('type', 'relation');
    });

    it('should strip type field from entities when loading from file', async () => {
      // Create entities and relations (these get saved with type field)
      await manager.createEntities([
        { name: 'Alice', entityType: 'person', observations: ['test observation'] },
        { name: 'Bob', entityType: 'person', observations: [] },
      ]);
      await manager.createRelations([
        { from: 'Alice', to: 'Bob', relationType: 'knows' },
      ]);
      // Verify file contains type field (order may vary)
      const fileContent = await fs.readFile(testFilePath, 'utf-8');
      const fileLines = fileContent.split('\n').filter(line => line.trim());
      const fileItems = fileLines.map(line => JSON.parse(line));
      const fileEntity = fileItems.find(item => item.type === 'entity');
      const fileRelation = fileItems.find(item => item.type === 'relation');
      expect(fileEntity).toBeDefined();
      expect(fileEntity).toHaveProperty('type', 'entity');
      expect(fileRelation).toBeDefined();
      expect(fileRelation).toHaveProperty('type', 'relation');
      // Create new manager instance to force reload from file
      const manager2 = new KnowledgeGraphManager(testFilePath);
      const graph = await manager2.readGraph();
      // Verify loaded entities don't have type field
      expect(graph.entities).toHaveLength(2);
      graph.entities.forEach(entity => {
        expect(entity).not.toHaveProperty('type');
        expect(entity).toHaveProperty('name');
        expect(entity).toHaveProperty('entityType');
        expect(entity).toHaveProperty('observations');
      });
      // Verify loaded relations don't have type field
      expect(graph.relations).toHaveLength(1);
      graph.relations.forEach(relation => {
        expect(relation).not.toHaveProperty('type');
        expect(relation).toHaveProperty('from');
        expect(relation).toHaveProperty('to');
        expect(relation).toHaveProperty('relationType');
      });
    });

    it('should strip type field from searchNodes results', async () => {
      await manager.createEntities([
        { name: 'Alice', entityType: 'person', observations: ['works at Acme'] },
      ]);
      await manager.createRelations([
        { from: 'Alice', to: 'Alice', relationType: 'self' },
      ]);
      // Create new manager instance to force reload from file
      const manager2 = new KnowledgeGraphManager(testFilePath);
      const result = await manager2.searchNodes('Alice');
      // Verify search results don't have type field
      expect(result.entities).toHaveLength(1);
      expect(result.entities[0]).not.toHaveProperty('type');
      expect(result.entities[0].name).toBe('Alice');
      expect(result.relations).toHaveLength(1);
      expect(result.relations[0]).not.toHaveProperty('type');
      expect(result.relations[0].from).toBe('Alice');
    });

    it('should strip type field from openNodes results', async () => {
      await manager.createEntities([
        { name: 'Alice', entityType: 'person', observations: [] },
        { name: 'Bob', entityType: 'person', observations: [] },
      ]);
      await manager.createRelations([
        { from: 'Alice', to: 'Bob', relationType: 'knows' },
      ]);
      // Create new manager instance to force reload from file
      const manager2 = new KnowledgeGraphManager(testFilePath);
      const result = await manager2.openNodes(['Alice', 'Bob']);
      // Verify open results don't have type field
      expect(result.entities).toHaveLength(2);
      result.entities.forEach(entity => {
        expect(entity).not.toHaveProperty('type');
      });
      expect(result.relations).toHaveLength(1);
      expect(result.relations[0]).not.toHaveProperty('type');
    });
  });
});

View File

@@ -1,53 +1,93 @@
#!/usr/bin/env node
import { Server } from "@modelcontextprotocol/sdk/server/index.js";
import { McpServer } from "@modelcontextprotocol/sdk/server/mcp.js";
import { StdioServerTransport } from "@modelcontextprotocol/sdk/server/stdio.js";
import {
CallToolRequestSchema,
ListToolsRequestSchema,
} from "@modelcontextprotocol/sdk/types.js";
import { z } from "zod";
import { promises as fs } from 'fs';
import path from 'path';
import { fileURLToPath } from 'url';
// Define memory file path using environment variable with fallback
const defaultMemoryPath = path.join(path.dirname(fileURLToPath(import.meta.url)), 'memory.json');
export const defaultMemoryPath = path.join(path.dirname(fileURLToPath(import.meta.url)), 'memory.jsonl');
// If MEMORY_FILE_PATH is just a filename, put it in the same directory as the script
const MEMORY_FILE_PATH = process.env.MEMORY_FILE_PATH
? path.isAbsolute(process.env.MEMORY_FILE_PATH)
? process.env.MEMORY_FILE_PATH
: path.join(path.dirname(fileURLToPath(import.meta.url)), process.env.MEMORY_FILE_PATH)
: defaultMemoryPath;
// Handle backward compatibility: migrate memory.json to memory.jsonl if needed
export async function ensureMemoryFilePath(): Promise<string> {
if (process.env.MEMORY_FILE_PATH) {
// Custom path provided, use it as-is (with absolute path resolution)
return path.isAbsolute(process.env.MEMORY_FILE_PATH)
? process.env.MEMORY_FILE_PATH
: path.join(path.dirname(fileURLToPath(import.meta.url)), process.env.MEMORY_FILE_PATH);
}
// No custom path set, check for backward compatibility migration
const oldMemoryPath = path.join(path.dirname(fileURLToPath(import.meta.url)), 'memory.json');
const newMemoryPath = defaultMemoryPath;
try {
// Check if old file exists and new file doesn't
await fs.access(oldMemoryPath);
try {
await fs.access(newMemoryPath);
// Both files exist, use new one (no migration needed)
return newMemoryPath;
} catch {
// Old file exists, new file doesn't - migrate
console.error('DETECTED: Found legacy memory.json file, migrating to memory.jsonl for JSONL format compatibility');
await fs.rename(oldMemoryPath, newMemoryPath);
console.error('COMPLETED: Successfully migrated memory.json to memory.jsonl');
return newMemoryPath;
}
} catch {
// Old file doesn't exist, use new path
return newMemoryPath;
}
}
// Initialize memory file path (will be set during startup)
let MEMORY_FILE_PATH: string;
// We are storing our memory using entities, relations, and observations in a graph structure
interface Entity {
export interface Entity {
name: string;
entityType: string;
observations: string[];
}
interface Relation {
export interface Relation {
from: string;
to: string;
relationType: string;
}
interface KnowledgeGraph {
export interface KnowledgeGraph {
entities: Entity[];
relations: Relation[];
}
// The KnowledgeGraphManager class contains all operations to interact with the knowledge graph
class KnowledgeGraphManager {
export class KnowledgeGraphManager {
constructor(private memoryFilePath: string) {}
private async loadGraph(): Promise<KnowledgeGraph> {
try {
const data = await fs.readFile(MEMORY_FILE_PATH, "utf-8");
const data = await fs.readFile(this.memoryFilePath, "utf-8");
const lines = data.split("\n").filter(line => line.trim() !== "");
return lines.reduce((graph: KnowledgeGraph, line) => {
const item = JSON.parse(line);
if (item.type === "entity") graph.entities.push(item as Entity);
if (item.type === "relation") graph.relations.push(item as Relation);
if (item.type === "entity") {
graph.entities.push({
name: item.name,
entityType: item.entityType,
observations: item.observations
});
}
if (item.type === "relation") {
graph.relations.push({
from: item.from,
to: item.to,
relationType: item.relationType
});
}
return graph;
}, { entities: [], relations: [] });
} catch (error) {
@@ -60,10 +100,20 @@ class KnowledgeGraphManager {
private async saveGraph(graph: KnowledgeGraph): Promise<void> {
const lines = [
...graph.entities.map(e => JSON.stringify({ type: "entity", ...e })),
...graph.relations.map(r => JSON.stringify({ type: "relation", ...r })),
...graph.entities.map(e => JSON.stringify({
type: "entity",
name: e.name,
entityType: e.entityType,
observations: e.observations
})),
...graph.relations.map(r => JSON.stringify({
type: "relation",
from: r.from,
to: r.to,
relationType: r.relationType
})),
];
await fs.writeFile(MEMORY_FILE_PATH, lines.join("\n"));
await fs.writeFile(this.memoryFilePath, lines.join("\n"));
}
async createEntities(entities: Entity[]): Promise<Entity[]> {
@@ -183,233 +233,245 @@ class KnowledgeGraphManager {
}
}
const knowledgeGraphManager = new KnowledgeGraphManager();
let knowledgeGraphManager: KnowledgeGraphManager;
// Zod schemas for entities and relations
const EntitySchema = z.object({
name: z.string().describe("The name of the entity"),
entityType: z.string().describe("The type of the entity"),
observations: z.array(z.string()).describe("An array of observation contents associated with the entity")
});
const RelationSchema = z.object({
from: z.string().describe("The name of the entity where the relation starts"),
to: z.string().describe("The name of the entity where the relation ends"),
relationType: z.string().describe("The type of the relation")
});
// The server instance and tools exposed to Claude
const server = new Server({
const server = new McpServer({
name: "memory-server",
version: "0.6.3",
}, {
capabilities: {
tools: {},
});
// Register create_entities tool
server.registerTool(
"create_entities",
{
title: "Create Entities",
description: "Create multiple new entities in the knowledge graph",
inputSchema: {
entities: z.array(EntitySchema)
},
},);
server.setRequestHandler(ListToolsRequestSchema, async () => {
return {
tools: [
{
name: "create_entities",
description: "Create multiple new entities in the knowledge graph",
inputSchema: {
type: "object",
properties: {
entities: {
type: "array",
items: {
type: "object",
properties: {
name: { type: "string", description: "The name of the entity" },
entityType: { type: "string", description: "The type of the entity" },
observations: {
type: "array",
items: { type: "string" },
description: "An array of observation contents associated with the entity"
},
},
required: ["name", "entityType", "observations"],
},
},
},
required: ["entities"],
},
},
{
name: "create_relations",
description: "Create multiple new relations between entities in the knowledge graph. Relations should be in active voice",
inputSchema: {
type: "object",
properties: {
relations: {
type: "array",
items: {
type: "object",
properties: {
from: { type: "string", description: "The name of the entity where the relation starts" },
to: { type: "string", description: "The name of the entity where the relation ends" },
relationType: { type: "string", description: "The type of the relation" },
},
required: ["from", "to", "relationType"],
},
},
},
required: ["relations"],
},
},
{
name: "add_observations",
description: "Add new observations to existing entities in the knowledge graph",
inputSchema: {
type: "object",
properties: {
observations: {
type: "array",
items: {
type: "object",
properties: {
entityName: { type: "string", description: "The name of the entity to add the observations to" },
contents: {
type: "array",
items: { type: "string" },
description: "An array of observation contents to add"
},
},
required: ["entityName", "contents"],
},
},
},
required: ["observations"],
},
},
{
name: "delete_entities",
description: "Delete multiple entities and their associated relations from the knowledge graph",
inputSchema: {
type: "object",
properties: {
entityNames: {
type: "array",
items: { type: "string" },
description: "An array of entity names to delete"
},
},
required: ["entityNames"],
},
},
{
name: "delete_observations",
description: "Delete specific observations from entities in the knowledge graph",
inputSchema: {
type: "object",
properties: {
deletions: {
type: "array",
items: {
type: "object",
properties: {
entityName: { type: "string", description: "The name of the entity containing the observations" },
observations: {
type: "array",
items: { type: "string" },
description: "An array of observations to delete"
},
},
required: ["entityName", "observations"],
},
},
},
required: ["deletions"],
},
},
{
name: "delete_relations",
description: "Delete multiple relations from the knowledge graph",
inputSchema: {
type: "object",
properties: {
relations: {
type: "array",
items: {
type: "object",
properties: {
from: { type: "string", description: "The name of the entity where the relation starts" },
to: { type: "string", description: "The name of the entity where the relation ends" },
relationType: { type: "string", description: "The type of the relation" },
},
required: ["from", "to", "relationType"],
},
description: "An array of relations to delete"
},
},
required: ["relations"],
},
},
{
name: "read_graph",
description: "Read the entire knowledge graph",
inputSchema: {
type: "object",
properties: {},
},
},
{
name: "search_nodes",
description: "Search for nodes in the knowledge graph based on a query",
inputSchema: {
type: "object",
properties: {
query: { type: "string", description: "The search query to match against entity names, types, and observation content" },
},
required: ["query"],
},
},
{
name: "open_nodes",
description: "Open specific nodes in the knowledge graph by their names",
inputSchema: {
type: "object",
properties: {
names: {
type: "array",
items: { type: "string" },
description: "An array of entity names to retrieve",
},
},
required: ["names"],
},
},
],
};
});
server.setRequestHandler(CallToolRequestSchema, async (request) => {
const { name, arguments: args } = request.params;
if (name === "read_graph") {
return { content: [{ type: "text", text: JSON.stringify(await knowledgeGraphManager.readGraph(), null, 2) }] };
outputSchema: {
entities: z.array(EntitySchema)
}
},
async ({ entities }) => {
const result = await knowledgeGraphManager.createEntities(entities);
return {
content: [{ type: "text" as const, text: JSON.stringify(result, null, 2) }],
structuredContent: { entities: result }
};
}
);
if (!args) {
throw new Error(`No arguments provided for tool: ${name}`);
// Register create_relations tool
server.registerTool(
"create_relations",
{
title: "Create Relations",
description: "Create multiple new relations between entities in the knowledge graph. Relations should be in active voice",
inputSchema: {
relations: z.array(RelationSchema)
},
outputSchema: {
relations: z.array(RelationSchema)
}
},
async ({ relations }) => {
const result = await knowledgeGraphManager.createRelations(relations);
return {
content: [{ type: "text" as const, text: JSON.stringify(result, null, 2) }],
structuredContent: { relations: result }
};
}
);
switch (name) {
case "create_entities":
return { content: [{ type: "text", text: JSON.stringify(await knowledgeGraphManager.createEntities(args.entities as Entity[]), null, 2) }] };
case "create_relations":
return { content: [{ type: "text", text: JSON.stringify(await knowledgeGraphManager.createRelations(args.relations as Relation[]), null, 2) }] };
case "add_observations":
return { content: [{ type: "text", text: JSON.stringify(await knowledgeGraphManager.addObservations(args.observations as { entityName: string; contents: string[] }[]), null, 2) }] };
case "delete_entities":
await knowledgeGraphManager.deleteEntities(args.entityNames as string[]);
return { content: [{ type: "text", text: "Entities deleted successfully" }] };
case "delete_observations":
await knowledgeGraphManager.deleteObservations(args.deletions as { entityName: string; observations: string[] }[]);
return { content: [{ type: "text", text: "Observations deleted successfully" }] };
case "delete_relations":
await knowledgeGraphManager.deleteRelations(args.relations as Relation[]);
return { content: [{ type: "text", text: "Relations deleted successfully" }] };
case "search_nodes":
return { content: [{ type: "text", text: JSON.stringify(await knowledgeGraphManager.searchNodes(args.query as string), null, 2) }] };
case "open_nodes":
return { content: [{ type: "text", text: JSON.stringify(await knowledgeGraphManager.openNodes(args.names as string[]), null, 2) }] };
default:
throw new Error(`Unknown tool: ${name}`);
// Register add_observations tool
server.registerTool(
"add_observations",
{
title: "Add Observations",
description: "Add new observations to existing entities in the knowledge graph",
inputSchema: {
observations: z.array(z.object({
entityName: z.string().describe("The name of the entity to add the observations to"),
contents: z.array(z.string()).describe("An array of observation contents to add")
}))
},
outputSchema: {
results: z.array(z.object({
entityName: z.string(),
addedObservations: z.array(z.string())
}))
}
},
async ({ observations }) => {
const result = await knowledgeGraphManager.addObservations(observations);
return {
content: [{ type: "text" as const, text: JSON.stringify(result, null, 2) }],
structuredContent: { results: result }
};
}
});
);
// Register delete_entities tool
server.registerTool(
"delete_entities",
{
title: "Delete Entities",
description: "Delete multiple entities and their associated relations from the knowledge graph",
inputSchema: {
entityNames: z.array(z.string()).describe("An array of entity names to delete")
},
outputSchema: {
success: z.boolean(),
message: z.string()
}
},
async ({ entityNames }) => {
await knowledgeGraphManager.deleteEntities(entityNames);
return {
content: [{ type: "text" as const, text: "Entities deleted successfully" }],
structuredContent: { success: true, message: "Entities deleted successfully" }
};
}
);
// Register delete_observations tool
server.registerTool(
"delete_observations",
{
title: "Delete Observations",
description: "Delete specific observations from entities in the knowledge graph",
inputSchema: {
deletions: z.array(z.object({
entityName: z.string().describe("The name of the entity containing the observations"),
observations: z.array(z.string()).describe("An array of observations to delete")
}))
},
outputSchema: {
success: z.boolean(),
message: z.string()
}
},
async ({ deletions }) => {
await knowledgeGraphManager.deleteObservations(deletions);
return {
content: [{ type: "text" as const, text: "Observations deleted successfully" }],
structuredContent: { success: true, message: "Observations deleted successfully" }
};
}
);
// Register delete_relations tool
server.registerTool(
"delete_relations",
{
title: "Delete Relations",
description: "Delete multiple relations from the knowledge graph",
inputSchema: {
relations: z.array(RelationSchema).describe("An array of relations to delete")
},
outputSchema: {
success: z.boolean(),
message: z.string()
}
},
async ({ relations }) => {
await knowledgeGraphManager.deleteRelations(relations);
return {
content: [{ type: "text" as const, text: "Relations deleted successfully" }],
structuredContent: { success: true, message: "Relations deleted successfully" }
};
}
);
// Register read_graph tool
server.registerTool(
"read_graph",
{
title: "Read Graph",
description: "Read the entire knowledge graph",
inputSchema: {},
outputSchema: {
entities: z.array(EntitySchema),
relations: z.array(RelationSchema)
}
},
async () => {
const graph = await knowledgeGraphManager.readGraph();
return {
content: [{ type: "text" as const, text: JSON.stringify(graph, null, 2) }],
structuredContent: { ...graph }
};
}
);
// Register search_nodes tool
server.registerTool(
"search_nodes",
{
title: "Search Nodes",
description: "Search for nodes in the knowledge graph based on a query",
inputSchema: {
query: z.string().describe("The search query to match against entity names, types, and observation content")
},
outputSchema: {
entities: z.array(EntitySchema),
relations: z.array(RelationSchema)
}
},
async ({ query }) => {
const graph = await knowledgeGraphManager.searchNodes(query);
return {
content: [{ type: "text" as const, text: JSON.stringify(graph, null, 2) }],
structuredContent: { ...graph }
};
}
);
// Register open_nodes tool
server.registerTool(
"open_nodes",
{
title: "Open Nodes",
description: "Open specific nodes in the knowledge graph by their names",
inputSchema: {
names: z.array(z.string()).describe("An array of entity names to retrieve")
},
outputSchema: {
entities: z.array(EntitySchema),
relations: z.array(RelationSchema)
}
},
async ({ names }) => {
const graph = await knowledgeGraphManager.openNodes(names);
return {
content: [{ type: "text" as const, text: JSON.stringify(graph, null, 2) }],
structuredContent: { ...graph }
};
}
);
async function main() {
// Initialize memory file path with backward compatibility
MEMORY_FILE_PATH = await ensureMemoryFilePath();
// Initialize knowledge graph manager with the memory file path
knowledgeGraphManager = new KnowledgeGraphManager(MEMORY_FILE_PATH);
const transport = new StdioServerTransport();
await server.connect(transport);
console.error("Knowledge Graph MCP Server running on stdio");

View File

@@ -3,9 +3,14 @@
"version": "0.6.3",
"description": "MCP server for enabling memory for Claude through a knowledge graph",
"license": "MIT",
"mcpName": "io.github.modelcontextprotocol/server-memory",
"author": "Anthropic, PBC (https://anthropic.com)",
"homepage": "https://modelcontextprotocol.io",
"bugs": "https://github.com/modelcontextprotocol/servers/issues",
"repository": {
"type": "git",
"url": "https://github.com/modelcontextprotocol/servers.git"
},
"type": "module",
"bin": {
"mcp-server-memory": "dist/index.js"
@@ -16,14 +21,17 @@
"scripts": {
"build": "tsc && shx chmod +x dist/*.js",
"prepare": "npm run build",
"watch": "tsc --watch"
"watch": "tsc --watch",
"test": "vitest run --coverage"
},
"dependencies": {
"@modelcontextprotocol/sdk": "1.0.1"
"@modelcontextprotocol/sdk": "^1.25.2"
},
"devDependencies": {
"@types/node": "^22",
"@vitest/coverage-v8": "^2.1.8",
"shx": "^0.3.4",
"typescript": "^5.6.2"
"typescript": "^5.6.2",
"vitest": "^2.1.8"
}
}

View File

@@ -1,11 +1,14 @@
{
"extends": "../../tsconfig.json",
"compilerOptions": {
"outDir": "./dist",
"rootDir": "."
},
"include": [
"./**/*.ts"
]
}
"extends": "../../tsconfig.json",
"compilerOptions": {
"outDir": "./dist",
"rootDir": "."
},
"include": [
"./**/*.ts"
],
"exclude": [
"**/*.test.ts",
"vitest.config.ts"
]
}

View File

@@ -0,0 +1,14 @@
import { defineConfig } from 'vitest/config';
export default defineConfig({
test: {
globals: true,
environment: 'node',
include: ['**/__tests__/**/*.test.ts'],
coverage: {
provider: 'v8',
include: ['**/*.ts'],
exclude: ['**/__tests__/**', '**/dist/**'],
},
},
});

View File

@@ -96,7 +96,7 @@ Add the configuration to your user-level MCP configuration file. Open the Comman
**Method 2: Workspace Configuration**
Alternatively, you can add the configuration to a file called `.vscode/mcp.json` in your workspace. This will allow you to share the configuration with others.
> For more details about MCP configuration in VS Code, see the [official VS Code MCP documentation](https://code.visualstudio.com/docs/copilot/mcp).
> For more details about MCP configuration in VS Code, see the [official VS Code MCP documentation](https://code.visualstudio.com/docs/copilot/customization/mcp-servers).
For NPX installation:
@@ -132,6 +132,16 @@ For Docker installation:
}
```
### Usage with Codex CLI
Run the following:
#### npx
```bash
codex mcp add sequential-thinking npx -y @modelcontextprotocol/server-sequential-thinking
```
## Building
Docker:

View File

@@ -0,0 +1,308 @@
import { describe, it, expect, beforeEach, afterEach, vi } from 'vitest';
import { SequentialThinkingServer, ThoughtData } from '../lib.js';
// Mock chalk to avoid ESM issues
vi.mock('chalk', () => {
const chalkMock = {
yellow: (str: string) => str,
green: (str: string) => str,
blue: (str: string) => str,
};
return {
default: chalkMock,
};
});
describe('SequentialThinkingServer', () => {
let server: SequentialThinkingServer;
beforeEach(() => {
// Disable thought logging for tests
process.env.DISABLE_THOUGHT_LOGGING = 'true';
server = new SequentialThinkingServer();
});
// Note: Input validation tests removed - validation now happens at the tool
// registration layer via Zod schemas before processThought is called
describe('processThought - valid inputs', () => {
it('should accept valid basic thought', () => {
const input = {
thought: 'This is my first thought',
thoughtNumber: 1,
totalThoughts: 3,
nextThoughtNeeded: true
};
const result = server.processThought(input);
expect(result.isError).toBeUndefined();
const data = JSON.parse(result.content[0].text);
expect(data.thoughtNumber).toBe(1);
expect(data.totalThoughts).toBe(3);
expect(data.nextThoughtNeeded).toBe(true);
expect(data.thoughtHistoryLength).toBe(1);
});
it('should accept thought with optional fields', () => {
const input = {
thought: 'Revising my earlier idea',
thoughtNumber: 2,
totalThoughts: 3,
nextThoughtNeeded: true,
isRevision: true,
revisesThought: 1,
needsMoreThoughts: false
};
const result = server.processThought(input);
expect(result.isError).toBeUndefined();
const data = JSON.parse(result.content[0].text);
expect(data.thoughtNumber).toBe(2);
expect(data.thoughtHistoryLength).toBe(1);
});
it('should track multiple thoughts in history', () => {
const input1 = {
thought: 'First thought',
thoughtNumber: 1,
totalThoughts: 3,
nextThoughtNeeded: true
};
const input2 = {
thought: 'Second thought',
thoughtNumber: 2,
totalThoughts: 3,
nextThoughtNeeded: true
};
const input3 = {
thought: 'Final thought',
thoughtNumber: 3,
totalThoughts: 3,
nextThoughtNeeded: false
};
server.processThought(input1);
server.processThought(input2);
const result = server.processThought(input3);
const data = JSON.parse(result.content[0].text);
expect(data.thoughtHistoryLength).toBe(3);
expect(data.nextThoughtNeeded).toBe(false);
});
it('should auto-adjust totalThoughts if thoughtNumber exceeds it', () => {
const input = {
thought: 'Thought 5',
thoughtNumber: 5,
totalThoughts: 3,
nextThoughtNeeded: true
};
const result = server.processThought(input);
const data = JSON.parse(result.content[0].text);
expect(data.totalThoughts).toBe(5);
});
});
describe('processThought - branching', () => {
it('should track branches correctly', () => {
const input1 = {
thought: 'Main thought',
thoughtNumber: 1,
totalThoughts: 3,
nextThoughtNeeded: true
};
const input2 = {
thought: 'Branch A thought',
thoughtNumber: 2,
totalThoughts: 3,
nextThoughtNeeded: true,
branchFromThought: 1,
branchId: 'branch-a'
};
const input3 = {
thought: 'Branch B thought',
thoughtNumber: 2,
totalThoughts: 3,
nextThoughtNeeded: false,
branchFromThought: 1,
branchId: 'branch-b'
};
server.processThought(input1);
server.processThought(input2);
const result = server.processThought(input3);
const data = JSON.parse(result.content[0].text);
expect(data.branches).toContain('branch-a');
expect(data.branches).toContain('branch-b');
expect(data.branches.length).toBe(2);
expect(data.thoughtHistoryLength).toBe(3);
});
it('should allow multiple thoughts in same branch', () => {
const input1 = {
thought: 'Branch thought 1',
thoughtNumber: 1,
totalThoughts: 2,
nextThoughtNeeded: true,
branchFromThought: 1,
branchId: 'branch-a'
};
const input2 = {
thought: 'Branch thought 2',
thoughtNumber: 2,
totalThoughts: 2,
nextThoughtNeeded: false,
branchFromThought: 1,
branchId: 'branch-a'
};
server.processThought(input1);
const result = server.processThought(input2);
const data = JSON.parse(result.content[0].text);
expect(data.branches).toContain('branch-a');
expect(data.branches.length).toBe(1);
});
});
describe('processThought - edge cases', () => {
it('should handle very long thought strings', () => {
const input = {
thought: 'a'.repeat(10000),
thoughtNumber: 1,
totalThoughts: 1,
nextThoughtNeeded: false
};
const result = server.processThought(input);
expect(result.isError).toBeUndefined();
});
it('should handle thoughtNumber = 1, totalThoughts = 1', () => {
const input = {
thought: 'Only thought',
thoughtNumber: 1,
totalThoughts: 1,
nextThoughtNeeded: false
};
const result = server.processThought(input);
expect(result.isError).toBeUndefined();
const data = JSON.parse(result.content[0].text);
expect(data.thoughtNumber).toBe(1);
expect(data.totalThoughts).toBe(1);
});
it('should handle nextThoughtNeeded = false', () => {
const input = {
thought: 'Final thought',
thoughtNumber: 3,
totalThoughts: 3,
nextThoughtNeeded: false
};
const result = server.processThought(input);
const data = JSON.parse(result.content[0].text);
expect(data.nextThoughtNeeded).toBe(false);
});
});
describe('processThought - response format', () => {
it('should return correct response structure on success', () => {
const input = {
thought: 'Test thought',
thoughtNumber: 1,
totalThoughts: 1,
nextThoughtNeeded: false
};
const result = server.processThought(input);
expect(result).toHaveProperty('content');
expect(Array.isArray(result.content)).toBe(true);
expect(result.content.length).toBe(1);
expect(result.content[0]).toHaveProperty('type', 'text');
expect(result.content[0]).toHaveProperty('text');
});
it('should return valid JSON in response', () => {
const input = {
thought: 'Test thought',
thoughtNumber: 1,
totalThoughts: 1,
nextThoughtNeeded: false
};
const result = server.processThought(input);
expect(() => JSON.parse(result.content[0].text)).not.toThrow();
});
});
describe('processThought - with logging enabled', () => {
let serverWithLogging: SequentialThinkingServer;
beforeEach(() => {
// Enable thought logging for these tests
delete process.env.DISABLE_THOUGHT_LOGGING;
serverWithLogging = new SequentialThinkingServer();
});
afterEach(() => {
// Reset to disabled for other tests
process.env.DISABLE_THOUGHT_LOGGING = 'true';
});
it('should format and log regular thoughts', () => {
const input = {
thought: 'Test thought with logging',
thoughtNumber: 1,
totalThoughts: 3,
nextThoughtNeeded: true
};
const result = serverWithLogging.processThought(input);
expect(result.isError).toBeUndefined();
});
it('should format and log revision thoughts', () => {
const input = {
thought: 'Revised thought',
thoughtNumber: 2,
totalThoughts: 3,
nextThoughtNeeded: true,
isRevision: true,
revisesThought: 1
};
const result = serverWithLogging.processThought(input);
expect(result.isError).toBeUndefined();
});
it('should format and log branch thoughts', () => {
const input = {
thought: 'Branch thought',
thoughtNumber: 2,
totalThoughts: 3,
nextThoughtNeeded: false,
branchFromThought: 1,
branchId: 'branch-a'
};
const result = serverWithLogging.processThought(input);
expect(result.isError).toBeUndefined();
});
});
});

View File

@@ -1,145 +1,22 @@
#!/usr/bin/env node
import { Server } from "@modelcontextprotocol/sdk/server/index.js";
import { McpServer } from "@modelcontextprotocol/sdk/server/mcp.js";
import { StdioServerTransport } from "@modelcontextprotocol/sdk/server/stdio.js";
import {
CallToolRequestSchema,
ListToolsRequestSchema,
Tool,
} from "@modelcontextprotocol/sdk/types.js";
// Fixed chalk import for ESM
import chalk from 'chalk';
import { z } from "zod";
import { SequentialThinkingServer } from './lib.js';
interface ThoughtData {
thought: string;
thoughtNumber: number;
totalThoughts: number;
isRevision?: boolean;
revisesThought?: number;
branchFromThought?: number;
branchId?: string;
needsMoreThoughts?: boolean;
nextThoughtNeeded: boolean;
}
const server = new McpServer({
name: "sequential-thinking-server",
version: "0.2.0",
});
class SequentialThinkingServer {
private thoughtHistory: ThoughtData[] = [];
private branches: Record<string, ThoughtData[]> = {};
private disableThoughtLogging: boolean;
const thinkingServer = new SequentialThinkingServer();
constructor() {
this.disableThoughtLogging = (process.env.DISABLE_THOUGHT_LOGGING || "").toLowerCase() === "true";
}
private validateThoughtData(input: unknown): ThoughtData {
const data = input as Record<string, unknown>;
if (!data.thought || typeof data.thought !== 'string') {
throw new Error('Invalid thought: must be a string');
}
if (!data.thoughtNumber || typeof data.thoughtNumber !== 'number') {
throw new Error('Invalid thoughtNumber: must be a number');
}
if (!data.totalThoughts || typeof data.totalThoughts !== 'number') {
throw new Error('Invalid totalThoughts: must be a number');
}
if (typeof data.nextThoughtNeeded !== 'boolean') {
throw new Error('Invalid nextThoughtNeeded: must be a boolean');
}
return {
thought: data.thought,
thoughtNumber: data.thoughtNumber,
totalThoughts: data.totalThoughts,
nextThoughtNeeded: data.nextThoughtNeeded,
isRevision: data.isRevision as boolean | undefined,
revisesThought: data.revisesThought as number | undefined,
branchFromThought: data.branchFromThought as number | undefined,
branchId: data.branchId as string | undefined,
needsMoreThoughts: data.needsMoreThoughts as boolean | undefined,
};
}
private formatThought(thoughtData: ThoughtData): string {
const { thoughtNumber, totalThoughts, thought, isRevision, revisesThought, branchFromThought, branchId } = thoughtData;
let prefix = '';
let context = '';
if (isRevision) {
prefix = chalk.yellow('🔄 Revision');
context = ` (revising thought ${revisesThought})`;
} else if (branchFromThought) {
prefix = chalk.green('🌿 Branch');
context = ` (from thought ${branchFromThought}, ID: ${branchId})`;
} else {
prefix = chalk.blue('💭 Thought');
context = '';
}
const header = `${prefix} ${thoughtNumber}/${totalThoughts}${context}`;
const border = '─'.repeat(Math.max(header.length, thought.length) + 4);
return `
${border}
${header}
${border}
${thought.padEnd(border.length - 2)}
${border}`;
}
public processThought(input: unknown): { content: Array<{ type: string; text: string }>; isError?: boolean } {
try {
const validatedInput = this.validateThoughtData(input);
if (validatedInput.thoughtNumber > validatedInput.totalThoughts) {
validatedInput.totalThoughts = validatedInput.thoughtNumber;
}
this.thoughtHistory.push(validatedInput);
if (validatedInput.branchFromThought && validatedInput.branchId) {
if (!this.branches[validatedInput.branchId]) {
this.branches[validatedInput.branchId] = [];
}
this.branches[validatedInput.branchId].push(validatedInput);
}
if (!this.disableThoughtLogging) {
const formattedThought = this.formatThought(validatedInput);
console.error(formattedThought);
}
return {
content: [{
type: "text",
text: JSON.stringify({
thoughtNumber: validatedInput.thoughtNumber,
totalThoughts: validatedInput.totalThoughts,
nextThoughtNeeded: validatedInput.nextThoughtNeeded,
branches: Object.keys(this.branches),
thoughtHistoryLength: this.thoughtHistory.length
}, null, 2)
}]
};
} catch (error) {
return {
content: [{
type: "text",
text: JSON.stringify({
error: error instanceof Error ? error.message : String(error),
status: 'failed'
}, null, 2)
}],
isError: true
};
}
}
}
const SEQUENTIAL_THINKING_TOOL: Tool = {
name: "sequentialthinking",
description: `A detailed tool for dynamic and reflective problem-solving through thoughts.
server.registerTool(
"sequentialthinking",
{
title: "Sequential Thinking",
description: `A detailed tool for dynamic and reflective problem-solving through thoughts.
This tool helps analyze problems through a flexible thinking process that can adapt and evolve.
Each thought can build on, question, or revise previous insights as understanding deepens.
@@ -165,21 +42,21 @@ Key features:
Parameters explained:
- thought: Your current thinking step, which can include:
* Regular analytical steps
* Revisions of previous thoughts
* Questions about previous decisions
* Realizations about needing more analysis
* Changes in approach
* Hypothesis generation
* Hypothesis verification
- next_thought_needed: True if you need more thinking, even if at what seemed like the end
- thought_number: Current number in sequence (can go beyond initial total if needed)
- total_thoughts: Current estimate of thoughts needed (can be adjusted up/down)
- is_revision: A boolean indicating if this thought revises previous thinking
- revises_thought: If is_revision is true, which thought number is being reconsidered
- branch_from_thought: If branching, which thought number is the branching point
- branch_id: Identifier for the current branch (if any)
- needs_more_thoughts: If reaching end but realizing more thoughts needed
* Regular analytical steps
* Revisions of previous thoughts
* Questions about previous decisions
* Realizations about needing more analysis
* Changes in approach
* Hypothesis generation
* Hypothesis verification
- nextThoughtNeeded: True if you need more thinking, even if at what seemed like the end
- thoughtNumber: Current number in sequence (can go beyond initial total if needed)
- totalThoughts: Current estimate of thoughts needed (can be adjusted up/down)
- isRevision: A boolean indicating if this thought revises previous thinking
- revisesThought: If is_revision is true, which thought number is being reconsidered
- branchFromThought: If branching, which thought number is the branching point
- branchId: Identifier for the current branch (if any)
- needsMoreThoughts: If reaching end but realizing more thoughts needed
You should:
1. Start with an initial estimate of needed thoughts, but be ready to adjust
@@ -192,87 +69,43 @@ You should:
8. Verify the hypothesis based on the Chain of Thought steps
9. Repeat the process until satisfied with the solution
10. Provide a single, ideally correct answer as the final output
11. Only set next_thought_needed to false when truly done and a satisfactory answer is reached`,
inputSchema: {
type: "object",
properties: {
thought: {
type: "string",
description: "Your current thinking step"
},
nextThoughtNeeded: {
type: "boolean",
description: "Whether another thought step is needed"
},
thoughtNumber: {
type: "integer",
description: "Current thought number",
minimum: 1
},
totalThoughts: {
type: "integer",
description: "Estimated total thoughts needed",
minimum: 1
},
isRevision: {
type: "boolean",
description: "Whether this revises previous thinking"
},
revisesThought: {
type: "integer",
description: "Which thought is being reconsidered",
minimum: 1
},
branchFromThought: {
type: "integer",
description: "Branching point thought number",
minimum: 1
},
branchId: {
type: "string",
description: "Branch identifier"
},
needsMoreThoughts: {
type: "boolean",
description: "If more thoughts are needed"
}
11. Only set nextThoughtNeeded to false when truly done and a satisfactory answer is reached`,
inputSchema: {
thought: z.string().describe("Your current thinking step"),
nextThoughtNeeded: z.boolean().describe("Whether another thought step is needed"),
thoughtNumber: z.number().int().min(1).describe("Current thought number (numeric value, e.g., 1, 2, 3)"),
totalThoughts: z.number().int().min(1).describe("Estimated total thoughts needed (numeric value, e.g., 5, 10)"),
isRevision: z.boolean().optional().describe("Whether this revises previous thinking"),
revisesThought: z.number().int().min(1).optional().describe("Which thought is being reconsidered"),
branchFromThought: z.number().int().min(1).optional().describe("Branching point thought number"),
branchId: z.string().optional().describe("Branch identifier"),
needsMoreThoughts: z.boolean().optional().describe("If more thoughts are needed")
},
outputSchema: {
thoughtNumber: z.number(),
totalThoughts: z.number(),
nextThoughtNeeded: z.boolean(),
branches: z.array(z.string()),
thoughtHistoryLength: z.number()
},
required: ["thought", "nextThoughtNeeded", "thoughtNumber", "totalThoughts"]
}
};
const server = new Server(
{
name: "sequential-thinking-server",
version: "0.2.0",
},
{
capabilities: {
tools: {},
},
async (args) => {
const result = thinkingServer.processThought(args);
if (result.isError) {
return result;
}
// Parse the JSON response to get structured content
const parsedContent = JSON.parse(result.content[0].text);
return {
content: result.content,
structuredContent: parsedContent
};
}
);
const thinkingServer = new SequentialThinkingServer();
server.setRequestHandler(ListToolsRequestSchema, async () => ({
tools: [SEQUENTIAL_THINKING_TOOL],
}));
server.setRequestHandler(CallToolRequestSchema, async (request) => {
if (request.params.name === "sequentialthinking") {
return thinkingServer.processThought(request.params.arguments);
}
return {
content: [{
type: "text",
text: `Unknown tool: ${request.params.name}`
}],
isError: true
};
});
async function runServer() {
const transport = new StdioServerTransport();
await server.connect(transport);

Some files were not shown because too many files have changed in this diff Show More