diff --git a/README.md b/README.md
index 7c52b4ea..57ac8cc7 100644
--- a/README.md
+++ b/README.md
@@ -5,12 +5,16 @@ to community built servers and additional resources.
The servers in this repository showcase the versatility and extensibility of MCP, demonstrating how it can be used to give Large Language Models (LLMs) secure, controlled access to tools and data sources.
Typically, each MCP server is implemented with an MCP SDK:
+
- [C# MCP SDK](https://github.com/modelcontextprotocol/csharp-sdk)
- [Go MCP SDK](https://github.com/modelcontextprotocol/go-sdk)
- [Java MCP SDK](https://github.com/modelcontextprotocol/java-sdk)
- [Kotlin MCP SDK](https://github.com/modelcontextprotocol/kotlin-sdk)
- [Python MCP SDK](https://github.com/modelcontextprotocol/python-sdk)
-- [Typescript MCP SDK](https://github.com/modelcontextprotocol/typescript-sdk)
+- [Ruby MCP SDK](https://github.com/modelcontextprotocol/ruby-sdk)
+- [Rust MCP SDK](https://github.com/modelcontextprotocol/rust-sdk)
+- [Swift MCP SDK](https://github.com/modelcontextprotocol/swift-sdk)
+- [TypeScript MCP SDK](https://github.com/modelcontextprotocol/typescript-sdk)
> Note: Lists in this README are maintained in alphabetical order to minimize merge conflicts when adding new items.
@@ -228,7 +232,7 @@ Official integrations are maintained by companies building production ready MCP
- **[Linear](https://linear.app/docs/mcp)** - Search, create, and update Linear issues, projects, and comments.
- **[Lingo.dev](https://github.com/lingodotdev/lingo.dev/blob/main/mcp.md)** - Make your AI agent speak every language on the planet, using [Lingo.dev](https://lingo.dev) Localization Engine.
- **[LinkedIn MCP Runner](https://github.com/ertiqah/linkedin-mcp-runner)** - Write, edit, and schedule LinkedIn posts right from ChatGPT and Claude with [LiGo](https://ligo.ertiqah.com/).
-- **[Lisply](https://github.com/gornskew/lisply-mcp)** - Flexible frontend for compliant Lisp-speaking backends.
+- **[Lisply](https://github.com/gornskew/lisply-mcp)** - Flexible frontend for compliant Lisp-speaking backends.
- **[Litmus.io](https://github.com/litmusautomation/litmus-mcp-server)** - Official MCP server for configuring [Litmus](https://litmus.io) Edge for Industrial Data Collection, Edge Analytics & Industrial AI.
- **[Liveblocks](https://github.com/liveblocks/liveblocks-mcp-server)** - Ready‑made features for AI & human collaboration—use this to develop your [Liveblocks](https://liveblocks.io) app quicker.
- **[Logfire](https://github.com/pydantic/logfire-mcp)** - Provides access to OpenTelemetry traces and metrics through Logfire.
@@ -241,7 +245,6 @@ Official integrations are maintained by companies building production ready MCP
- **[MCP Discovery](https://github.com/rust-mcp-stack/mcp-discovery)** - A lightweight CLI tool built in Rust for discovering MCP server capabilities.
- **[MCP Toolbox for Databases](https://github.com/googleapis/genai-toolbox)** - Open source MCP server specializing in easy, fast, and secure tools for Databases. Supports AlloyDB, BigQuery, Bigtable, Cloud SQL, Dgraph, MySQL, Neo4j, Postgres, Spanner, and more.
- **[Meilisearch](https://github.com/meilisearch/meilisearch-mcp)** - Interact & query with Meilisearch (Full-text & semantic search API)
-- **[Memgraph](https://github.com/memgraph/mcp-memgraph)** - Query your data in [Memgraph](https://memgraph.com/) graph database.
- **[Memgraph](https://github.com/memgraph/ai-toolkit/tree/main/integrations/mcp-memgraph)** - Query your data in [Memgraph](https://memgraph.com/) graph database.
- **[Mercado Libre](https://mcp.mercadolibre.com/)** - Mercado Libre's official MCP server.
- **[Mercado Pago](https://mcp.mercadopago.com/)** - Mercado Pago's official MCP server.
@@ -408,7 +411,7 @@ A growing set of community-developed and maintained servers demonstrates various
- **[Ableton Live](https://github.com/ahujasid/ableton-mcp)** (by ahujasid) - Ableton integration allowing prompt enabled music creation.
- **[Actor Critic Thinking](https://github.com/aquarius-wing/actor-critic-thinking-mcp)** - Actor-critic thinking for performance evaluation
- **[AgentBay](https://github.com/Michael98671/agentbay)** - An MCP server for providing serverless cloud infrastructure for AI agents.
-- **[AgentMode](https://www.agentmode.app) - Connect to dozens of databases, data warehouses, Github & more, from a single MCP server. Run the Docker image locally, in the cloud, or on-premise.
+- **[AgentMode](https://www.agentmode.app)** - Connect to dozens of databases, data warehouses, Github & more, from a single MCP server. Run the Docker image locally, in the cloud, or on-premise.
- **[AI Agent Marketplace Index](https://github.com/AI-Agent-Hub/ai-agent-marketplace-index-mcp)** - MCP server to search more than 5000+ AI agents and tools of various categories from [AI Agent Marketplace Index](http://www.deepnlp.org/store/ai-agent) and monitor traffic of AI Agents.
- **[AI Tasks](https://github.com/jbrinkman/valkey-ai-tasks)** - Let the AI manage complex plans with integrated task management and tracking tools. Supports STDIO, SSE and Streamable HTTP transports.
- **[ai-Bible](https://github.com/AdbC99/ai-bible)** - Search the bible reliably and repeatably [ai-Bible Labs](https://ai-bible.com)
@@ -1069,6 +1072,7 @@ These are high-level frameworks that make it easier to build MCP servers or clie
* **[mcp_sse (Elixir)](https://github.com/kEND/mcp_sse)** An SSE implementation in Elixir for rapidly creating MCP servers.
* **[Next.js MCP Server Template](https://github.com/vercel-labs/mcp-for-next.js)** (Typescript) - A starter Next.js project that uses the MCP Adapter to allow MCP clients to connect and access resources.
* **[Quarkus MCP Server SDK](https://github.com/quarkiverse/quarkus-mcp-server)** (Java)
+- **[R mcptools](https://github.com/posit-dev/mcptools)** - An R SDK for creating R-based MCP servers and retrieving functionality from third-party MCP servers as R functions.
* **[SAP ABAP MCP Server SDK](https://github.com/abap-ai/mcp)** - Build SAP ABAP based MCP servers. ABAP 7.52 based with 7.02 downport; runs on R/3 & S/4HANA on-premises, currently not cloud-ready.
* **[Spring AI MCP Server](https://docs.spring.io/spring-ai/reference/api/mcp/mcp-server-boot-starter-docs.html)** - Provides auto-configuration for setting up an MCP server in Spring Boot applications.
* **[Template MCP Server](https://github.com/mcpdotdirect/template-mcp-server)** - A CLI tool to create a new Model Context Protocol server project with TypeScript support, dual transport options, and an extensible structure
@@ -1091,6 +1095,7 @@ These are high-level frameworks that make it easier to build MCP servers or clie
Additional resources on MCP.
+- **[A2A-MCP Java Bridge](https://github.com/vishalmysore/a2ajava)** - A2AJava brings powerful A2A-MCP integration directly into your Java applications. It enables developers to annotate standard Java methods and instantly expose them as MCP Server, A2A-discoverable actions — with no boilerplate or service registration overhead.
- **[AiMCP](https://www.aimcp.info)** - A collection of MCP clients&servers to find the right mcp tools by **[Hekmon](https://github.com/hekmon8)**
- **[Awesome Crypto MCP Servers by badkk](https://github.com/badkk/awesome-crypto-mcp-servers)** - A curated list of MCP servers by **[Luke Fan](https://github.com/badkk)**
- **[Awesome MCP Servers by appcypher](https://github.com/appcypher/awesome-mcp-servers)** - A curated list of MCP servers by **[Stephen Akinyemi](https://github.com/appcypher)**
diff --git a/package-lock.json b/package-lock.json
index c785a237..6a9bac93 100644
--- a/package-lock.json
+++ b/package-lock.json
@@ -6159,7 +6159,7 @@
"version": "0.6.2",
"license": "MIT",
"dependencies": {
- "@modelcontextprotocol/sdk": "^1.12.3",
+ "@modelcontextprotocol/sdk": "^1.17.0",
"diff": "^5.1.0",
"glob": "^10.3.10",
"minimatch": "^10.0.1",
@@ -6182,9 +6182,9 @@
}
},
"src/filesystem/node_modules/@modelcontextprotocol/sdk": {
- "version": "1.12.3",
- "resolved": "https://registry.npmjs.org/@modelcontextprotocol/sdk/-/sdk-1.12.3.tgz",
- "integrity": "sha512-DyVYSOafBvk3/j1Oka4z5BWT8o4AFmoNyZY9pALOm7Lh3GZglR71Co4r4dEUoqDWdDazIZQHBe7J2Nwkg6gHgQ==",
+ "version": "1.17.0",
+ "resolved": "https://registry.npmjs.org/@modelcontextprotocol/sdk/-/sdk-1.17.0.tgz",
+ "integrity": "sha512-qFfbWFA7r1Sd8D697L7GkTd36yqDuTkvz0KfOGkgXR8EUhQn3/EDNIR/qUdQNMT8IjmasBvHWuXeisxtXTQT2g==",
"license": "MIT",
"dependencies": {
"ajv": "^6.12.6",
@@ -6192,6 +6192,7 @@
"cors": "^2.8.5",
"cross-spawn": "^7.0.5",
"eventsource": "^3.0.2",
+ "eventsource-parser": "^3.0.0",
"express": "^5.0.1",
"express-rate-limit": "^7.5.0",
"pkce-challenge": "^5.0.0",
diff --git a/src/filesystem/README.md b/src/filesystem/README.md
index cd6d0a9f..ac63f39a 100644
--- a/src/filesystem/README.md
+++ b/src/filesystem/README.md
@@ -9,11 +9,11 @@ Node.js server implementing Model Context Protocol (MCP) for filesystem operatio
- Move files/directories
- Search files
- Get file metadata
-- Dynamic directory access control via [Roots](https://modelcontextprotocol.io/docs/concepts/roots)
+- Dynamic directory access control via [Roots](https://modelcontextprotocol.io/docs/learn/client-concepts#roots)
## Directory Access Control
-The server uses a flexible directory access control system. Directories can be specified via command-line arguments or dynamically via [Roots](https://modelcontextprotocol.io/docs/concepts/roots).
+The server uses a flexible directory access control system. Directories can be specified via command-line arguments or dynamically via [Roots](https://modelcontextprotocol.io/docs/learn/client-concepts#roots).
### Method 1: Command-line Arguments
Specify Allowed directories when starting the server:
@@ -22,7 +22,7 @@ mcp-server-filesystem /path/to/dir1 /path/to/dir2
```
### Method 2: MCP Roots (Recommended)
-MCP clients that support [Roots](https://modelcontextprotocol.io/docs/concepts/roots) can dynamically update the Allowed directories.
+MCP clients that support [Roots](https://modelcontextprotocol.io/docs/learn/client-concepts#roots) can dynamically update the Allowed directories.
Roots notified by Client to Server, completely replace any server-side Allowed directories when provided.
@@ -70,10 +70,19 @@ The server's directory access control follows this flow:
### Tools
-- **read_file**
- - Read complete contents of a file
- - Input: `path` (string)
- - Reads complete file contents with UTF-8 encoding
+- **read_text_file**
+ - Read complete contents of a file as text
+ - Inputs:
+ - `path` (string)
+ - `head` (number, optional): First N lines
+ - `tail` (number, optional): Last N lines
+ - Always treats the file as UTF-8 text regardless of extension
+
+- **read_media_file**
+ - Read an image or audio file
+ - Inputs:
+ - `path` (string)
+ - Streams the file and returns base64 data with the corresponding MIME type
- **read_multiple_files**
- Read multiple files simultaneously
diff --git a/src/filesystem/index.ts b/src/filesystem/index.ts
index 524c9c26..6723f436 100644
--- a/src/filesystem/index.ts
+++ b/src/filesystem/index.ts
@@ -10,6 +10,7 @@ import {
type Root,
} from "@modelcontextprotocol/sdk/types.js";
import fs from "fs/promises";
+import { createReadStream } from "fs";
import path from "path";
import os from 'os';
import { randomBytes } from 'crypto';
@@ -116,12 +117,16 @@ async function validatePath(requestedPath: string): Promise<string> {
}
// Schema definitions
-const ReadFileArgsSchema = z.object({
+const ReadTextFileArgsSchema = z.object({
path: z.string(),
tail: z.number().optional().describe('If provided, returns only the last N lines of the file'),
head: z.number().optional().describe('If provided, returns only the first N lines of the file')
});
+const ReadMediaFileArgsSchema = z.object({
+ path: z.string()
+});
+
const ReadMultipleFilesArgsSchema = z.object({
paths: z.array(z.string()),
});
@@ -374,10 +379,10 @@ async function applyFileEdits(
function formatSize(bytes: number): string {
const units = ['B', 'KB', 'MB', 'GB', 'TB'];
if (bytes === 0) return '0 B';
-
+
const i = Math.floor(Math.log(bytes) / Math.log(1024));
if (i === 0) return `${bytes} ${units[i]}`;
-
+
return `${(bytes / Math.pow(1024, i)).toFixed(2)} ${units[i]}`;
}
@@ -386,9 +391,9 @@ async function tailFile(filePath: string, numLines: number): Promise<string> {
const CHUNK_SIZE = 1024; // Read 1KB at a time
const stats = await fs.stat(filePath);
const fileSize = stats.size;
-
+
if (fileSize === 0) return '';
-
+
// Open file for reading
const fileHandle = await fs.open(filePath, 'r');
try {
@@ -397,36 +402,36 @@ async function tailFile(filePath: string, numLines: number): Promise {
let chunk = Buffer.alloc(CHUNK_SIZE);
let linesFound = 0;
let remainingText = '';
-
+
// Read chunks from the end of the file until we have enough lines
while (position > 0 && linesFound < numLines) {
const size = Math.min(CHUNK_SIZE, position);
position -= size;
-
+
const { bytesRead } = await fileHandle.read(chunk, 0, size, position);
if (!bytesRead) break;
-
+
// Get the chunk as a string and prepend any remaining text from previous iteration
const readData = chunk.slice(0, bytesRead).toString('utf-8');
const chunkText = readData + remainingText;
-
+
// Split by newlines and count
const chunkLines = normalizeLineEndings(chunkText).split('\n');
-
+
// If this isn't the end of the file, the first line is likely incomplete
// Save it to prepend to the next chunk
if (position > 0) {
remainingText = chunkLines[0];
chunkLines.shift(); // Remove the first (incomplete) line
}
-
+
// Add lines to our result (up to the number we need)
for (let i = chunkLines.length - 1; i >= 0 && linesFound < numLines; i--) {
lines.unshift(chunkLines[i]);
linesFound++;
}
}
-
+
return lines.join('\n');
} finally {
await fileHandle.close();
@@ -441,14 +446,14 @@ async function headFile(filePath: string, numLines: number): Promise<string> {
let buffer = '';
let bytesRead = 0;
const chunk = Buffer.alloc(1024); // 1KB buffer
-
+
// Read chunks and count lines until we have enough or reach EOF
while (lines.length < numLines) {
const result = await fileHandle.read(chunk, 0, chunk.length, bytesRead);
if (result.bytesRead === 0) break; // End of file
bytesRead += result.bytesRead;
buffer += chunk.slice(0, result.bytesRead).toString('utf-8');
-
+
const newLineIndex = buffer.lastIndexOf('\n');
if (newLineIndex !== -1) {
const completeLines = buffer.slice(0, newLineIndex).split('\n');
@@ -459,32 +464,63 @@ async function headFile(filePath: string, numLines: number): Promise {
}
}
}
-
+
// If there is leftover content and we still need lines, add it
if (buffer.length > 0 && lines.length < numLines) {
lines.push(buffer);
}
-
+
return lines.join('\n');
} finally {
await fileHandle.close();
}
}
+// Reads a file as a stream of buffers, concatenates them, and then encodes
+// the result to a Base64 string. This is a memory-efficient way to handle
+// binary data from a stream before the final encoding.
+async function readFileAsBase64Stream(filePath: string): Promise<string> {
+ return new Promise((resolve, reject) => {
+ const stream = createReadStream(filePath);
+ const chunks: Buffer[] = [];
+ stream.on('data', (chunk) => {
+ chunks.push(chunk as Buffer);
+ });
+ stream.on('end', () => {
+ const finalBuffer = Buffer.concat(chunks);
+ resolve(finalBuffer.toString('base64'));
+ });
+ stream.on('error', (err) => reject(err));
+ });
+}
+
// Tool handlers
server.setRequestHandler(ListToolsRequestSchema, async () => {
return {
tools: [
{
name: "read_file",
+ description: "Read the complete contents of a file as text. DEPRECATED: Use read_text_file instead.",
+ inputSchema: zodToJsonSchema(ReadTextFileArgsSchema) as ToolInput,
+ },
+ {
+ name: "read_text_file",
description:
- "Read the complete contents of a file from the file system. " +
+ "Read the complete contents of a file from the file system as text. " +
"Handles various text encodings and provides detailed error messages " +
"if the file cannot be read. Use this tool when you need to examine " +
"the contents of a single file. Use the 'head' parameter to read only " +
"the first N lines of a file, or the 'tail' parameter to read only " +
- "the last N lines of a file. Only works within allowed directories.",
- inputSchema: zodToJsonSchema(ReadFileArgsSchema) as ToolInput,
+ "the last N lines of a file. Operates on the file as text regardless of extension. " +
+ "Only works within allowed directories.",
+ inputSchema: zodToJsonSchema(ReadTextFileArgsSchema) as ToolInput,
+ },
+ {
+ name: "read_media_file",
+ description:
+ "Read an image or audio file. Returns the base64 encoded data and MIME type. " +
+ "Only works within allowed directories.",
+ inputSchema: zodToJsonSchema(ReadMediaFileArgsSchema) as ToolInput,
},
{
name: "read_multiple_files",
@@ -597,17 +633,18 @@ server.setRequestHandler(CallToolRequestSchema, async (request) => {
const { name, arguments: args } = request.params;
switch (name) {
- case "read_file": {
- const parsed = ReadFileArgsSchema.safeParse(args);
+ case "read_file":
+ case "read_text_file": {
+ const parsed = ReadTextFileArgsSchema.safeParse(args);
if (!parsed.success) {
- throw new Error(`Invalid arguments for read_file: ${parsed.error}`);
+ throw new Error(`Invalid arguments for read_text_file: ${parsed.error}`);
}
const validPath = await validatePath(parsed.data.path);
-
+
if (parsed.data.head && parsed.data.tail) {
throw new Error("Cannot specify both head and tail parameters simultaneously");
}
-
+
if (parsed.data.tail) {
// Use memory-efficient tail implementation for large files
const tailContent = await tailFile(validPath, parsed.data.tail);
@@ -615,7 +652,7 @@ server.setRequestHandler(CallToolRequestSchema, async (request) => {
content: [{ type: "text", text: tailContent }],
};
}
-
+
if (parsed.data.head) {
// Use memory-efficient head implementation for large files
const headContent = await headFile(validPath, parsed.data.head);
@@ -623,13 +660,45 @@ server.setRequestHandler(CallToolRequestSchema, async (request) => {
content: [{ type: "text", text: headContent }],
};
}
-
+
const content = await fs.readFile(validPath, "utf-8");
return {
content: [{ type: "text", text: content }],
};
}
+ case "read_media_file": {
+ const parsed = ReadMediaFileArgsSchema.safeParse(args);
+ if (!parsed.success) {
+ throw new Error(`Invalid arguments for read_media_file: ${parsed.error}`);
+ }
+ const validPath = await validatePath(parsed.data.path);
+ const extension = path.extname(validPath).toLowerCase();
+      const mimeTypes: Record<string, string> = {
+ ".png": "image/png",
+ ".jpg": "image/jpeg",
+ ".jpeg": "image/jpeg",
+ ".gif": "image/gif",
+ ".webp": "image/webp",
+ ".bmp": "image/bmp",
+ ".svg": "image/svg+xml",
+ ".mp3": "audio/mpeg",
+ ".wav": "audio/wav",
+ ".ogg": "audio/ogg",
+ ".flac": "audio/flac",
+ };
+ const mimeType = mimeTypes[extension] || "application/octet-stream";
+ const data = await readFileAsBase64Stream(validPath);
+ const type = mimeType.startsWith("image/")
+ ? "image"
+ : mimeType.startsWith("audio/")
+ ? "audio"
+ : "blob";
+ return {
+ content: [{ type, data, mimeType }],
+ };
+ }
+
case "read_multiple_files": {
const parsed = ReadMultipleFilesArgsSchema.safeParse(args);
if (!parsed.success) {
@@ -734,7 +803,7 @@ server.setRequestHandler(CallToolRequestSchema, async (request) => {
}
const validPath = await validatePath(parsed.data.path);
const entries = await fs.readdir(validPath, { withFileTypes: true });
-
+
// Get detailed information for each entry
const detailedEntries = await Promise.all(
entries.map(async (entry) => {
@@ -757,7 +826,7 @@ server.setRequestHandler(CallToolRequestSchema, async (request) => {
}
})
);
-
+
// Sort entries based on sortBy parameter
const sortedEntries = [...detailedEntries].sort((a, b) => {
if (parsed.data.sortBy === 'size') {
@@ -766,29 +835,29 @@ server.setRequestHandler(CallToolRequestSchema, async (request) => {
// Default sort by name
return a.name.localeCompare(b.name);
});
-
+
// Format the output
- const formattedEntries = sortedEntries.map(entry =>
+ const formattedEntries = sortedEntries.map(entry =>
`${entry.isDirectory ? "[DIR]" : "[FILE]"} ${entry.name.padEnd(30)} ${
entry.isDirectory ? "" : formatSize(entry.size).padStart(10)
}`
);
-
+
// Add summary
const totalFiles = detailedEntries.filter(e => !e.isDirectory).length;
const totalDirs = detailedEntries.filter(e => e.isDirectory).length;
const totalSize = detailedEntries.reduce((sum, entry) => sum + (entry.isDirectory ? 0 : entry.size), 0);
-
+
const summary = [
"",
`Total: ${totalFiles} files, ${totalDirs} directories`,
`Combined size: ${formatSize(totalSize)}`
];
-
+
return {
- content: [{
- type: "text",
- text: [...formattedEntries, ...summary].join("\n")
+ content: [{
+ type: "text",
+ text: [...formattedEntries, ...summary].join("\n")
}],
};
}
diff --git a/src/filesystem/package.json b/src/filesystem/package.json
index 482f0cce..4d3ac320 100644
--- a/src/filesystem/package.json
+++ b/src/filesystem/package.json
@@ -20,7 +20,7 @@
"test": "jest --config=jest.config.cjs --coverage"
},
"dependencies": {
- "@modelcontextprotocol/sdk": "^1.12.3",
+ "@modelcontextprotocol/sdk": "^1.17.0",
"diff": "^5.1.0",
"glob": "^10.3.10",
"minimatch": "^10.0.1",
@@ -38,4 +38,4 @@
"ts-node": "^10.9.2",
"typescript": "^5.8.2"
}
-}
\ No newline at end of file
+}