diff --git a/src/fetch/pyproject.toml b/src/fetch/pyproject.toml
index 1d43cae8..ed76fdcd 100644
--- a/src/fetch/pyproject.toml
+++ b/src/fetch/pyproject.toml
@@ -17,7 +17,7 @@ classifiers = [
]
dependencies = [
"markdownify>=0.13.1",
- "mcp>=1.0.0",
+ "mcp>=1.1.3",
"protego>=0.3.1",
"pydantic>=2.0.0",
"readabilipy>=0.2.0",
diff --git a/src/fetch/src/mcp_server_fetch/server.py b/src/fetch/src/mcp_server_fetch/server.py
index ef029a49..b3f670ce 100644
--- a/src/fetch/src/mcp_server_fetch/server.py
+++ b/src/fetch/src/mcp_server_fetch/server.py
@@ -7,6 +7,7 @@ from mcp.shared.exceptions import McpError
from mcp.server import Server
from mcp.server.stdio import stdio_server
from mcp.types import (
+ ErrorData,
GetPromptResult,
Prompt,
PromptArgument,
@@ -79,15 +80,15 @@ async def check_may_autonomously_fetch_url(url: str, user_agent: str) -> None:
headers={"User-Agent": user_agent},
)
except HTTPError:
-        raise McpError(
-            INTERNAL_ERROR,
-            f"Failed to fetch robots.txt {robot_txt_url} due to a connection issue",
-        )
+        raise McpError(ErrorData(
+            code=INTERNAL_ERROR,
+            message=f"Failed to fetch robots.txt {robot_txt_url} due to a connection issue",
+        ))
if response.status_code in (401, 403):
-        raise McpError(
-            INTERNAL_ERROR,
-            f"When fetching robots.txt ({robot_txt_url}), received status {response.status_code} so assuming that autonomous fetching is not allowed, the user can try manually fetching by using the fetch prompt",
-        )
+        raise McpError(ErrorData(
+            code=INTERNAL_ERROR,
+            message=f"When fetching robots.txt ({robot_txt_url}), received status {response.status_code} so assuming that autonomous fetching is not allowed, the user can try manually fetching by using the fetch prompt",
+        ))
elif 400 <= response.status_code < 500:
return
robot_txt = response.text
@@ -96,7 +97,7 @@ async def check_may_autonomously_fetch_url(url: str, user_agent: str) -> None:
)
robot_parser = Protego.parse(processed_robot_txt)
if not robot_parser.can_fetch(str(url), user_agent):
-        raise McpError(
-            INTERNAL_ERROR,
-            f"The sites robots.txt ({robot_txt_url}), specifies that autonomous fetching of this page is not allowed, "
+        raise McpError(ErrorData(
+            code=INTERNAL_ERROR,
+            message=f"The sites robots.txt ({robot_txt_url}), specifies that autonomous fetching of this page is not allowed, "
f"{user_agent}\n"
@@ -104,7 +105,7 @@ async def check_may_autonomously_fetch_url(url: str, user_agent: str) -> None:
f"\n{robot_txt}\n\n"
f"The assistant must let the user know that it failed to view the page. The assistant may provide further guidance based on the above information.\n"
f"The assistant can tell the user that they can try manually fetching the page by using the fetch prompt within their UI.",
- )
+ ))
async def fetch_url(
@@ -124,12 +125,12 @@ async def fetch_url(
timeout=30,
)
except HTTPError as e:
- raise McpError(INTERNAL_ERROR, f"Failed to fetch {url}: {e!r}")
+        raise McpError(ErrorData(code=INTERNAL_ERROR, message=f"Failed to fetch {url}: {e!r}"))
if response.status_code >= 400:
-        raise McpError(
-            INTERNAL_ERROR,
-            f"Failed to fetch {url} - status code {response.status_code}",
-        )
+        raise McpError(ErrorData(
+            code=INTERNAL_ERROR,
+            message=f"Failed to fetch {url} - status code {response.status_code}",
+        ))
page_raw = response.text
@@ -221,11 +222,11 @@ Although originally you did not have internet access, and were advised to refuse
try:
args = Fetch(**arguments)
except ValueError as e:
- raise McpError(INVALID_PARAMS, str(e))
+        raise McpError(ErrorData(code=INVALID_PARAMS, message=str(e)))
url = str(args.url)
if not url:
- raise McpError(INVALID_PARAMS, "URL is required")
+        raise McpError(ErrorData(code=INVALID_PARAMS, message="URL is required"))
if not ignore_robots_txt:
await check_may_autonomously_fetch_url(url, user_agent_autonomous)
@@ -253,7 +254,7 @@ Although originally you did not have internet access, and were advised to refuse
@server.get_prompt()
async def get_prompt(name: str, arguments: dict | None) -> GetPromptResult:
if not arguments or "url" not in arguments:
- raise McpError(INVALID_PARAMS, "URL is required")
+        raise McpError(ErrorData(code=INVALID_PARAMS, message="URL is required"))
url = arguments["url"]