
Commit 52aa4e8 (initial commit, 0 parents)

Author: codecommit-at-091424816836
Commit message: first push

File tree: 6 files changed, +325 -0 lines


README.md (+49)

# MCP on AWS Bedrock

A simple and clear example for implementing and understanding Anthropic's MCP (on AWS Bedrock, with Nova).

## Overview

This project demonstrates how to implement and use Anthropic's Model Context Protocol (MCP) with AWS Bedrock. It provides a client implementation that can interact with MCP-enabled tools through AWS Bedrock's runtime service.

## Prerequisites

- Python 3.10 or higher
- AWS account with Bedrock access
- Configured AWS credentials
- UV package manager
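
For the "Configured AWS credentials" prerequisite, one common approach (a suggestion, not part of the original setup) is the AWS CLI or environment variables that boto3 picks up automatically:

```bash
# Option 1: interactive profile setup with the AWS CLI
aws configure

# Option 2: environment variables read by boto3
export AWS_ACCESS_KEY_ID=...
export AWS_SECRET_ACCESS_KEY=...
export AWS_DEFAULT_REGION=us-east-1   # pick a region where Bedrock and Nova are available
```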

## Project Structure

- `client.py`: Main client implementation for interacting with Bedrock and MCP tools
- `mcp_simple_tool/`: Directory containing the MCP tool implementation
  - `server.py`: MCP tool server implementation
  - `__main__.py`: Entry point for the tool
- `pyproject.toml`: Project dependencies and configuration

## Usage

Run the client with:

```bash
uv venv --python 3.13.1
uv pip install boto3
uv run client.py
```

The client will:

1. Initialize a connection to AWS Bedrock
2. Start the MCP tool server
3. List available tools and convert them to the format required by Bedrock
4. Handle communication between Bedrock and the MCP tools

## Features

- Seamless integration with AWS Bedrock runtime
- Tool format conversion for Bedrock compatibility
- Asynchronous communication handling
- Structured logging for debugging

## Contributing

Feel free to submit issues and pull requests to improve the implementation.

## License

MIT License

## References

- [Anthropic MCP](https://modelcontextprotocol.io/)
- [MCP Python SDK](https://github.com/modelcontextprotocol/python-sdk)
- [AWS Bedrock](https://aws.amazon.com/bedrock/)

client.py (+129)

import asyncio
import json
import boto3
import logging
from mcp.client.session import ClientSession
from mcp.client.stdio import StdioServerParameters, stdio_client

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)


def convert_tool_format(tools):
    """
    Converts tools into the format required for the Bedrock API.

    Args:
        tools (list): List of tool objects

    Returns:
        dict: Tools in the format required by Bedrock
    """
    converted_tools = []

    for tool in tools:
        converted_tool = {
            "toolSpec": {
                "name": tool.name,
                "description": tool.description,
                "inputSchema": {
                    "json": tool.inputSchema
                }
            }
        }
        converted_tools.append(converted_tool)

    return {"tools": converted_tools}


async def main():
    # Initialize Bedrock client
    bedrock = boto3.client('bedrock-runtime')

    async with stdio_client(
        StdioServerParameters(command="uv", args=["run", "mcp-simple-tool"])
    ) as (read, write):
        async with ClientSession(read, write) as session:
            await session.initialize()

            # List available tools and convert to serializable format
            tools_result = await session.list_tools()
            tools_list = [{"name": tool.name, "description": tool.description,
                           "inputSchema": tool.inputSchema} for tool in tools_result.tools]
            logger.info("Available tools: %s", tools_list)

            # Prepare the request for Nova Pro model
            system = [
                {
                    "text": "You are a helpful AI assistant. You have access to the following tools: " +
                            json.dumps(tools_list)
                }
            ]

            messages = [
                {
                    "role": "user",
                    "content": [{"text": "Hello, can you help me fetch the website 'https://www.example.com?'"}]
                }
            ]

            while True:
                # Call Bedrock with Nova Pro model
                response = bedrock.converse(
                    modelId='us.amazon.nova-pro-v1:0',
                    messages=messages,
                    system=system,
                    inferenceConfig={
                        "maxTokens": 300,
                        "topP": 0.1,
                        "temperature": 0.3
                    },
                    toolConfig=convert_tool_format(tools_result.tools)
                )

                output_message = response['output']['message']
                messages.append(output_message)
                stop_reason = response['stopReason']

                # Print the model's response
                for content in output_message['content']:
                    if 'text' in content:
                        print("Model:", content['text'])

                if stop_reason == 'tool_use':
                    # Tool use requested. Call the tool and send the result to the model.
                    tool_requests = response['output']['message']['content']
                    for tool_request in tool_requests:
                        if 'toolUse' in tool_request:
                            tool = tool_request['toolUse']
                            logger.info("Requesting tool %s. Request: %s",
                                        tool['name'], tool['toolUseId'])

                            try:
                                # Call the tool through the MCP session
                                tool_response = await session.call_tool(tool['name'], tool['input'])

                                # Convert tool response to expected format
                                tool_result = {
                                    "toolUseId": tool['toolUseId'],
                                    "content": [{"text": str(tool_response)}]
                                }
                            except Exception as err:
                                logger.error("Tool call failed: %s", str(err))
                                tool_result = {
                                    "toolUseId": tool['toolUseId'],
                                    "content": [{"text": f"Error: {str(err)}"}],
                                    "status": "error"
                                }

                            # Add tool result to messages
                            messages.append({
                                "role": "user",
                                "content": [{"toolResult": tool_result}]
                            })
                else:
                    # No more tool use requests, we're done
                    break

if __name__ == "__main__":
    asyncio.run(main())
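
For readers new to the Converse API's tool-use flow, here is a rough sketch (not part of the repository; IDs and content are illustrative) of the two message shapes the loop above exchanges when the model decides to call the `fetch` tool:

```python
# The model's reply arrives with stopReason == "tool_use" and a toolUse block
# in the assistant message content (toolUseId is a made-up example value).
assistant_turn = {
    "role": "assistant",
    "content": [
        {"toolUse": {
            "toolUseId": "tooluse_example123",
            "name": "fetch",
            "input": {"url": "https://www.example.com"},
        }}
    ],
}

# The client answers with a user message carrying the matching toolResult,
# exactly as built in the loop above, and then calls converse() again.
tool_result_turn = {
    "role": "user",
    "content": [
        {"toolResult": {
            "toolUseId": "tooluse_example123",
            "content": [{"text": "<html>...</html>"}],
        }}
    ],
}
```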

mcp_simple_tool/__init__.py

Whitespace-only changes.

mcp_simple_tool/__main__.py (+5)

import sys

from mcp_simple_tool.server import main  # package-qualified import so `python -m mcp_simple_tool` resolves

sys.exit(main())
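
Not part of the repository, but as a quick sanity check the tool server can be launched on its own, assuming `uv` is available (the console-script form is exactly what `client.py` spawns):

```bash
uv run mcp-simple-tool             # console script defined in pyproject.toml
uv run python -m mcp_simple_tool   # module entry point via __main__.py
```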

mcp_simple_tool/server.py (+100)

import anyio
import click
import httpx
import mcp.types as types
from mcp.server import Server


async def fetch_website(
    url: str,
) -> list[types.TextContent | types.ImageContent | types.EmbeddedResource]:
    headers = {
        "User-Agent": "MCP Test Server (github.com/modelcontextprotocol/python-sdk)"
    }
    async with httpx.AsyncClient(follow_redirects=True, headers=headers) as client:
        response = await client.get(url)
        response.raise_for_status()
        return [types.TextContent(type="text", text=response.text)]


@click.command()
@click.option("--port", default=8000, help="Port to listen on for SSE")
@click.option(
    "--transport",
    type=click.Choice(["stdio", "sse"]),
    default="stdio",
    help="Transport type",
)
def main(port: int, transport: str) -> int:
    app = Server("mcp-website-fetcher")

    @app.call_tool()
    async def fetch_tool(
        name: str, arguments: dict
    ) -> list[types.TextContent | types.ImageContent | types.EmbeddedResource]:
        if name != "fetch":
            raise ValueError(f"Unknown tool: {name}")
        if "url" not in arguments:
            raise ValueError("Missing required argument 'url'")
        return await fetch_website(arguments["url"])

    @app.list_tools()
    async def list_tools() -> list[types.Tool]:
        return [
            types.Tool(
                name="fetch",
                description="Fetches a website and returns its content",
                inputSchema={
                    "type": "object",
                    "required": ["url"],
                    "properties": {
                        "url": {
                            "type": "string",
                            "description": "URL to fetch",
                        }
                    },
                },
            )
        ]

    if transport == "sse":
        from mcp.server.sse import SseServerTransport
        from starlette.applications import Starlette
        from starlette.routing import Route

        sse = SseServerTransport("/messages")

        async def handle_sse(request):
            async with sse.connect_sse(
                request.scope, request.receive, request._send
            ) as streams:
                await app.run(
                    streams[0], streams[1], app.create_initialization_options()
                )

        async def handle_messages(request):
            await sse.handle_post_message(request.scope, request.receive, request._send)

        starlette_app = Starlette(
            debug=True,
            routes=[
                Route("/sse", endpoint=handle_sse),
                Route("/messages", endpoint=handle_messages, methods=["POST"]),
            ],
        )

        import uvicorn

        uvicorn.run(starlette_app, host="0.0.0.0", port=port)
    else:
        from mcp.server.stdio import stdio_server

        async def arun():
            async with stdio_server() as streams:
                await app.run(
                    streams[0], streams[1], app.create_initialization_options()
                )

        anyio.run(arun)

    return 0
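
To exercise the tool server without Bedrock, a minimal sketch that reuses the same MCP client APIs as `client.py` (the server command and the `fetch`/`url` names come from the files above; this snippet is an illustration, not part of the commit):

```python
import asyncio

from mcp.client.session import ClientSession
from mcp.client.stdio import StdioServerParameters, stdio_client


async def demo():
    # Spawn the stdio tool server the same way client.py does.
    async with stdio_client(
        StdioServerParameters(command="uv", args=["run", "mcp-simple-tool"])
    ) as (read, write):
        async with ClientSession(read, write) as session:
            await session.initialize()
            # Call the "fetch" tool directly with its single required argument.
            result = await session.call_tool("fetch", {"url": "https://www.example.com"})
            print(result)


asyncio.run(demo())
```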

pyproject.toml (+42)

[project]
name = "mcp-simple-tool"
version = "0.1.0"
description = "A simple MCP server exposing a website fetching tool"
readme = "README.md"
requires-python = ">=3.10"
authors = [{ name = "Anthropic, PBC." }]
maintainers = [
    { name = "David Soria Parra", email = "[email protected]" },
    { name = "Justin Spahr-Summers", email = "[email protected]" },
]
keywords = ["mcp", "llm", "automation", "web", "fetch"]
license = { text = "MIT" }
classifiers = [
    "Development Status :: 4 - Beta",
    "Intended Audience :: Developers",
    "License :: OSI Approved :: MIT License",
    "Programming Language :: Python :: 3",
    "Programming Language :: Python :: 3.10",
]
dependencies = ["anyio>=4.5", "click>=8.1.0", "httpx>=0.27", "mcp"]

[project.scripts]
mcp-simple-tool = "mcp_simple_tool.server:main"

[build-system]
requires = ["hatchling"]
build-backend = "hatchling.build"

[tool.hatch.build.targets.wheel]
packages = ["mcp_simple_tool"]

[tool.ruff.lint]
select = ["E", "F", "I"]
ignore = []

[tool.ruff]
line-length = 88
target-version = "py310"

[tool.uv]
dev-dependencies = ["pyright>=1.1.378", "pytest>=8.3.3", "ruff>=0.6.9"]
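
Because the `[project.scripts]` entry maps `mcp-simple-tool` to `mcp_simple_tool.server:main`, the server can also be started standalone, for example in SSE mode using the `--transport` and `--port` options defined in `server.py` (shown here as a hedged example, not a documented workflow of this repo):

```bash
uv run mcp-simple-tool --transport sse --port 8000
```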
