Skip to content

Commit 398839a

Browse files
authored
feat: add chat proto image support (#76)
* feat: add chat proto image support
* chore: update deployed agent addresses
1 parent a718534 commit 398839a

File tree

16 files changed

+412
-58
lines changed

16 files changed

+412
-58
lines changed
Lines changed: 8 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,8 @@
1+
# ASI1-Mini Agent
2+
3+
![domain:knowledge-base](https://img.shields.io/badge/knowledge--base-3D8BD3?style=flat&logo=data%3Aimage%2Fsvg%2Bxml%3Bbase64%2CPHN2ZyB3aWR0aD0iMTAiIGhlaWdodD0iOCIgdmlld0JveD0iMCAwIDEwIDgiIGZpbGw9Im5vbmUiIHhtbG5zPSJodHRwOi8vd3d3LnczLm9yZy8yMDAwL3N2ZyI%2BCjxwYXRoIGQ9Ik00Ljc1IDAuNTAwMDA4VjYuMzc1MDFMMS41IDUuNzUwMDFWMC41NDY4ODNDMS41IDAuMjM0MzgzIDEuNzY1NjIgNy44MzMzNWUtMDYgMi4wNzgxMiAwLjA2MjUwNzhMNC43NSAwLjUwMDAwOFpNMS4zOTA2MiA2LjM0Mzc2TDUgNy4wNjI1MUw4LjU5Mzc1IDYuMzQzNzZDOC44MjgxMiA2LjI5Njg4IDkgNi4wNzgxMyA5IDUuODQzNzZWMC40NTMxMzNMOS4zOTA2MiAwLjM3NTAwOEM5LjcwMzEyIDAuMzEyNTA4IDEwIDAuNTQ2ODgzIDEwIDAuODU5MzgzVjYuNTkzNzZDMTAgNi44NDM3NiA5LjgyODEyIDcuMDQ2ODggOS41OTM3NSA3LjA5Mzc2TDUgOC4wMDAwMUwwLjM5MDYyNSA3LjA5Mzc2QzAuMTU2MjUgNy4wNDY4OCAwIDYuODI4MTMgMCA2LjU5Mzc2VjAuODU5MzgzQzAgMC41NDY4ODMgMC4yODEyNSAwLjMxMjUwOCAwLjU5Mzc1IDAuMzc1MDA4TDEgMC40NTMxMzNWNS44NDM3NkMxIDYuMDkzNzYgMS4xNTYyNSA2LjI5Njg4IDEuMzkwNjIgNi4zNDM3NlpNNS4yNSA2LjM3NTAxVjAuNTAwMDA4TDcuOTA2MjUgMC4wNjI1MDc4QzguMjE4NzUgNy44MzMzNWUtMDYgOC41IDAuMjM0MzgzIDguNSAwLjU0Njg4M1Y1Ljc1MDAxTDUuMjUgNi4zNzUwMVoiIGZpbGw9IndoaXRlIi8%2BCjwvc3ZnPgo%3D)
4+
![tech:llm](https://img.shields.io/badge/llm-E85D2E?style=flat&logo=data%3Aimage%2Fsvg%2Bxml%3Bbase64%2CPHN2ZyB3aWR0aD0iMTAiIGhlaWdodD0iOCIgdmlld0JveD0iMCAwIDEwIDgiIGZpbGw9Im5vbmUiIHhtbG5zPSJodHRwOi8vd3d3LnczLm9yZy8yMDAwL3N2ZyI%2BCjxwYXRoIGQ9Ik00LjUgMUM0LjUgMS4yMTg3NSA0LjQyMTg4IDEuNDIxODggNC4zMTI1IDEuNTc4MTJMNC43NjU2MiAyLjU2MjVDNC45MjE4OCAyLjUzMTI1IDUuMDc4MTIgMi41IDUuMjUgMi41QzUuODEyNSAyLjUgNi4zMjgxMiAyLjcxODc1IDYuNzE4NzUgMy4wNjI1TDggMi4xMDkzOEM4IDIuMDc4MTIgOCAyLjA0Njg4IDggMkM4IDEuNDUzMTIgOC40Mzc1IDEgOSAxQzkuNTQ2ODggMSAxMCAxLjQ1MzEyIDEwIDJDMTAgMi41NjI1IDkuNTQ2ODggMyA5IDNDOC44NDM3NSAzIDguNzE4NzUgMi45ODQzOCA4LjU5Mzc1IDIuOTIxODhMNy4zMTI1IDMuODU5MzhDNy40MjE4OCA0LjE0MDYyIDcuNSA0LjQzNzUgNy41IDQuNzVDNy41IDUgNy40NTMxMiA1LjIzNDM4IDcuMzc1IDUuNDUzMTJMOC41IDYuMTI1QzguNjU2MjUgNi4wNDY4OCA4LjgxMjUgNiA5IDZDOS41NDY4OCA2IDEwIDYuNDUzMTIgMTAgN0MxMCA3LjU2MjUgOS41NDY4OCA4IDkgOEM4LjQzNzUgOCA4IDcuNTYyNSA4IDdWNi45ODQzOEw2Ljg1OTM4IDYuMzEyNUM2LjQ1MzEyIDYuNzM0MzggNS44NzUgNyA1LjI1IDdDNC4xNzE4OCA3IDMuMjgxMjUgNi4yNjU2MiAzLjA0Njg4IDUuMjVIMS44NTkzOEMxLjY4NzUgNS41NjI1IDEuMzU5MzggNS43NSAxIDUuNzVDMC40Mzc1IDUuNzUgMCA1LjMxMjUgMCA0Ljc1QzAgNC4yMDMxMiAwLjQzNzUgMy43NSAxIDMuNzVDMS4zNTkzOCAzLjc1IDEuNjg3NSAzLjk1MzEyIDEuODU5MzggNC4yNUgzLjA0Njg4QzMuMTcxODggMy43MzQzOCAzLjQ1MzEyIDMuMjk2ODggMy44NTkzOCAyLjk4NDM4TDMuNDA2MjUgMkMyLjg5MDYyIDEuOTUzMTIgMi41IDEuNTMxMjUgMi41IDFDMi41IDAuNDUzMTI1IDIuOTM3NSAwIDMuNSAwQzQuMDQ2ODggMCA0LjUgMC40NTMxMjUgNC41IDFaTTUuMjUgNS41QzUuNTE1NjIgNS41IDUuNzUgNS4zNTkzOCA1Ljg5MDYyIDUuMTI1QzYuMDMxMjUgNC45MDYyNSA2LjAzMTI1IDQuNjA5MzggNS44OTA2MiA0LjM3NUM1Ljc1IDQuMTU2MjUgNS41MTU2MiA0IDUuMjUgNEM0Ljk2ODc1IDQgNC43MzQzOCA0LjE1NjI1IDQuNTkzNzUgNC4zNzVDNC40NTMxMiA0LjYwOTM4IDQuNDUzMTIgNC45MDYyNSA0LjU5Mzc1IDUuMTI1QzQuNzM0MzggNS4zNTkzOCA0Ljk2ODc1IDUuNSA1LjI1IDUuNVoiIGZpbGw9IndoaXRlIi8%2BCjwvc3ZnPgo%3D)
5+
[![link to source code](https://img.shields.io/badge/Source%20Code-E8ECF1?style=flat&logo=data%3Aimage%2Fsvg%2Bxml%3Bbase64%2CPHN2ZyB3aWR0aD0iOCIgaGVpZ2h0PSI4IiB2aWV3Qm94PSIwIDAgOCA4IiBmaWxsPSJub25lIiB4bWxucz0iaHR0cDovL3d3dy53My5vcmcvMjAwMC9zdmciPgo8cGF0aCBkPSJNNCAwLjA5ODk5OUMxLjc5IDAuMDk4OTk5IDAgMS44OSAwIDQuMDk5QzAgNS44NjY2NyAxLjE0NiA3LjM2NTY2IDIuNzM1IDcuODk0QzIuOTM1IDcuOTMxNjYgMy4wMDgzMyA3LjgwOCAzLjAwODMzIDcuNzAxNjZDMy4wMDgzMyA3LjYwNjY2IDMuMDA1IDcuMzU1IDMuMDAzMzMgNy4wMjE2N0MxLjg5MDY3IDcuMjYzIDEuNjU2IDYuNDg1IDEuNjU2IDYuNDg1QzEuNDc0IDYuMDIzMzMgMS4yMTEgNS45IDEuMjExIDUuOUMwLjg0ODY2NyA1LjY1MiAxLjIzOSA1LjY1NyAxLjIzOSA1LjY1N0MxLjY0MDY3IDUuNjg1IDEuODUxNjcgNi4wNjkgMS44NTE2NyA2LjA2OUMyLjIwODMzIDYuNjgwNjcgMi43ODggNi41MDQgMy4wMTY2NyA2LjQwMTY2QzMuMDUyNjcgNi4xNDMgMy4xNTU2NyA1Ljk2NjY3IDMuMjcgNS44NjY2N0MyLjM4MTY3IDUuNzY2NjcgMS40NDggNS40MjI2NyAxLjQ0OCAzLjg5QzEuNDQ4IDMuNDUzMzMgMS42MDMgMy4wOTY2NyAxLjg1OTY3IDIuODE2NjdDMS44MTQ2NyAyLjcxNTY3IDEuNjc5NjcgMi4zMDkgMS44OTQ2NyAxLjc1OEMxLjg5NDY3IDEuNzU4IDIuMjI5NjcgMS42NTA2NyAyLjk5NDY3IDIuMTY4QzMuMzE0NjcgMi4wNzkgMy42NTQ2NyAyLjAzNSAzLjk5NDY3IDIuMDMzQzQuMzM0NjcgMi4wMzUgNC42NzQ2NyAyLjA3OSA0Ljk5NDY3IDIuMTY4QzUuNzU0NjcgMS42NTA2NyA2LjA4OTY3IDEuNzU4IDYuMDg5NjcgMS43NThDNi4zMDQ2NyAyLjMwOSA2LjE2OTY3IDIuNzE1NjcgNi4xMjk2NyAyLjgxNjY3QzYuMzg0NjcgMy4wOTY2NyA2LjUzOTY3IDMuNDUzMzMgNi41Mzk2NyAzLjg5QzYuNTM5NjcgNS40MjY2NyA1LjYwNDY3IDUuNzY1IDQuNzE0NjcgNS44NjMzM0M0Ljg1NDY3IDUuOTgzMzMgNC45ODQ2NyA2LjIyODY2IDQuOTg0NjcgNi42MDMzM0M0Ljk4NDY3IDcuMTM4NjYgNC45Nzk2NyA3LjU2ODY3IDQuOTc5NjcgNy42OTg2N0M0Ljk3OTY3IDcuODAzNjcgNS4wNDk2NyA3LjkyODY3IDUuMjU0NjcgNy44ODg2N0M2Ljg1NSA3LjM2NCA4IDUuODY0IDggNC4wOTlDOCAxLjg5IDYuMjA5IDAuMDk4OTk5IDQgMC4wOTg5OTlaIiBmaWxsPSIjNTU2NTc4Ii8%2BCjwvc3ZnPgo%3D)](https://github.com/fetchai/uAgent-Examples/tree/main/6-deployed-agents/knowledge-base/asi1-agent)
6+
[![live](https://img.shields.io/badge/Live-8A2BE2?style=flat&logo=data%3Aimage%2Fsvg%2Bxml%3Bbase64%2CPHN2ZyB3aWR0aD0iMTAiIGhlaWdodD0iOCIgdmlld0JveD0iMCAwIDEwIDgiIGZpbGw9Im5vbmUiIHhtbG5zPSJodHRwOi8vd3d3LnczLm9yZy8yMDAwL3N2ZyI%2BCjxwYXRoIGQ9Ik0yLjI1IDcuNUMxIDcuNSAwIDYuNSAwIDUuMjVDMCA0LjI4MTI1IDAuNjI1IDMuNDM3NSAxLjUgMy4xNDA2MkMxLjUgMy4wOTM3NSAxLjUgMy4wNDY4OCAxLjUgM0MxLjUgMS42MjUgMi42MDkzOCAwLjUgNCAwLjVDNC45MjE4OCAwLjUgNS43MzQzOCAxLjAxNTYyIDYuMTU2MjUgMS43NjU2MkM2LjM5MDYyIDEuNTkzNzUgNi42ODc1IDEuNSA3IDEuNUM3LjgyODEyIDEuNSA4LjUgMi4xNzE4OCA4LjUgM0M4LjUgMy4yMDMxMiA4LjQ1MzEyIDMuMzc1IDguMzkwNjIgMy41NDY4OEM5LjMxMjUgMy43MzQzOCAxMCA0LjU0Njg4IDEwIDUuNUMxMCA2LjYwOTM4IDkuMDkzNzUgNy41IDggNy41SDIuMjVaTTYuNzY1NjIgMy43NjU2MkM2LjkwNjI1IDMuNjI1IDYuOTA2MjUgMy4zOTA2MiA2Ljc2NTYyIDMuMjVDNi42MDkzOCAzLjA5Mzc1IDYuMzc1IDMuMDkzNzUgNi4yMzQzOCAzLjI1TDQuNSA0Ljk4NDM4TDMuNzY1NjIgNC4yNUMzLjYwOTM4IDQuMDkzNzUgMy4zNzUgNC4wOTM3NSAzLjIzNDM4IDQuMjVDMy4wNzgxMiA0LjM5MDYyIDMuMDc4MTIgNC42MjUgMy4yMzQzOCA0Ljc2NTYyTDQuMjM0MzggNS43NjU2MkM0LjM3NSA1LjkyMTg4IDQuNjA5MzggNS45MjE4OCA0Ljc2NTYyIDUuNzY1NjJMNi43NjU2MiAzLjc2NTYyWiIgZmlsbD0id2hpdGUiLz4KPC9zdmc%2BCg%3D%3D)](https://agentverse.ai/agents/details/agent1q0h70caed8ax769shpemapzkyk65uscw4xwk6dc4t3emvp5jdcvqs9xs32y/profile)
7+
8+
This agent is a simple wrapper around `asi1-mini` Large Language Model. For more details go to https://docs.asi1.ai/docs
Lines changed: 70 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,70 @@
1+
import os
2+
from enum import Enum
3+
4+
from protocols import chat_proto
5+
from uagents import Agent, Context, Model
6+
from uagents.experimental.quota import QuotaProtocol
7+
# Agent identity and name, overridable via environment variables so the
# same code can be deployed under different identities.
AGENT_SEED = os.getenv("AGENT_SEED", "asi1-test-agent")
AGENT_NAME = os.getenv("AGENT_NAME", "ASI1-Mini Agent")


# Local HTTP port the agent listens on.
PORT = 8000
# Construct the agent with a deterministic identity (from the seed) and a
# local submit endpoint.
agent = Agent(
    name=AGENT_NAME,
    seed=AGENT_SEED,
    port=PORT,
    endpoint=f"http://localhost:{PORT}/submit",
)

# Attach the chat protocol and publish its manifest so the protocol is
# discoverable by other agents.
agent.include(chat_proto, publish_manifest=True)
### Health check related code
def agent_is_healthy() -> bool:
    """Report whether the agent is able to serve requests.

    Placeholder implementation that always reports healthy.

    For example, check if the agent can connect to a third party API,
    check if the agent has enough resources, etc.
    """
    # TODO: replace this stub with the actual health-check logic.
    is_ok = True
    return bool(is_ok)
class HealthCheck(Model):
    """Empty request model: receiving any HealthCheck triggers a status reply."""
class HealthStatus(str, Enum):
    """String-valued enum of the health states the agent can report."""

    HEALTHY = "healthy"
    UNHEALTHY = "unhealthy"
class AgentHealth(Model):
    """Health-check response payload."""

    # Name of the reporting agent.
    agent_name: str
    # Current health status.
    status: HealthStatus
# Quota-aware protocol wrapper hosting the health-check handler.
health_protocol = QuotaProtocol(
    storage_reference=agent.storage,
    name="HealthProtocol",
    version="0.1.0",
)
@health_protocol.on_message(HealthCheck, replies={AgentHealth})
async def handle_health_check(ctx: Context, sender: str, msg: HealthCheck):
    """Reply to a health probe with the agent's current status.

    A failing (or raising) health check is reported as UNHEALTHY; the
    reply is always sent, even when the check itself errors out.
    """
    try:
        healthy = agent_is_healthy()
    except Exception as err:
        ctx.logger.error(err)
        healthy = False
    status = HealthStatus.HEALTHY if healthy else HealthStatus.UNHEALTHY
    await ctx.send(sender, AgentHealth(agent_name=AGENT_NAME, status=status))
# Register the health protocol alongside the chat protocol.
agent.include(health_protocol, publish_manifest=True)


if __name__ == "__main__":
    agent.run()
Lines changed: 66 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,66 @@
1+
import os
2+
from typing import Any, List
3+
from openai import OpenAI, OpenAIError
4+
from openai.types.chat import ChatCompletionMessageParam
5+
6+
7+
# ASI1 API configuration (OpenAI-compatible endpoint); model, key and token
# budget are overridable via environment variables.
ASI1_URL = "https://api.asi1.ai/v1"
MODEL_NAME = os.getenv("MODEL_NAME", "asi1-mini")
ASI1_API_KEY = os.getenv("ASI1_API_KEY", "")
MAX_TOKENS = int(os.getenv("MAX_TOKENS", "4096"))


# Fail fast at import time: every request needs a key.
if not ASI1_API_KEY:
    raise ValueError(
        # Fixed: missing space after the colon made the URL run into the text.
        "You need to provide an API key: https://docs.asi1.ai/docs/getting-api-key"
    )


client = OpenAI(api_key=ASI1_API_KEY, base_url=ASI1_URL)
# Send a prompt and context to the AI model and return the content of the completion
def get_completion(
    content: List[dict[str, Any]],
    max_tokens: int = MAX_TOKENS,
) -> str:
    """Send the user content to the ASI1 model and return the completion text.

    Args:
        content: Chat-proto content items. Each item is a dict with a
            ``type`` key: ``"text"`` items carry a ``text`` string;
            ``"resource"`` items carry ``mime_type`` and base64 ``contents``
            (only ``image/*`` resources are supported).
        max_tokens: Upper bound on the completion length.

    Returns:
        The model's reply as a string, or a human-readable error string when
        an item has an unsupported mime type or the API call fails.
    """
    message_parts = []

    for item in content:
        if item.get("type") == "text":
            message_parts.append({"type": "text", "text": item["text"]})
        elif item.get("type") == "resource":
            mime_type = item["mime_type"]
            if mime_type.startswith("image/"):
                # Images are passed inline as base64 data URLs, per the
                # OpenAI multimodal content-part format.
                message_parts.append({
                    "type": "image_url",
                    "image_url": {
                        "url": f"data:{mime_type};base64,{item['contents']}",
                    },
                })
            else:
                return f"Unsupported mime type: {mime_type}"

    # A single text part is sent as a plain string (the common case);
    # anything else uses the multimodal list-of-parts form.
    if len(message_parts) == 1 and message_parts[0].get("type") == "text":
        user_content = message_parts[0]["text"]
    else:
        user_content = message_parts

    try:
        messages: List[ChatCompletionMessageParam] = [
            {"role": "system", "content": "You are Fetch.ai agent, running on AgentVerse.ai platform."},
            {"role": "user", "content": user_content},
        ]
        response = client.chat.completions.create(
            model=MODEL_NAME,
            messages=messages,
            max_tokens=max_tokens,
        )

        # Fixed: the original rebound the `content` parameter here, shadowing
        # the function's input with the model output.
        reply = response.choices[0].message.content

        return str(reply)

    except OpenAIError as e:
        return f"An error occurred: {e}"
Lines changed: 82 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,82 @@
1+
import os
2+
from datetime import datetime
3+
from uuid import uuid4
4+
5+
from ai import get_completion
6+
from uagents import Context, Protocol
7+
from uagents_core.contrib.protocols.chat import (
8+
ChatAcknowledgement,
9+
ChatMessage,
10+
MetadataContent,
11+
ResourceContent,
12+
StartSessionContent,
13+
TextContent,
14+
chat_protocol_spec,
15+
)
16+
from uagents_core.storage import ExternalStorage
17+
# Agentverse external-storage endpoint used to fetch resource attachments.
STORAGE_URL = f"{os.getenv('AGENTVERSE_URL', 'https://agentverse.ai')}/v1/storage"
def create_text_chat(text: str) -> ChatMessage:
    """Build a ChatMessage carrying a single text payload."""
    payload = [TextContent(type="text", text=text)]
    return ChatMessage(timestamp=datetime.utcnow(), msg_id=uuid4(), content=payload)
def create_metadata(metadata: dict[str, str]) -> ChatMessage:
    """Build a ChatMessage carrying a single metadata payload."""
    payload = [MetadataContent(type="metadata", metadata=metadata)]
    return ChatMessage(timestamp=datetime.utcnow(), msg_id=uuid4(), content=payload)
# Instantiate the standard chat protocol from its published spec.
chat_proto = Protocol(spec=chat_protocol_spec)


@chat_proto.on_message(ChatMessage)
async def handle_message(ctx: Context, sender: str, msg: ChatMessage):
    """Acknowledge an incoming chat message, gather its content, and reply.

    Text parts are forwarded as-is; resource parts are downloaded from
    external storage (base64 contents + mime type). The collected parts
    are sent to the LLM and its reply is returned to the sender.
    """
    ctx.logger.info(f"Got a message from {sender}")
    ack = ChatAcknowledgement(
        timestamp=datetime.utcnow(), acknowledged_msg_id=msg.msg_id
    )
    await ctx.send(sender, ack)

    prompt_content = []

    for part in msg.content:
        if isinstance(part, StartSessionContent):
            # Advertise attachment support at the start of a session.
            await ctx.send(sender, create_metadata({"attachments": "true"}))

        elif isinstance(part, TextContent):
            prompt_content.append({"type": "text", "text": part.text})

        elif isinstance(part, ResourceContent):
            try:
                storage = ExternalStorage(
                    identity=ctx.agent.identity,
                    storage_url=STORAGE_URL,
                )
                resource = storage.download(str(part.resource_id))
                prompt_content.append({
                    "type": "resource",
                    "mime_type": resource["mime_type"],
                    "contents": resource["contents"],
                })
            except Exception as e:
                # Abort the whole message on a failed download: reply with an
                # error instead of answering from partial content.
                ctx.logger.error(f"Failed to download resource: {e}")
                await ctx.send(sender, create_text_chat("Failed to download resource."))
                return

        else:
            ctx.logger.warning(f"Unexpected content type from {sender}")

    if prompt_content:
        response = get_completion(prompt_content)
        await ctx.send(sender, create_text_chat(response or "No response generated."))
@chat_proto.on_message(ChatAcknowledgement)
async def handle_ack(ctx: Context, sender: str, msg: ChatAcknowledgement):
    """Log receipt of a chat acknowledgement; no further action required."""
    ctx.logger.info(f"Got an acknowledgement from {sender} for {msg.acknowledged_msg_id}")
Lines changed: 14 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,14 @@
1+
[tool.poetry]
2+
name = "asi1-agent"
3+
version = "0.1.0"
4+
description = ""
5+
authors = ["Attila Bagoly <[email protected]>"]
6+
readme = "README.md"
7+
8+
[tool.poetry.dependencies]
9+
python = "^3.10,<3.13"
10+
uagents = "^0.20.1"
11+
12+
[build-system]
13+
requires = ["poetry-core"]
14+
build-backend = "poetry.core.masonry.api"

6-deployed-agents/knowledge-base/claude-ai-agent/README.md

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -80,7 +80,7 @@ class StructuredOutputResponse(Model):
8080
agent = Agent()
8181

8282

83-
AI_AGENT_ADDRESS = "<deployed_agent_address>"
83+
AI_AGENT_ADDRESS = "{{ .Agent.Address }}"
8484

8585

8686
class Location(Model):

6-deployed-agents/knowledge-base/claude-ai-agent/ai.py

Lines changed: 21 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -49,13 +49,33 @@ def get_text_completion(prompt: str, tool: dict[str, Any] | None = None) -> str
4949
def get_completion(
5050
content: list[dict[str, Any]], tool: dict[str, Any] | None = None
5151
) -> str | None:
52+
53+
processed_content = []
54+
55+
for item in content:
56+
if item.get("type") == "text":
57+
processed_content.append({"type": "text", "text": item["text"]})
58+
elif item.get("type") == "resource":
59+
mime_type = item["mime_type"]
60+
if mime_type.startswith("image/"):
61+
processed_content.append({
62+
"type": "image",
63+
"source": {
64+
"type": "base64",
65+
"media_type": mime_type,
66+
"data": item["contents"],
67+
}
68+
})
69+
else:
70+
return f"Unsupported mime type: {mime_type}"
71+
5272
data = {
5373
"model": MODEL_ENGINE,
5474
"max_tokens": MAX_TOKENS,
5575
"messages": [
5676
{
5777
"role": "user",
58-
"content": content,
78+
"content": processed_content,
5979
}
6080
],
6181
}

6-deployed-agents/knowledge-base/claude-ai-agent/chat_proto.py

Lines changed: 7 additions & 16 deletions
Original file line numberDiff line numberDiff line change
@@ -50,7 +50,7 @@ async def handle_message(ctx: Context, sender: str, msg: ChatMessage):
5050
timestamp=datetime.utcnow(), acknowledged_msg_id=msg.msg_id
5151
),
5252
)
53-
ctx.storage.set(str(ctx.session), sender)
53+
5454
prompt_content = []
5555
for item in msg.content:
5656
if isinstance(item, StartSessionContent):
@@ -64,21 +64,12 @@ async def handle_message(ctx: Context, sender: str, msg: ChatMessage):
6464
storage_url=STORAGE_URL,
6565
)
6666
data = external_storage.download(str(item.resource_id))
67-
if "image" in data["mime_type"]:
68-
prompt_content.append(
69-
{
70-
"type": "image",
71-
"source": {
72-
"type": "base64",
73-
"media_type": data["mime_type"],
74-
"data": data["contents"],
75-
},
76-
}
77-
)
78-
else:
79-
ctx.logger.warning(
80-
f"Got unexpected resource type: {data['mime_type']}"
81-
)
67+
prompt_content.append({
68+
"type": "resource",
69+
"mime_type": data["mime_type"],
70+
"contents": data["contents"],
71+
})
72+
8273
except Exception as ex:
8374
ctx.logger.error(f"Failed to download resource: {ex}")
8475
await ctx.send(sender, create_text_chat("Failed to download resource."))

6-deployed-agents/knowledge-base/google-gemini-agent/README.md

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -103,7 +103,7 @@ class StructuredOutputResponse(Model):
103103
agent = Agent()
104104

105105

106-
AI_AGENT_ADDRESS = "<deployed_agent_address>"
106+
AI_AGENT_ADDRESS = "{{ .Agent.Address }}"
107107

108108

109109
class Location(Model):

6-deployed-agents/knowledge-base/google-gemini-agent/agent.py

Lines changed: 7 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -3,7 +3,7 @@
33
from enum import Enum
44
from typing import Any
55

6-
from ai import get_completion
6+
from ai import get_completion, get_text_completion
77
from chat_proto import chat_proto
88
from uagents import Agent, Context, Model
99
from uagents.experimental.quota import QuotaProtocol, RateLimit, AccessControlList
@@ -56,27 +56,27 @@ class StructuredOutputResponse(Model):
5656
storage_reference=agent.storage,
5757
name="LLM-Text-Response",
5858
version="0.1.0",
59-
default_rate_limit=RateLimit(window_size_minutes=60, max_requests=6), acl=acl,
59+
default_rate_limit=RateLimit(window_size_minutes=60, max_requests=6, acl=acl),
6060
)
6161

6262
code_proto = QuotaProtocol(
6363
storage_reference=agent.storage,
6464
name="LLM-Code-Generator",
6565
version="0.1.0",
66-
default_rate_limit=RateLimit(window_size_minutes=60, max_requests=6), acl=acl,
66+
default_rate_limit=RateLimit(window_size_minutes=60, max_requests=6, acl=acl),
6767
)
6868

6969
struct_proto = QuotaProtocol(
7070
storage_reference=agent.storage,
7171
name="LLM-Structured-Response",
7272
version="0.1.0",
73-
default_rate_limit=RateLimit(window_size_minutes=60, max_requests=6), acl=acl,
73+
default_rate_limit=RateLimit(window_size_minutes=60, max_requests=6, acl=acl),
7474
)
7575

7676

7777
@text_proto.on_message(TextPrompt, replies={TextResponse, ErrorMessage})
7878
async def handle_request(ctx: Context, sender: str, msg: TextPrompt):
79-
response = get_completion(msg.text, False)
79+
response = get_text_completion(msg.text)
8080
if response is None:
8181
await ctx.send(
8282
sender,
@@ -89,7 +89,7 @@ async def handle_request(ctx: Context, sender: str, msg: TextPrompt):
8989

9090
@code_proto.on_message(CodePrompt, replies={CodeResponse, ErrorMessage})
9191
async def handle_codegen_request(ctx: Context, sender: str, msg: CodePrompt):
92-
response = get_completion(msg.text, True)
92+
response = get_text_completion(msg.text, True)
9393
if response is None:
9494
await ctx.send(
9595
sender,
@@ -106,7 +106,7 @@ async def handle_codegen_request(ctx: Context, sender: str, msg: CodePrompt):
106106
async def handle_structured_request(
107107
ctx: Context, sender: str, msg: StructuredOutputPrompt
108108
):
109-
response = get_completion(msg.prompt, False, msg.output_schema)
109+
response = get_completion([{"type": "text", "text": msg.prompt}], response_schema=msg.output_schema)
110110
if response is None:
111111
await ctx.send(
112112
sender,

0 commit comments

Comments
 (0)