Skip to content

Commit f1bede1

Browse files
authored
Merge pull request #2561 from hlohaus/10Jan
Add CohereForAI provider
2 parents c159eeb + 2064bb7 commit f1bede1

File tree

11 files changed

+212
-76
lines changed

11 files changed

+212
-76
lines changed

docker/Dockerfile

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -84,4 +84,4 @@ RUN pip install --break-system-packages --upgrade pip \
8484
ADD --chown=$G4F_USER:$G4F_USER g4f $G4F_DIR/g4f
8585

8686
# Expose ports
87-
EXPOSE 8080 1337
87+
EXPOSE 8080 1337 7900

g4f/Provider/You.py

Lines changed: 12 additions & 12 deletions
Original file line number | Diff line number | Diff line change
@@ -10,7 +10,7 @@
1010
from ..image import ImageResponse, ImagePreview, EXTENSIONS_MAP, to_bytes, is_accepted_format
1111
from ..requests import StreamSession, FormData, raise_for_status, get_nodriver
1212
from ..cookies import get_cookies
13-
from ..errors import MissingRequirementsError
13+
from ..errors import MissingRequirementsError, ResponseError
1414
from .. import debug
1515

1616
class You(AsyncGeneratorProvider, ProviderModelMixin):
@@ -23,18 +23,19 @@ class You(AsyncGeneratorProvider, ProviderModelMixin):
2323
models = [
2424
default_model,
2525
"gpt-4o",
26+
"gpt-4o-mini",
2627
"gpt-4-turbo",
27-
"gpt-4",
28+
"grok-2",
2829
"claude-3.5-sonnet",
30+
"claude-3.5-haiku",
2931
"claude-3-opus",
3032
"claude-3-sonnet",
3133
"claude-3-haiku",
32-
"claude-2",
34+
"llama-3.3-70b",
3335
"llama-3.1-70b",
3436
"llama-3",
3537
"gemini-1-5-flash",
3638
"gemini-1-5-pro",
37-
"gemini-1-0-pro",
3839
"databricks-dbrx-instruct",
3940
"command-r",
4041
"command-r-plus",
@@ -105,19 +106,14 @@ async def create_async_generator(
105106
"conversationTurnId": str(uuid.uuid4()),
106107
"chatId": str(uuid.uuid4()),
107108
}
108-
params = {
109-
"userFiles": upload,
110-
"selectedChatMode": chat_mode,
111-
}
112109
if chat_mode == "custom":
113110
if debug.logging:
114111
print(f"You model: {model}")
115-
params["selectedAiModel"] = model.replace("-", "_")
112+
data["selectedAiModel"] = model.replace("-", "_")
116113

117-
async with (session.post if chat_mode == "default" else session.get)(
114+
async with session.get(
118115
f"{cls.url}/api/streamingSearch",
119-
data=data if chat_mode == "default" else None,
120-
params=params if chat_mode == "default" else data,
116+
params=data,
121117
headers=headers,
122118
cookies=cookies
123119
) as response:
@@ -126,9 +122,13 @@ async def create_async_generator(
126122
if line.startswith(b'event: '):
127123
event = line[7:].decode()
128124
elif line.startswith(b'data: '):
125+
if event == "error":
126+
raise ResponseError(line[6:])
129127
if event in ["youChatUpdate", "youChatToken"]:
130128
data = json.loads(line[6:])
131129
if event == "youChatToken" and event in data and data[event]:
130+
if data[event].startswith("#### You\'ve hit your free quota for the Model Agent. For more usage of the Model Agent, learn more at:"):
131+
continue
132132
yield data[event]
133133
elif event == "youChatUpdate" and "t" in data and data["t"]:
134134
if chat_mode == "create":

g4f/Provider/hf_space/CohereForAI.py

Lines changed: 95 additions & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -0,0 +1,95 @@
1+
from __future__ import annotations
2+
3+
import json
4+
import uuid
5+
from aiohttp import ClientSession, FormData
6+
7+
from ...typing import AsyncResult, Messages
8+
from ...requests import raise_for_status
9+
from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
10+
from ..helper import format_prompt
11+
from ...providers.response import JsonConversation, TitleGeneration
12+
13+
class CohereForAI(AsyncGeneratorProvider, ProviderModelMixin):
14+
url = "https://cohereforai-c4ai-command.hf.space"
15+
conversation_url = f"{url}/conversation"
16+
17+
working = True
18+
19+
default_model = "command-r-plus-08-2024"
20+
models = [
21+
default_model,
22+
"command-r-08-2024",
23+
"command-r-plus",
24+
"command-r",
25+
"command-r7b-12-2024",
26+
]
27+
28+
@classmethod
29+
async def create_async_generator(
30+
cls, model: str, messages: Messages,
31+
api_key: str = None,
32+
proxy: str = None,
33+
conversation: JsonConversation = None,
34+
return_conversation: bool = False,
35+
**kwargs
36+
) -> AsyncResult:
37+
model = cls.get_model(model)
38+
headers = {
39+
"Origin": cls.url,
40+
"User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:133.0) Gecko/20100101 Firefox/133.0",
41+
"Accept": "*/*",
42+
"Accept-Language": "en-US,en;q=0.5",
43+
"Referer": "https://cohereforai-c4ai-command.hf.space/",
44+
"Sec-Fetch-Dest": "empty",
45+
"Sec-Fetch-Mode": "cors",
46+
"Sec-Fetch-Site": "same-origin",
47+
"Priority": "u=4",
48+
}
49+
if api_key is not None:
50+
headers["Authorization"] = f"Bearer {api_key}"
51+
async with ClientSession(
52+
headers=headers,
53+
cookies=None if conversation is None else conversation.cookies
54+
) as session:
55+
system_prompt = "\n".join([message["content"] for message in messages if message["role"] == "system"])
56+
messages = [message for message in messages if message["role"] != "system"]
57+
inputs = format_prompt(messages) if conversation is None else messages[-1]["content"]
58+
if conversation is None or conversation.model != model or conversation.preprompt != system_prompt:
59+
data = {"model": model, "preprompt": system_prompt}
60+
async with session.post(cls.conversation_url, json=data, proxy=proxy) as response:
61+
await raise_for_status(response)
62+
conversation = JsonConversation(
63+
**await response.json(),
64+
**data,
65+
cookies={n: c.value for n, c in response.cookies.items()}
66+
)
67+
if return_conversation:
68+
yield conversation
69+
async with session.get(f"{cls.conversation_url}/{conversation.conversationId}/__data.json?x-sveltekit-invalidated=11", proxy=proxy) as response:
70+
await raise_for_status(response)
71+
node = json.loads((await response.text()).splitlines()[0])["nodes"][1]
72+
if node["type"] == "error":
73+
raise RuntimeError(node["error"])
74+
data = node["data"]
75+
message_id = data[data[data[data[0]["messages"]][-1]]["id"]]
76+
data = FormData()
77+
inputs = messages[-1]["content"]
78+
data.add_field(
79+
"data",
80+
json.dumps({"inputs": inputs, "id": message_id, "is_retry": False, "is_continue": False, "web_search": False, "tools": []}),
81+
content_type="application/json"
82+
)
83+
async with session.post(f"{cls.conversation_url}/{conversation.conversationId}", data=data, proxy=proxy) as response:
84+
await raise_for_status(response)
85+
async for chunk in response.content:
86+
try:
87+
data = json.loads(chunk)
88+
except (json.JSONDecodeError) as e:
89+
raise RuntimeError(f"Failed to read response: {chunk.decode(errors='replace')}", e)
90+
if data["type"] == "stream":
91+
yield data["token"].replace("\u0000", "")
92+
elif data["type"] == "title":
93+
yield TitleGeneration(data["title"])
94+
elif data["type"] == "finalAnswer":
95+
break

g4f/Provider/hf_space/__init__.py

Lines changed: 8 additions & 5 deletions
Original file line number | Diff line number | Diff line change
@@ -8,6 +8,7 @@
88
from .BlackForestLabsFlux1Schnell import BlackForestLabsFlux1Schnell
99
from .VoodoohopFlux1Schnell import VoodoohopFlux1Schnell
1010
from .StableDiffusion35Large import StableDiffusion35Large
11+
from .CohereForAI import CohereForAI
1112
from .Qwen_QVQ_72B import Qwen_QVQ_72B
1213

1314
class HuggingSpace(AsyncGeneratorProvider, ProviderModelMixin):
@@ -16,7 +17,7 @@ class HuggingSpace(AsyncGeneratorProvider, ProviderModelMixin):
1617
working = True
1718
default_model = BlackForestLabsFlux1Dev.default_model
1819
default_vision_model = Qwen_QVQ_72B.default_model
19-
providers = [BlackForestLabsFlux1Dev, BlackForestLabsFlux1Schnell, VoodoohopFlux1Schnell, StableDiffusion35Large, Qwen_QVQ_72B]
20+
providers = [BlackForestLabsFlux1Dev, BlackForestLabsFlux1Schnell, VoodoohopFlux1Schnell, StableDiffusion35Large, CohereForAI, Qwen_QVQ_72B]
2021

2122
@classmethod
2223
def get_parameters(cls, **kwargs) -> dict:
@@ -28,11 +29,13 @@ def get_parameters(cls, **kwargs) -> dict:
2829
@classmethod
2930
def get_models(cls, **kwargs) -> list[str]:
3031
if not cls.models:
32+
models = []
3133
for provider in cls.providers:
32-
cls.models.extend(provider.get_models(**kwargs))
33-
cls.models.extend(provider.model_aliases.keys())
34-
cls.models = list(set(cls.models))
35-
cls.models.sort()
34+
models.extend(provider.get_models(**kwargs))
35+
models.extend(provider.model_aliases.keys())
36+
models = list(set(models))
37+
models.sort()
38+
cls.models = models
3639
return cls.models
3740

3841
@classmethod

g4f/Provider/needs_auth/HuggingFace.py

Lines changed: 10 additions & 9 deletions
Original file line number | Diff line number | Diff line change
@@ -28,15 +28,16 @@ class HuggingFace(AsyncGeneratorProvider, ProviderModelMixin):
2828
def get_models(cls) -> list[str]:
2929
if not cls.models:
3030
url = "https://huggingface.co/api/models?inference=warm&pipeline_tag=text-generation"
31-
cls.models = [model["id"] for model in requests.get(url).json()]
32-
cls.models.append("meta-llama/Llama-3.2-11B-Vision-Instruct")
33-
cls.models.append("nvidia/Llama-3.1-Nemotron-70B-Instruct-HF")
34-
cls.models.sort()
35-
if not cls.image_models:
36-
url = "https://huggingface.co/api/models?pipeline_tag=text-to-image"
37-
cls.image_models = [model["id"] for model in requests.get(url).json() if model["trendingScore"] >= 20]
38-
cls.image_models.sort()
39-
cls.models.extend(cls.image_models)
31+
models = [model["id"] for model in requests.get(url).json()]
32+
models.append("meta-llama/Llama-3.2-11B-Vision-Instruct")
33+
models.append("nvidia/Llama-3.1-Nemotron-70B-Instruct-HF")
34+
models.sort()
35+
if not cls.image_models:
36+
url = "https://huggingface.co/api/models?pipeline_tag=text-to-image"
37+
cls.image_models = [model["id"] for model in requests.get(url).json() if model["trendingScore"] >= 20]
38+
cls.image_models.sort()
39+
models.extend(cls.image_models)
40+
cls.models = models
4041
return cls.models
4142

4243
@classmethod

g4f/Provider/needs_auth/__init__.py

Lines changed: 0 additions & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -25,7 +25,6 @@
2525
from .Raycast import Raycast
2626
from .Reka import Reka
2727
from .Replicate import Replicate
28-
from .Theb import Theb
2928
from .ThebApi import ThebApi
3029
from .WhiteRabbitNeo import WhiteRabbitNeo
3130
from .xAI import xAI
File renamed without changes.

g4f/Provider/not_working/__init__.py

Lines changed: 1 addition & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -14,4 +14,5 @@
1414
from .MagickPen import MagickPen
1515
from .MyShell import MyShell
1616
from .RobocodersAPI import RobocodersAPI
17+
from .Theb import Theb
1718
from .Upstage import Upstage

g4f/gui/client/index.html

Lines changed: 8 additions & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -142,13 +142,20 @@ <h3>Settings</h3>
142142
<input type="checkbox" id="refine"/>
143143
<label for="refine" class="toogle" title=""></label>
144144
</div>
145+
<div class="field box">
146+
<label for="systemPrompt" class="label" title="">Default for System prompt</label>
147+
<textarea id="systemPrompt" placeholder="You are a helpful assistant."></textarea>
148+
</div>
145149
<div class="field box">
146150
<label for="message-input-height" class="label" title="">Input max. height</label>
147151
<input type="number" id="message-input-height" value="200"/>
148152
</div>
149153
<div class="field box">
150154
<label for="recognition-language" class="label" title="">Speech recognition language</label>
151155
<input type="text" id="recognition-language" value="" placeholder="navigator.language"/>
156+
<script>
157+
document.getElementById('recognition-language').placeholder = navigator.language;
158+
</script>
152159
</div>
153160
<div class="field box hidden">
154161
<label for="BingCreateImages-api_key" class="label" title="">Microsoft Designer in Bing:</label>
@@ -179,7 +186,7 @@ <h3>Settings</h3>
179186
</div>
180187
</div>
181188
<div class="conversation">
182-
<textarea id="systemPrompt" class="box" placeholder="System prompt"></textarea>
189+
<textarea id="chatPrompt" class="box" placeholder="System prompt"></textarea>
183190
<div id="messages" class="box"></div>
184191
<button class="slide-systemPrompt">
185192
<i class="fa-solid fa-angles-up"></i>

g4f/gui/client/static/css/style.css

Lines changed: 13 additions & 10 deletions
Original file line number | Diff line number | Diff line change
@@ -859,19 +859,22 @@ select:hover,
859859
display: none;
860860
}
861861

862-
#systemPrompt, .settings textarea, form textarea {
862+
#chatPrompt{
863+
min-height: 59px;
864+
height: 59px;
865+
resize: vertical;
866+
padding: var(--inner-gap) var(--section-gap);
867+
}
868+
869+
#systemPrompt, #chatPrompt, .settings textarea, form textarea {
863870
font-size: 15px;
864871
width: 100%;
865872
color: var(--colour-3);
866-
height: 59px;
867873
outline: none;
868-
padding: var(--inner-gap) var(--section-gap);
869-
resize: vertical;
870-
min-height: 59px;
871874
transition: max-height 0.15s ease-out;
872875
}
873876

874-
#systemPrompt:focus {
877+
#systemPrompt:focus, #chatPrompt:focus {
875878
min-height: 200px;
876879
max-height: 1000px;
877880
transition: max-height 0.25s ease-in;
@@ -940,7 +943,7 @@ select:hover,
940943
body:not(.white) .gradient{
941944
display: block;
942945
}
943-
.settings .label, form .label, .settings label, form label {
946+
.settings .label:not([for="systemPrompt"]), form .label {
944947
min-width: 200px;
945948
}
946949
}
@@ -1126,8 +1129,8 @@ ul {
11261129
display: flex;
11271130
}
11281131

1129-
#systemPrompt::placeholder {
1130-
text-align: center;
1132+
#chatPrompt {
1133+
padding-left: 60px;
11311134
}
11321135

11331136
.settings h3 {
@@ -1423,7 +1426,7 @@ form .field.saved .fa-xmark {
14231426
}
14241427

14251428
@media print {
1426-
#systemPrompt:placeholder-shown,
1429+
#chatPrompt:placeholder-shown,
14271430
.conversations,
14281431
.conversation .user-input,
14291432
.conversation .buttons,

0 commit comments

Comments
 (0)