Skip to content

Commit

Permalink
Merge pull request #2561 from hlohaus/10Jan
Browse files Browse the repository at this point in the history
Add CohereForAI provider
  • Loading branch information
hlohaus authored Jan 12, 2025
2 parents c159eeb + 2064bb7 commit f1bede1
Show file tree
Hide file tree
Showing 11 changed files with 212 additions and 76 deletions.
2 changes: 1 addition & 1 deletion docker/Dockerfile
Original file line number Diff line number Diff line change
Expand Up @@ -84,4 +84,4 @@ RUN pip install --break-system-packages --upgrade pip \
ADD --chown=$G4F_USER:$G4F_USER g4f $G4F_DIR/g4f

# Expose ports
EXPOSE 8080 1337
EXPOSE 8080 1337 7900
24 changes: 12 additions & 12 deletions g4f/Provider/You.py
Original file line number Diff line number Diff line change
Expand Up @@ -10,7 +10,7 @@
from ..image import ImageResponse, ImagePreview, EXTENSIONS_MAP, to_bytes, is_accepted_format
from ..requests import StreamSession, FormData, raise_for_status, get_nodriver
from ..cookies import get_cookies
from ..errors import MissingRequirementsError
from ..errors import MissingRequirementsError, ResponseError
from .. import debug

class You(AsyncGeneratorProvider, ProviderModelMixin):
Expand All @@ -23,18 +23,19 @@ class You(AsyncGeneratorProvider, ProviderModelMixin):
models = [
default_model,
"gpt-4o",
"gpt-4o-mini",
"gpt-4-turbo",
"gpt-4",
"grok-2",
"claude-3.5-sonnet",
"claude-3.5-haiku",
"claude-3-opus",
"claude-3-sonnet",
"claude-3-haiku",
"claude-2",
"llama-3.3-70b",
"llama-3.1-70b",
"llama-3",
"gemini-1-5-flash",
"gemini-1-5-pro",
"gemini-1-0-pro",
"databricks-dbrx-instruct",
"command-r",
"command-r-plus",
Expand Down Expand Up @@ -105,19 +106,14 @@ async def create_async_generator(
"conversationTurnId": str(uuid.uuid4()),
"chatId": str(uuid.uuid4()),
}
params = {
"userFiles": upload,
"selectedChatMode": chat_mode,
}
if chat_mode == "custom":
if debug.logging:
print(f"You model: {model}")
params["selectedAiModel"] = model.replace("-", "_")
data["selectedAiModel"] = model.replace("-", "_")

async with (session.post if chat_mode == "default" else session.get)(
async with session.get(
f"{cls.url}/api/streamingSearch",
data=data if chat_mode == "default" else None,
params=params if chat_mode == "default" else data,
params=data,
headers=headers,
cookies=cookies
) as response:
Expand All @@ -126,9 +122,13 @@ async def create_async_generator(
if line.startswith(b'event: '):
event = line[7:].decode()
elif line.startswith(b'data: '):
if event == "error":
raise ResponseError(line[6:])
if event in ["youChatUpdate", "youChatToken"]:
data = json.loads(line[6:])
if event == "youChatToken" and event in data and data[event]:
if data[event].startswith("#### You\'ve hit your free quota for the Model Agent. For more usage of the Model Agent, learn more at:"):
continue
yield data[event]
elif event == "youChatUpdate" and "t" in data and data["t"]:
if chat_mode == "create":
Expand Down
95 changes: 95 additions & 0 deletions g4f/Provider/hf_space/CohereForAI.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,95 @@
from __future__ import annotations

import json
import uuid
from aiohttp import ClientSession, FormData

from ...typing import AsyncResult, Messages
from ...requests import raise_for_status
from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
from ..helper import format_prompt
from ...providers.response import JsonConversation, TitleGeneration

class CohereForAI(AsyncGeneratorProvider, ProviderModelMixin):
    """Provider for the CohereForAI C4AI Command models hosted on a Hugging Face Space."""
    url = "https://cohereforai-c4ai-command.hf.space"
    # Conversations are created and continued against this endpoint.
    conversation_url = f"{url}/conversation"

    working = True

    default_model = "command-r-plus-08-2024"
    models = [
        default_model,
        "command-r-08-2024",
        "command-r-plus",
        "command-r",
        "command-r7b-12-2024",
    ]

    @classmethod
    async def create_async_generator(
        cls, model: str, messages: Messages,
        api_key: str = None,
        proxy: str = None,
        conversation: JsonConversation = None,
        return_conversation: bool = False,
        **kwargs
    ) -> AsyncResult:
        """Stream a chat completion from the hosted Space.

        Args:
            model: Model name or alias; resolved via ``cls.get_model``.
            api_key: Optional Hugging Face token sent as a Bearer header.
            proxy: Optional HTTP proxy URL for all requests.
            conversation: Previous ``JsonConversation`` to continue; a new one
                is created when absent or when model/system prompt changed.
            return_conversation: When True, yield the ``JsonConversation``
                object first so the caller can reuse it.

        Yields:
            Response token strings, optionally a ``JsonConversation`` and a
            ``TitleGeneration``.

        Raises:
            RuntimeError: On an error node in the Space's data payload or on
                an unparsable stream chunk.
        """
        model = cls.get_model(model)
        headers = {
            "Origin": cls.url,
            "User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:133.0) Gecko/20100101 Firefox/133.0",
            "Accept": "*/*",
            "Accept-Language": "en-US,en;q=0.5",
            "Referer": "https://cohereforai-c4ai-command.hf.space/",
            "Sec-Fetch-Dest": "empty",
            "Sec-Fetch-Mode": "cors",
            "Sec-Fetch-Site": "same-origin",
            "Priority": "u=4",
        }
        if api_key is not None:
            headers["Authorization"] = f"Bearer {api_key}"
        async with ClientSession(
            headers=headers,
            cookies=None if conversation is None else conversation.cookies
        ) as session:
            # System messages become the conversation "preprompt"; they are
            # stripped from the message list sent as chat input.
            system_prompt = "\n".join([message["content"] for message in messages if message["role"] == "system"])
            messages = [message for message in messages if message["role"] != "system"]
            # New conversation: send the whole formatted history.
            # Existing conversation: send only the latest user message.
            # (Bug fix: this value was previously clobbered below with
            # messages[-1]["content"], discarding the formatted history.)
            inputs = format_prompt(messages) if conversation is None else messages[-1]["content"]
            if conversation is None or conversation.model != model or conversation.preprompt != system_prompt:
                data = {"model": model, "preprompt": system_prompt}
                async with session.post(cls.conversation_url, json=data, proxy=proxy) as response:
                    await raise_for_status(response)
                    conversation = JsonConversation(
                        **await response.json(),
                        **data,
                        cookies={n: c.value for n, c in response.cookies.items()}
                    )
                    if return_conversation:
                        yield conversation
            async with session.get(f"{cls.conversation_url}/{conversation.conversationId}/__data.json?x-sveltekit-invalidated=11", proxy=proxy) as response:
                await raise_for_status(response)
                # The SvelteKit __data.json payload is newline-delimited JSON;
                # node values reference entries by index into a flat data array.
                node = json.loads((await response.text()).splitlines()[0])["nodes"][1]
                if node["type"] == "error":
                    raise RuntimeError(node["error"])
                data = node["data"]
                # Resolve the id of the last message through the index indirection.
                message_id = data[data[data[data[0]["messages"]][-1]]["id"]]
            data = FormData()
            data.add_field(
                "data",
                json.dumps({"inputs": inputs, "id": message_id, "is_retry": False, "is_continue": False, "web_search": False, "tools": []}),
                content_type="application/json"
            )
            async with session.post(f"{cls.conversation_url}/{conversation.conversationId}", data=data, proxy=proxy) as response:
                await raise_for_status(response)
                async for chunk in response.content:
                    try:
                        data = json.loads(chunk)
                    except json.JSONDecodeError as e:
                        # Chain the decode error instead of packing it into args.
                        raise RuntimeError(f"Failed to read response: {chunk.decode(errors='replace')}") from e
                    if data["type"] == "stream":
                        # Strip NUL bytes occasionally embedded in streamed tokens.
                        yield data["token"].replace("\u0000", "")
                    elif data["type"] == "title":
                        yield TitleGeneration(data["title"])
                    elif data["type"] == "finalAnswer":
                        break
13 changes: 8 additions & 5 deletions g4f/Provider/hf_space/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -8,6 +8,7 @@
from .BlackForestLabsFlux1Schnell import BlackForestLabsFlux1Schnell
from .VoodoohopFlux1Schnell import VoodoohopFlux1Schnell
from .StableDiffusion35Large import StableDiffusion35Large
from .CohereForAI import CohereForAI
from .Qwen_QVQ_72B import Qwen_QVQ_72B

class HuggingSpace(AsyncGeneratorProvider, ProviderModelMixin):
Expand All @@ -16,7 +17,7 @@ class HuggingSpace(AsyncGeneratorProvider, ProviderModelMixin):
working = True
default_model = BlackForestLabsFlux1Dev.default_model
default_vision_model = Qwen_QVQ_72B.default_model
providers = [BlackForestLabsFlux1Dev, BlackForestLabsFlux1Schnell, VoodoohopFlux1Schnell, StableDiffusion35Large, Qwen_QVQ_72B]
providers = [BlackForestLabsFlux1Dev, BlackForestLabsFlux1Schnell, VoodoohopFlux1Schnell, StableDiffusion35Large, CohereForAI, Qwen_QVQ_72B]

@classmethod
def get_parameters(cls, **kwargs) -> dict:
Expand All @@ -28,11 +29,13 @@ def get_parameters(cls, **kwargs) -> dict:
@classmethod
def get_models(cls, **kwargs) -> list[str]:
if not cls.models:
models = []
for provider in cls.providers:
cls.models.extend(provider.get_models(**kwargs))
cls.models.extend(provider.model_aliases.keys())
cls.models = list(set(cls.models))
cls.models.sort()
models.extend(provider.get_models(**kwargs))
models.extend(provider.model_aliases.keys())
models = list(set(models))
models.sort()
cls.models = models
return cls.models

@classmethod
Expand Down
19 changes: 10 additions & 9 deletions g4f/Provider/needs_auth/HuggingFace.py
Original file line number Diff line number Diff line change
Expand Up @@ -28,15 +28,16 @@ class HuggingFace(AsyncGeneratorProvider, ProviderModelMixin):
def get_models(cls) -> list[str]:
if not cls.models:
url = "https://huggingface.co/api/models?inference=warm&pipeline_tag=text-generation"
cls.models = [model["id"] for model in requests.get(url).json()]
cls.models.append("meta-llama/Llama-3.2-11B-Vision-Instruct")
cls.models.append("nvidia/Llama-3.1-Nemotron-70B-Instruct-HF")
cls.models.sort()
if not cls.image_models:
url = "https://huggingface.co/api/models?pipeline_tag=text-to-image"
cls.image_models = [model["id"] for model in requests.get(url).json() if model["trendingScore"] >= 20]
cls.image_models.sort()
cls.models.extend(cls.image_models)
models = [model["id"] for model in requests.get(url).json()]
models.append("meta-llama/Llama-3.2-11B-Vision-Instruct")
models.append("nvidia/Llama-3.1-Nemotron-70B-Instruct-HF")
models.sort()
if not cls.image_models:
url = "https://huggingface.co/api/models?pipeline_tag=text-to-image"
cls.image_models = [model["id"] for model in requests.get(url).json() if model["trendingScore"] >= 20]
cls.image_models.sort()
models.extend(cls.image_models)
cls.models = models
return cls.models

@classmethod
Expand Down
1 change: 0 additions & 1 deletion g4f/Provider/needs_auth/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -25,7 +25,6 @@
from .Raycast import Raycast
from .Reka import Reka
from .Replicate import Replicate
from .Theb import Theb
from .ThebApi import ThebApi
from .WhiteRabbitNeo import WhiteRabbitNeo
from .xAI import xAI
File renamed without changes.
1 change: 1 addition & 0 deletions g4f/Provider/not_working/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -14,4 +14,5 @@
from .MagickPen import MagickPen
from .MyShell import MyShell
from .RobocodersAPI import RobocodersAPI
from .Theb import Theb
from .Upstage import Upstage
9 changes: 8 additions & 1 deletion g4f/gui/client/index.html
Original file line number Diff line number Diff line change
Expand Up @@ -142,13 +142,20 @@ <h3>Settings</h3>
<input type="checkbox" id="refine"/>
<label for="refine" class="toogle" title=""></label>
</div>
<div class="field box">
<label for="systemPrompt" class="label" title="">Default for System prompt</label>
<textarea id="systemPrompt" placeholder="You are a helpful assistant."></textarea>
</div>
<div class="field box">
<label for="message-input-height" class="label" title="">Input max. height</label>
<input type="number" id="message-input-height" value="200"/>
</div>
<div class="field box">
<label for="recognition-language" class="label" title="">Speech recognition language</label>
<input type="text" id="recognition-language" value="" placeholder="navigator.language"/>
<script>
document.getElementById('recognition-language').placeholder = navigator.language;
</script>
</div>
<div class="field box hidden">
<label for="BingCreateImages-api_key" class="label" title="">Microsoft Designer in Bing:</label>
Expand Down Expand Up @@ -179,7 +186,7 @@ <h3>Settings</h3>
</div>
</div>
<div class="conversation">
<textarea id="systemPrompt" class="box" placeholder="System prompt"></textarea>
<textarea id="chatPrompt" class="box" placeholder="System prompt"></textarea>
<div id="messages" class="box"></div>
<button class="slide-systemPrompt">
<i class="fa-solid fa-angles-up"></i>
Expand Down
23 changes: 13 additions & 10 deletions g4f/gui/client/static/css/style.css
Original file line number Diff line number Diff line change
Expand Up @@ -859,19 +859,22 @@ select:hover,
display: none;
}

#systemPrompt, .settings textarea, form textarea {
#chatPrompt{
min-height: 59px;
height: 59px;
resize: vertical;
padding: var(--inner-gap) var(--section-gap);
}

#systemPrompt, #chatPrompt, .settings textarea, form textarea {
font-size: 15px;
width: 100%;
color: var(--colour-3);
height: 59px;
outline: none;
padding: var(--inner-gap) var(--section-gap);
resize: vertical;
min-height: 59px;
transition: max-height 0.15s ease-out;
}

#systemPrompt:focus {
#systemPrompt:focus, #chatPrompt:focus {
min-height: 200px;
max-height: 1000px;
transition: max-height 0.25s ease-in;
Expand Down Expand Up @@ -940,7 +943,7 @@ select:hover,
body:not(.white) .gradient{
display: block;
}
.settings .label, form .label, .settings label, form label {
.settings .label:not([for="systemPrompt"]), form .label {
min-width: 200px;
}
}
Expand Down Expand Up @@ -1126,8 +1129,8 @@ ul {
display: flex;
}

#systemPrompt::placeholder {
text-align: center;
#chatPrompt {
padding-left: 60px;
}

.settings h3 {
Expand Down Expand Up @@ -1423,7 +1426,7 @@ form .field.saved .fa-xmark {
}

@media print {
#systemPrompt:placeholder-shown,
#chatPrompt:placeholder-shown,
.conversations,
.conversation .user-input,
.conversation .buttons,
Expand Down
Loading

0 comments on commit f1bede1

Please sign in to comment.