Merge pull request #2563 from hlohaus/12Jan
Add conversation and continue support in DDG
hlohaus authored Jan 12, 2025
2 parents 214567f + 2b5f6a4 commit f19cb91
Showing 16 changed files with 128 additions and 143 deletions.
15 changes: 6 additions & 9 deletions g4f/Provider/AutonomousAI.py
@@ -6,8 +6,8 @@
 
 from ..typing import AsyncResult, Messages
 from ..requests.raise_for_status import raise_for_status
+from ..providers.response import FinishReason
 from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
-from .helper import format_prompt
 
 class AutonomousAI(AsyncGeneratorProvider, ProviderModelMixin):
     url = "https://www.autonomous.ai/anon/"
@@ -32,7 +32,6 @@ class AutonomousAI(AsyncGeneratorProvider, ProviderModelMixin):
         "qwen-2.5-coder-32b": "qwen_coder",
         "hermes-3": "hermes",
         "llama-3.2-90b": "vision",
-        "llama-3.3-70b": "summary"
     }
 
     @classmethod
@@ -57,12 +56,8 @@ async def create_async_generator(
         }
 
         async with ClientSession(headers=headers) as session:
-            prompt = format_prompt(messages)
-
-            # Encode message
-            message = [{"role": "user", "content": prompt}]
-            message_json = json.dumps(message)
-            encoded_message = base64.b64encode(message_json.encode('utf-8')).decode('utf-8')
+            message_json = json.dumps(messages)
+            encoded_message = base64.b64encode(message_json.encode()).decode(errors="ignore")
 
             data = {
                 "messages": encoded_message,
@@ -84,7 +79,9 @@ async def create_async_generator(
                             chunk_data = json.loads(chunk_str.replace("data: ", ""))
                             if "choices" in chunk_data and chunk_data["choices"]:
                                 delta = chunk_data["choices"][0].get("delta", {})
-                                if "content" in delta:
+                                if "content" in delta and delta["content"]:
                                     yield delta["content"]
+                            if "finish_reason" in chunk_data and chunk_data["finish_reason"]:
+                                yield FinishReason(chunk_data["finish_reason"])
                         except json.JSONDecodeError:
                             continue
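
Note: the rewritten AutonomousAI payload sends the whole messages list instead of collapsing the history into one formatted prompt. A standalone sketch of the encoding round-trip, mirroring the two added lines above (illustration only, not part of the commit):

import base64
import json

# The OpenAI-style message list is JSON-serialized, then base64-encoded,
# which is the payload shape the endpoint expects after this change.
messages = [
    {"role": "system", "content": "You are helpful."},
    {"role": "user", "content": "Hello"},
]
message_json = json.dumps(messages)
encoded_message = base64.b64encode(message_json.encode()).decode(errors="ignore")

# Decoding reverses the transformation:
assert json.loads(base64.b64decode(encoded_message)) == messages

Sending the list verbatim preserves the roles and turn structure that format_prompt used to flatten into a single user message.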
58 changes: 12 additions & 46 deletions g4f/Provider/CablyAI.py
@@ -1,38 +1,26 @@
 from __future__ import annotations
 
-from aiohttp import ClientSession
-import json
-
 from ..typing import AsyncResult, Messages
-from ..requests.raise_for_status import raise_for_status
-from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
-from .helper import format_prompt
 
+from .needs_auth import OpenaiAPI
 
-class CablyAI(AsyncGeneratorProvider, ProviderModelMixin):
+class CablyAI(OpenaiAPI):
     url = "https://cablyai.com"
-    api_endpoint = "https://cablyai.com/v1/chat/completions"
-
+    login_url = None
+    needs_auth = False
+    api_base = "https://cablyai.com/v1"
     working = True
     supports_stream = True
     supports_system_message = True
     supports_message_history = True
 
     default_model = "Cably-80B"
     models = [default_model]
 
     model_aliases = {"cably-80b": default_model}
 
     @classmethod
-    async def create_async_generator(
+    def create_async_generator(
         cls,
         model: str,
         messages: Messages,
         stream: bool = False,
         proxy: str = None,
         **kwargs
     ) -> AsyncResult:
-        model = cls.get_model(model)
         headers = {
             'Accept': '*/*',
             'Accept-Language': 'en-US,en;q=0.9',
@@ -41,31 +29,9 @@ async def create_async_generator(
             'Referer': 'https://cablyai.com/chat',
             'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Safari/537.36'
         }
-
-        async with ClientSession(headers=headers) as session:
-            data = {
-                "model": model,
-                "messages": messages,
-                "stream": stream
-            }
-
-            async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response:
-                await raise_for_status(response)
-                buffer = ""
-                async for chunk in response.content:
-                    if chunk:
-                        buffer += chunk.decode()
-                        while "\n\n" in buffer:
-                            chunk_data, buffer = buffer.split("\n\n", 1)
-                            if chunk_data.startswith("data: "):
-                                try:
-                                    json_data = json.loads(chunk_data[6:])
-                                    if "choices" in json_data and json_data["choices"]:
-                                        content = json_data["choices"][0]["delta"].get("content", "")
-                                        if content:
-                                            yield content
-                                except json.JSONDecodeError:
-                                    # Skip invalid JSON
-                                    pass
-                            elif chunk_data.strip() == "data: [DONE]":
-                                return
+        return super().create_async_generator(
+            model=model,
+            messages=messages,
+            headers=headers,
+            **kwargs
+        )
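
The hand-rolled SSE parsing loop is gone: CablyAI now subclasses OpenaiAPI and inherits its OpenAI-compatible request and streaming logic, contributing only its api_base and browser-like headers. A minimal driver sketch (assumed usage, not part of the commit; the import path and streamed chunk types are assumptions):

import asyncio
from g4f.Provider.CablyAI import CablyAI

async def main():
    messages = [{"role": "user", "content": "Hello"}]
    # create_async_generator is now a plain classmethod that returns the
    # async generator built by the OpenaiAPI base class.
    async for chunk in CablyAI.create_async_generator("Cably-80B", messages):
        if isinstance(chunk, str):  # skip non-text items such as finish markers
            print(chunk, end="")

asyncio.run(main())

Delegating to the shared base class removes the per-provider buffering and JSON-chunk handling that this file previously duplicated.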
13 changes: 7 additions & 6 deletions g4f/Provider/ChatGLM.py
@@ -8,7 +8,6 @@
 from ..typing import AsyncResult, Messages
 from ..requests.raise_for_status import raise_for_status
 from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
-from .helper import format_prompt
 
 class ChatGLM(AsyncGeneratorProvider, ProviderModelMixin):
     url = "https://chatglm.cn"
@@ -17,7 +16,7 @@ class ChatGLM(AsyncGeneratorProvider, ProviderModelMixin):
     working = True
     supports_stream = True
     supports_system_message = False
-    supports_message_history = True
+    supports_message_history = False
 
     default_model = "all-tools-230b"
     models = [default_model]
@@ -47,7 +46,6 @@ async def create_async_generator(
         }
 
         async with ClientSession(headers=headers) as session:
-            prompt = format_prompt(messages)
             data = {
                 "assistant_id": "65940acff94777010aa6b796",
                 "conversation_id": "",
@@ -62,17 +60,19 @@ async def create_async_generator(
                 },
                 "messages": [
                     {
-                        "role": "user",
+                        "role": message["role"],
                         "content": [
                             {
                                 "type": "text",
-                                "text": prompt
+                                "text": message["content"]
                             }
                         ]
                     }
+                    for message in messages
                 ]
             }
 
+            yield_text = 0
             async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response:
                 await raise_for_status(response)
                 async for chunk in response.content:
@@ -85,8 +85,9 @@ async def create_async_generator(
                             if parts:
                                 content = parts[0].get('content', [])
                                 if content:
-                                    text = content[0].get('text', '')
+                                    text = content[0].get('text', '')[yield_text:]
                                     if text:
                                         yield text
+                                        yield_text += len(text)
                     except json.JSONDecodeError:
                         pass
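
The new yield_text counter adapts to ChatGLM's stream format, which appears to resend the cumulative answer text on every event rather than a delta; slicing with a running offset converts the snapshots into increments. A self-contained illustration with made-up chunks:

# Simulated events: each carries the full text so far, not just the new part.
snapshots = ["Hel", "Hello", "Hello, wor", "Hello, world!"]

yield_text = 0  # number of characters already emitted
for cumulative in snapshots:
    text = cumulative[yield_text:]  # keep only the unseen suffix
    if text:
        print(text, end="|")
        yield_text += len(text)
# Output: Hel|lo|, wor|ld!|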
14 changes: 3 additions & 11 deletions g4f/Provider/ChatGptEs.py
@@ -2,7 +2,6 @@
 
 import os
 import re
-import json
 
 from aiohttp import ClientSession
 
@@ -17,20 +16,14 @@ class ChatGptEs(AsyncGeneratorProvider, ProviderModelMixin):
 
     working = True
     supports_stream = True
-    supports_system_message = True
-    supports_message_history = True
+    supports_system_message = False
+    supports_message_history = False
 
     default_model = 'gpt-4o'
     models = ['gpt-4', default_model, 'gpt-4o-mini']
 
     SYSTEM_PROMPT = "Your default language is English. Always respond in English unless the user's message is in a different language. If the user's message is not in English, respond in the language of the user's message. Maintain this language behavior throughout the conversation unless explicitly instructed otherwise. User input:"
 
-    @classmethod
-    def get_model(cls, model: str) -> str:
-        if model in cls.models:
-            return model
-        return cls.model_aliases[model]
-
     @classmethod
     async def create_async_generator(
         cls,
@@ -68,10 +61,9 @@ async def create_async_generator(
                 'wpaicg_chat_client_id': os.urandom(5).hex(),
                 'wpaicg_chat_history': None
             }
-            print(payload['message'])
 
             async with session.post(cls.api_endpoint, headers=headers, data=payload) as response:
-                response.raise_for_status()
+                await raise_for_status(response)
                 result = await response.json()
                 if "Du musst das Kästchen anklicken!" in result['data']:
                     raise ValueError(result['data'])
11 changes: 2 additions & 9 deletions g4f/Provider/ChatGptt.py
@@ -2,7 +2,6 @@
 
 import os
 import re
-import json
 
 from aiohttp import ClientSession
 
@@ -23,12 +22,6 @@ class ChatGptt(AsyncGeneratorProvider, ProviderModelMixin):
     default_model = 'gpt-4o'
     models = ['gpt-4', default_model, 'gpt-4o-mini']
 
-    @classmethod
-    def get_model(cls, model: str) -> str:
-        if model in cls.models:
-            return model
-        return cls.model_aliases[model]
-
     @classmethod
     async def create_async_generator(
         cls,
@@ -64,7 +57,7 @@ async def create_async_generator(
             'wpaicg_chat_history': None
         }
 
-        async with session.post(cls.api_endpoint, headers=headers, data=payload) as response:
+        async with session.post(cls.api_endpoint, headers=headers, data=payload, proxy=proxy) as response:
             await raise_for_status(response)
             result = await response.json()
-            yield result['data']
\ No newline at end of file
+            yield result['data']
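
Both ChatGptEs and ChatGptt delete their identical get_model overrides and fall back to the shared resolution in ProviderModelMixin. A rough sketch of that kind of lookup (illustrative only; the actual mixin in g4f may differ in details such as default handling and error types):

class ProviderModelMixinSketch:
    default_model: str = None
    models: list = []
    model_aliases: dict = {}

    @classmethod
    def get_model(cls, model: str) -> str:
        # Fall back to the default model, resolve known aliases,
        # and accept anything the provider explicitly lists.
        if not model and cls.default_model:
            return cls.default_model
        if model in cls.model_aliases:
            return cls.model_aliases[model]
        if model in cls.models:
            return model
        raise ValueError(f"Model not supported: {model}")

Centralizing the lookup keeps alias behavior consistent across providers instead of being re-implemented per class.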
94 changes: 57 additions & 37 deletions g4f/Provider/DDG.py
@@ -1,15 +1,23 @@
 from __future__ import annotations
 
-from aiohttp import ClientSession, ClientTimeout, ClientError
+from aiohttp import ClientSession, ClientTimeout
 import json
 import asyncio
 import random
 
-from ..typing import AsyncResult, Messages
+from ..typing import AsyncResult, Messages, Cookies
 from ..requests.raise_for_status import raise_for_status
 from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
 from .helper import format_prompt
+from ..providers.response import FinishReason, JsonConversation
+
+class Conversation(JsonConversation):
+    vqd: str = None
+    message_history: Messages = []
+    cookies: dict = {}
+
+    def __init__(self, model: str):
+        self.model = model
 
 class DDG(AsyncGeneratorProvider, ProviderModelMixin):
     label = "DuckDuckGo AI Chat"
@@ -74,42 +82,54 @@ async def create_async_generator(
         messages: Messages,
         proxy: str = None,
         timeout: int = 30,
+        cookies: Cookies = None,
+        conversation: Conversation = None,
+        return_conversation: bool = False,
         **kwargs
     ) -> AsyncResult:
         model = cls.get_model(model)
-
-        async with ClientSession(timeout=ClientTimeout(total=timeout)) as session:
-            try:
-                # Fetch VQD token with retries
-                vqd = await cls.fetch_vqd(session)
-
-                headers = {
-                    "accept": "text/event-stream",
-                    "content-type": "application/json",
-                    "x-vqd-4": vqd,
-                    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/121.0.0.0 Safari/537.36"
-                }
-
-                data = {
-                    "model": model,
-                    "messages": [{"role": "user", "content": format_prompt(messages)}],
-                }
-
-                async with session.post(cls.api_endpoint, json=data, headers=headers, proxy=proxy) as response:
-                    await raise_for_status(response)
-                    async for line in response.content:
-                        line = line.decode("utf-8").strip()
-                        if line.startswith("data:"):
-                            try:
-                                message = json.loads(line[5:].strip())
-                                if "message" in message:
-                                    yield message["message"]
-                            except json.JSONDecodeError:
-                                continue
-
-            except ClientError as e:
-                raise Exception(f"HTTP ClientError occurred: {e}")
-            except asyncio.TimeoutError:
-                raise Exception("Request timed out.")
-            except Exception as e:
-                raise Exception(f"An error occurred: {str(e)}")
+        if cookies is None and conversation is not None:
+            cookies = conversation.cookies
+        async with ClientSession(timeout=ClientTimeout(total=timeout), cookies=cookies) as session:
+            # Fetch VQD token
+            if conversation is None:
+                conversation = Conversation(model)
+                conversation.vqd = await cls.fetch_vqd(session)
+                conversation.message_history = [{"role": "user", "content": format_prompt(messages)}]
+            else:
+                conversation.message_history.append(messages[-1])
+            headers = {
+                "accept": "text/event-stream",
+                "content-type": "application/json",
+                "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/121.0.0.0 Safari/537.36",
+                "x-vqd-4": conversation.vqd,
+            }
+            data = {
+                "model": model,
+                "messages": conversation.message_history,
+            }
+            async with session.post(cls.api_endpoint, json=data, headers=headers, proxy=proxy) as response:
+                await raise_for_status(response)
+                reason = None
+                full_message = ""
+                async for line in response.content:
+                    line = line.decode("utf-8").strip()
+                    if line.startswith("data:"):
+                        try:
+                            message = json.loads(line[5:].strip())
+                            if "message" in message:
+                                if message["message"]:
+                                    yield message["message"]
+                                    full_message += message["message"]
+                                    reason = "length"
+                                else:
+                                    reason = "stop"
+                        except json.JSONDecodeError:
+                            continue
+                if return_conversation:
+                    conversation.message_history.append({"role": "assistant", "content": full_message})
+                    conversation.vqd = response.headers.get("x-vqd-4", conversation.vqd)
+                    conversation.cookies = {n: c.value for n, c in session.cookie_jar.filter_cookies(cls.url).items()}
+                    yield conversation
+                if reason is not None:
+                    yield FinishReason(reason)
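
Putting the DDG changes together: the first call builds a Conversation, records the rotating x-vqd-4 token from the response headers, and captures cookies; passing the object back in continues the same chat. A hedged usage sketch (driver code assumed, not part of the commit; the model name is taken from DDG's usual model list):

import asyncio
from g4f.Provider.DDG import DDG, Conversation

async def chat():
    conversation = None
    async for item in DDG.create_async_generator(
        "gpt-4o-mini",
        [{"role": "user", "content": "Name three colors."}],
        return_conversation=True,
    ):
        if isinstance(item, Conversation):
            conversation = item  # carries vqd token, message history, cookies
        elif isinstance(item, str):
            print(item, end="")

    # The follow-up request reuses the stored vqd, history and cookies,
    # so the model sees the earlier turns.
    async for item in DDG.create_async_generator(
        "gpt-4o-mini",
        [{"role": "user", "content": "Now pick your favorite."}],
        conversation=conversation,
    ):
        if isinstance(item, str):
            print(item, end="")

asyncio.run(chat())

Note that the assistant reply is appended to message_history only when return_conversation is set, so callers who want multi-turn behavior must request the conversation object on every call.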
2 changes: 1 addition & 1 deletion g4f/Provider/ImageLabs.py
@@ -48,7 +48,7 @@ async def create_async_generator(
         }
 
         async with ClientSession(headers=headers) as session:
-            prompt = messages[-1]["content"]
+            prompt = messages[-1]["content"] if prompt is None else prompt
 
             # Generate image
             payload = {
2 changes: 2 additions & 0 deletions g4f/Provider/Pizzagpt.py
@@ -46,4 +46,6 @@ async def create_async_generator(
                 response_json = await response.json()
                 content = response_json.get("answer", response_json).get("content")
                 if content:
+                    if "misuse detected. please get in touch" in content:
+                        raise ValueError(content)
                     yield content