Skip to content
This repository was archived by the owner on Mar 14, 2024. It is now read-only.

Commit 4084836

Browse files
authored
Add files via upload
1 parent b17e41f commit 4084836

21 files changed

+1410
-0
lines changed

g4f/Provider/deprecated/Acytoo.py

Lines changed: 51 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,51 @@
1+
from __future__ import annotations
2+
3+
from aiohttp import ClientSession
4+
5+
from ...typing import AsyncResult, Messages
6+
from ..base_provider import AsyncGeneratorProvider
7+
8+
9+
class Acytoo(AsyncGeneratorProvider):
    """Async streaming provider for chat.acytoo.com (deprecated, not working)."""
    url = 'https://chat.acytoo.com'
    working = False
    supports_message_history = True
    supports_gpt_35_turbo = True

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: Messages,
        proxy: str = None,
        **kwargs
    ) -> AsyncResult:
        """POST the conversation to /api/completions and yield decoded chunks
        as they stream back."""
        session = ClientSession(headers=_create_header())
        async with session:
            request = session.post(
                f'{cls.url}/api/completions',
                proxy=proxy,
                json=_create_payload(messages, **kwargs),
            )
            async with request as response:
                response.raise_for_status()
                async for chunk in response.content.iter_any():
                    # iter_any() may emit empty chunks; skip them.
                    if not chunk:
                        continue
                    yield chunk.decode()
35+
36+
37+
def _create_header():
38+
return {
39+
'accept': '*/*',
40+
'content-type': 'application/json',
41+
}
42+
43+
44+
def _create_payload(messages: Messages, temperature: float = 0.5, **kwargs):
45+
return {
46+
'key' : '',
47+
'model' : 'gpt-3.5-turbo',
48+
'messages' : messages,
49+
'temperature' : temperature,
50+
'password' : ''
51+
}

g4f/Provider/deprecated/AiService.py

Lines changed: 39 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,39 @@
1+
from __future__ import annotations
2+
3+
import requests
4+
5+
from ...typing import Any, CreateResult, Messages
6+
from ..base_provider import BaseProvider
7+
8+
9+
class AiService(BaseProvider):
    """Blocking provider for aiservice.vercel.app (deprecated, not working)."""
    url = "https://aiservice.vercel.app/"
    working = False
    supports_gpt_35_turbo = True

    @staticmethod
    def create_completion(
        model: str,
        messages: Messages,
        stream: bool,
        **kwargs: Any,
    ) -> CreateResult:
        """Flatten the chat history into a transcript, POST it, and yield the
        single answer string from the JSON response."""
        # One "role: content" line per message, with a trailing assistant cue
        # so the model continues the conversation.
        transcript = "\n".join(
            f"{message['role']}: {message['content']}" for message in messages
        )
        prompt = transcript + "\nassistant: "

        headers = {
            "accept": "*/*",
            "content-type": "text/plain;charset=UTF-8",
            "sec-fetch-dest": "empty",
            "sec-fetch-mode": "cors",
            "sec-fetch-site": "same-origin",
            "Referer": "https://aiservice.vercel.app/chat",
        }
        response = requests.post(
            "https://aiservice.vercel.app/api/chat/answer",
            headers=headers,
            json={"input": prompt},
        )
        response.raise_for_status()
        yield response.json()["data"]

g4f/Provider/deprecated/Aibn.py

Lines changed: 46 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,46 @@
1+
from __future__ import annotations
2+
3+
import time
4+
import hashlib
5+
6+
from ...typing import AsyncResult, Messages
7+
from ...requests import StreamSession
8+
from ..base_provider import AsyncGeneratorProvider
9+
10+
11+
class Aibn(AsyncGeneratorProvider):
    """Async streaming provider for aibn.cc (deprecated, not working)."""
    url = "https://aibn.cc"
    working = False
    supports_message_history = True
    supports_gpt_35_turbo = True

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: Messages,
        proxy: str = None,
        timeout: int = 120,
        **kwargs
    ) -> AsyncResult:
        """Stream decoded chunks from the signed /api/generate endpoint.

        The request carries a SHA-256 signature over the current timestamp
        and the last message's content (see generate_signature).
        """
        session = StreamSession(
            impersonate="chrome107",
            proxies={"https": proxy},
            timeout=timeout,
        )
        async with session:
            now = int(time.time())
            payload = {
                "messages": messages,
                "pass": None,
                "sign": generate_signature(now, messages[-1]["content"]),
                "time": now,
            }
            async with session.post(f"{cls.url}/api/generate", json=payload) as response:
                response.raise_for_status()
                async for part in response.iter_content():
                    yield part.decode()
42+
43+
44+
def generate_signature(timestamp: int, message: str, secret: str = "undefined"):
    """Return the SHA-256 hex digest of "<timestamp>:<message>:<secret>"."""
    parts = (str(timestamp), message, secret)
    return hashlib.sha256(":".join(parts).encode()).hexdigest()

g4f/Provider/deprecated/Ails.py

Lines changed: 90 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,90 @@
1+
from __future__ import annotations
2+
3+
import hashlib
4+
import time
5+
import uuid
6+
import json
7+
from datetime import datetime
8+
from aiohttp import ClientSession
9+
10+
from ...typing import SHA256, AsyncResult, Messages
11+
from ..base_provider import AsyncGeneratorProvider
12+
13+
14+
class Ails(AsyncGeneratorProvider):
    """Async streaming provider for ai.ls / api.caipacity.com (deprecated,
    not working)."""
    url = "https://ai.ls"
    working = False
    supports_message_history = True
    supports_gpt_35_turbo = True

    @staticmethod
    async def create_async_generator(
        model: str,
        messages: Messages,
        stream: bool,
        proxy: str = None,
        **kwargs
    ) -> AsyncResult:
        """Stream completion tokens from the backend's SSE endpoint.

        Each request is signed via _hash over a mangled millisecond
        timestamp (_format_timestamp) and the last message's content.
        """
        headers = {
            "authority": "api.caipacity.com",
            "accept": "*/*",
            "accept-language": "en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3",
            "authorization": "Bearer free",
            "client-id": str(uuid.uuid4()),
            "client-v": "0.1.278",
            "content-type": "application/json",
            "origin": "https://ai.ls",
            "referer": "https://ai.ls/",
            "sec-ch-ua": '"Not.A/Brand";v="8", "Chromium";v="114", "Google Chrome";v="114"',
            "sec-ch-ua-mobile": "?0",
            "sec-ch-ua-platform": '"Windows"',
            "sec-fetch-dest": "empty",
            "sec-fetch-mode": "cors",
            "sec-fetch-site": "cross-site",
            "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36",
            "from-url": "https://ai.ls/?chat=1"
        }
        async with ClientSession(headers=headers) as session:
            timestamp = _format_timestamp(int(time.time() * 1000))
            payload = {
                "model": "gpt-3.5-turbo",
                "temperature": kwargs.get("temperature", 0.6),
                "stream": True,
                "messages": messages,
                "d": datetime.now().strftime("%Y-%m-%d"),
                "t": timestamp,
                "s": _hash({"t": timestamp, "m": messages[-1]["content"]}),
            }
            request = session.post(
                "https://api.caipacity.com/v1/chat/completions",
                proxy=proxy,
                json=payload
            )
            async with request as response:
                response.raise_for_status()
                prefix = "data: "
                async for raw in response.content:
                    decoded = raw.decode('utf-8')
                    # Only SSE data lines carry tokens; skip the terminator.
                    if not decoded.startswith(prefix) or decoded == "data: [DONE]":
                        continue
                    # Strip the "data: " prefix and trailing newline.
                    event = json.loads(decoded[len(prefix):-1])
                    token = event["choices"][0]["delta"].get("content")
                    if not token:
                        continue
                    # The free endpoint injects ad strings; treat them as errors.
                    if "ai.ls" in token or "ai.ci" in token:
                        raise Exception(f"Response Error: {token}")
                    yield token
78+
79+
80+
def _hash(json_data: dict[str, str]) -> SHA256:
    """Compute the request signature expected by the backend.

    The signed string is "<t>:<m>:<fixed salt>:<len(m)>" where ``t`` is the
    mangled timestamp and ``m`` the last message's content.
    """
    salt = 'WI,2rU#_r:r~aF4aJ36[.Z(/8Rv93Rf'
    message = json_data["m"]
    base_string: str = f'{json_data["t"]}:{message}:{salt}:{len(message)}'
    return SHA256(hashlib.sha256(base_string.encode()).hexdigest())
84+
85+
86+
def _format_timestamp(timestamp: int) -> str:
87+
e = timestamp
88+
n = e % 10
89+
r = n + 1 if n % 2 == 0 else n
90+
return str(e - n + r)

g4f/Provider/deprecated/Aivvm.py

Lines changed: 72 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,72 @@
1+
from __future__ import annotations
2+
import requests
3+
4+
from ..base_provider import BaseProvider
5+
from ...typing import CreateResult, Messages
6+
from json import dumps
7+
8+
# to recreate this easily, send a post request to https://chat.aivvm.com/api/models
# Maps each supported model id to the {id, name} record the API expects.
models = {
    model_id: {'id': model_id, 'name': display_name}
    for model_id, display_name in [
        ('gpt-3.5-turbo', 'GPT-3.5'),
        ('gpt-3.5-turbo-0613', 'GPT-3.5-0613'),
        ('gpt-3.5-turbo-16k', 'GPT-3.5-16K'),
        ('gpt-3.5-turbo-16k-0613', 'GPT-3.5-16K-0613'),
        ('gpt-4', 'GPT-4'),
        ('gpt-4-0613', 'GPT-4-0613'),
        ('gpt-4-32k', 'GPT-4-32K'),
        ('gpt-4-32k-0613', 'GPT-4-32K-0613'),
    ]
}
19+
20+
class Aivvm(BaseProvider):
    """Streaming provider for chat.aivvm.com (deprecated, not working)."""
    url = 'https://chat.aivvm.com'
    supports_stream = True
    working = False
    supports_gpt_35_turbo = True
    supports_gpt_4 = True

    @classmethod
    def create_completion(cls,
        model: str,
        messages: Messages,
        stream: bool,
        **kwargs
    ) -> CreateResult:
        """Yield raw text chunks streamed from the /api/chat endpoint.

        Raises ValueError when ``model`` is set but not one of the ids the
        site exposes (see the module-level ``models`` table).
        """
        # Default to gpt-3.5-turbo when no model is given.
        model = model or "gpt-3.5-turbo"
        if model not in models:
            raise ValueError(f"Model is not supported: {model}")

        body = dumps({
            "model": models[model],
            "messages": messages,
            "key": "",
            "prompt": kwargs.get(
                "system_message",
                "You are ChatGPT, a large language model trained by OpenAI. Follow the user's instructions carefully. Respond using markdown."
            ),
            "temperature": kwargs.get("temperature", 0.7),
        })

        headers = {
            "accept": "text/event-stream",
            "accept-language": "en-US,en;q=0.9",
            "content-type": "application/json",
            "content-length": str(len(body)),
            "sec-ch-ua": "\"Chrome\";v=\"117\", \"Not;A=Brand\";v=\"8\", \"Chromium\";v=\"117\"",
            "sec-ch-ua-mobile": "?0",
            "sec-ch-ua-platform": "\"Windows\"",
            "sec-fetch-dest": "empty",
            "sec-fetch-mode": "cors",
            "sec-fetch-site": "same-origin",
            "sec-gpc": "1",
            "referrer": "https://chat.aivvm.com/",
            "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.0.0 Safari/537.36"
        }

        response = requests.post("https://chat.aivvm.com/api/chat", headers=headers, data=body, stream=True)
        response.raise_for_status()

        for chunk in response.iter_content(chunk_size=4096):
            try:
                yield chunk.decode("utf-8")
            except UnicodeDecodeError:
                # Fall back for chunks split mid-codepoint or oddly escaped.
                yield chunk.decode("unicode-escape")

g4f/Provider/deprecated/ChatgptDuo.py

Lines changed: 47 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,47 @@
1+
from __future__ import annotations
2+
3+
from ...typing import Messages
4+
from curl_cffi.requests import AsyncSession
5+
from ..base_provider import AsyncProvider, format_prompt
6+
7+
8+
class ChatgptDuo(AsyncProvider):
    """Async provider for chatgptduo.com (deprecated, not working).

    Besides the answer text, the site returns web-search sources; they are
    stored on the class and can be read back with get_sources() after
    create_async() completes.
    """
    url = "https://chatgptduo.com"
    supports_gpt_35_turbo = True
    working = False
    # Sources of the most recent response; defaults to an empty list so
    # get_sources() never raises AttributeError before the first request.
    _sources = []

    @classmethod
    async def create_async(
        cls,
        model: str,
        messages: Messages,
        proxy: str = None,
        timeout: int = 120,
        **kwargs
    ) -> str:
        """POST the formatted prompt and return the site's answer string."""
        async with AsyncSession(
            impersonate="chrome107",
            proxies={"https": proxy},
            timeout=timeout
        ) as session:
            # BUG FIX: the original line ended with a stray comma, turning
            # `prompt` into a 1-tuple that was then sent as form data.
            prompt = format_prompt(messages)
            data = {
                "prompt": prompt,
                "search": prompt,
                "purpose": "ask",
            }
            response = await session.post(f"{cls.url}/", data=data)
            response.raise_for_status()
            data = response.json()

            # Normalize the site's "results" into {title, url, snippet}.
            cls._sources = [{
                "title": source["title"],
                "url": source["link"],
                "snippet": source["snippet"]
            } for source in data["results"]]

            return data["answer"]

    @classmethod
    def get_sources(cls):
        """Return the sources captured by the most recent create_async call."""
        return cls._sources

0 commit comments

Comments
 (0)