Skip to content
This repository was archived by the owner on Mar 14, 2024. It is now read-only.

Commit b17e41f

Browse files
authored
Add files via upload
1 parent 90657ad commit b17e41f

File tree

9 files changed

+936
-0
lines changed

9 files changed

+936
-0
lines changed

g4f/Provider/needs_auth/Bard.py

Lines changed: 78 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,78 @@
1+
from __future__ import annotations
2+
3+
import time
4+
import os
5+
from selenium.webdriver.common.by import By
6+
from selenium.webdriver.support.ui import WebDriverWait
7+
from selenium.webdriver.support import expected_conditions as EC
8+
from selenium.webdriver.common.keys import Keys
9+
10+
from ...typing import CreateResult, Messages
11+
from ..base_provider import BaseProvider
12+
from ..helper import format_prompt
13+
from ...webdriver import WebDriver, WebDriverSession
14+
15+
class Bard(BaseProvider):
    """Provider for Google Bard that drives a real browser via Selenium.

    The user must already be logged in to a Google account in the browser
    profile (hence ``needs_auth``).  The answer is captured by injecting a
    JavaScript hook into ``XMLHttpRequest`` on the Bard page.
    """
    url = "https://bard.google.com"
    working = True
    needs_auth = True

    @classmethod
    def create_completion(
        cls,
        model: str,
        messages: Messages,
        stream: bool,
        proxy: str = None,
        webdriver: WebDriver = None,
        user_data_dir: str = None,
        headless: bool = True,
        **kwargs
    ) -> CreateResult:
        """Yield the Bard answer for *messages* as a single text chunk.

        Args:
            model: Ignored by Bard; kept for interface compatibility.
            messages: Chat history, flattened with ``format_prompt``.
            stream: Ignored; the response is yielded once, when complete.
            proxy: Optional proxy URL passed to the webdriver session.
            webdriver: An existing driver to reuse; a new one is created if None.
            user_data_dir: Browser profile directory (holds the Google login).
            headless: Run the browser without a window.  When True, the login
                fallback cannot be completed interactively.

        Raises:
            RuntimeError: If the prompt textarea never appears and an external
                ``webdriver`` was supplied (so we must not reopen it).
        """
        prompt = format_prompt(messages)
        session = WebDriverSession(webdriver, user_data_dir, headless, proxy=proxy)
        with session as driver:
            try:
                driver.get(f"{cls.url}/chat")
                # Short timeout when headless (nobody can log in by hand);
                # generous timeout otherwise to allow a manual login.
                wait = WebDriverWait(driver, 10 if headless else 240)
                wait.until(EC.visibility_of_element_located((By.CSS_SELECTOR, "div.ql-editor.textarea")))
            except Exception:
                # Was a bare `except:`, which also swallowed SystemExit and
                # KeyboardInterrupt; narrowed to Exception.
                # Reopen browser for login
                if not webdriver:
                    driver = session.reopen()
                    driver.get(f"{cls.url}/chat")
                    login_url = os.environ.get("G4F_LOGIN_URL")
                    if login_url:
                        yield f"Please login: [Google Bard]({login_url})\n\n"
                    wait = WebDriverWait(driver, 240)
                    wait.until(EC.visibility_of_element_located((By.CSS_SELECTOR, "div.ql-editor.textarea")))
                else:
                    raise RuntimeError("Prompt textarea not found. You may not be logged in.")

            # Add hook in XMLHttpRequest: intercept the StreamGenerate call and
            # stash the parsed answer text in window._message for polling below.
            script = """
const _http_request_open = XMLHttpRequest.prototype.open;
window._message = "";
XMLHttpRequest.prototype.open = function(method, url) {
    if (url.includes("/assistant.lamda.BardFrontendService/StreamGenerate")) {
        this.addEventListener("load", (event) => {
            window._message = JSON.parse(JSON.parse(this.responseText.split("\\n")[3])[0][2])[4][0][1][0];
        });
    }
    return _http_request_open.call(this, method, url);
}
"""
            driver.execute_script(script)

            # Submit prompt
            driver.find_element(By.CSS_SELECTOR, "div.ql-editor.textarea").send_keys(prompt)
            driver.find_element(By.CSS_SELECTOR, "div.ql-editor.textarea").send_keys(Keys.ENTER)

            # Yield response: poll the hook's sentinel until the answer lands.
            while True:
                chunk = driver.execute_script("return window._message;")
                if chunk:
                    yield chunk
                    return
                else:
                    time.sleep(0.1)
Lines changed: 60 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,60 @@
1+
from __future__ import annotations
2+
3+
import json, uuid
4+
5+
from aiohttp import ClientSession
6+
7+
from ...typing import AsyncResult, Messages
8+
from ..base_provider import AsyncGeneratorProvider
9+
from ..helper import format_prompt, get_cookies
10+
11+
12+
class HuggingChat(AsyncGeneratorProvider):
    """Async provider for huggingface.co/chat.

    Creates a conversation, streams the model's tokens, then deletes the
    conversation.  Authentication comes from browser cookies for
    ``.huggingface.co`` unless *cookies* is supplied explicitly.
    """
    url = "https://huggingface.co/chat"
    working = True
    model = "meta-llama/Llama-2-70b-chat-hf"

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: Messages,
        stream: bool = True,
        proxy: str = None,
        web_search: bool = False,
        cookies: dict = None,
        **kwargs
    ) -> AsyncResult:
        """Yield response tokens for *messages* from HuggingChat.

        Args:
            model: Model id; falls back to ``cls.model`` when falsy.
            messages: Chat history, flattened with ``format_prompt``.
            stream: Unused; the server always streams.
            proxy: Optional proxy URL for all requests.
            web_search: Ask the server to perform a web search first.
            cookies: Session cookies; read from the browser when None.

        Raises:
            RuntimeError: If a streamed line has no ``type`` field.
        """
        model = model if model else cls.model
        if not cookies:
            cookies = get_cookies(".huggingface.co")

        headers = {
            'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/111.0.0.0 Safari/537.36',
        }
        async with ClientSession(
            cookies=cookies,
            headers=headers
        ) as session:
            async with session.post(f"{cls.url}/conversation", json={"model": model}, proxy=proxy) as response:
                # Fail early with a clear HTTP error (e.g. 401 on bad cookies)
                # instead of an opaque KeyError below.
                response.raise_for_status()
                conversation_id = (await response.json())["conversationId"]

            send = {
                "id": str(uuid.uuid4()),
                "inputs": format_prompt(messages),
                "is_retry": False,
                "response_id": str(uuid.uuid4()),
                "web_search": web_search
            }
            async with session.post(f"{cls.url}/conversation/{conversation_id}", json=send, proxy=proxy) as response:
                async for line in response.content:
                    # Skip keep-alive blank lines: json.loads("") would raise.
                    if not line.strip():
                        continue
                    line = json.loads(line[:-1])  # drop the trailing newline byte
                    if "type" not in line:
                        raise RuntimeError(f"Response: {line}")
                    elif line["type"] == "stream":
                        yield line["token"]
                    elif line["type"] == "finalAnswer":
                        break

            # Clean up the server-side conversation.
            async with session.delete(f"{cls.url}/conversation/{conversation_id}", proxy=proxy) as response:
                response.raise_for_status()
Lines changed: 89 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,89 @@
1+
from __future__ import annotations
2+
3+
import json
4+
5+
from aiohttp import ClientSession
6+
7+
from ...typing import AsyncResult, Messages
8+
from ..base_provider import AsyncGeneratorProvider
9+
from ..helper import format_prompt, get_cookies
10+
11+
12+
class OpenAssistant(AsyncGeneratorProvider):
    """Async provider for open-assistant.io chat (marked not ``working``).

    Flow: create chat -> post prompter message -> request assistant message ->
    stream token events -> delete chat.  Requires open-assistant.io cookies.
    """
    url = "https://open-assistant.io/chat"
    needs_auth = True
    working = False
    model = "OA_SFT_Llama_30B_6"

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: Messages,
        proxy: str = None,
        cookies: dict = None,
        **kwargs
    ) -> AsyncResult:
        """Yield response tokens for *messages* from Open Assistant.

        Args:
            model: Model config name; falls back to ``cls.model`` when falsy.
            messages: Chat history, flattened with ``format_prompt``.
            proxy: Optional proxy URL for all requests.
            cookies: Session cookies; read from the browser when None.
            **kwargs: Extra sampling parameters merged into the request.

        Raises:
            RuntimeError: If the server rejects the assistant-message request.
        """
        if not cookies:
            cookies = get_cookies("open-assistant.io")

        headers = {
            'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/111.0.0.0 Safari/537.36',
        }
        async with ClientSession(
            cookies=cookies,
            headers=headers
        ) as session:
            async with session.post("https://open-assistant.io/api/chat", proxy=proxy) as response:
                chat_id = (await response.json())["id"]

            data = {
                "chat_id": chat_id,
                "content": f"<s>[INST]\n{format_prompt(messages)}\n[/INST]",
                "parent_id": None
            }
            async with session.post("https://open-assistant.io/api/chat/prompter_message", proxy=proxy, json=data) as response:
                parent_id = (await response.json())["id"]

            data = {
                "chat_id": chat_id,
                "parent_id": parent_id,
                "model_config_name": model if model else cls.model,
                "sampling_parameters": {
                    "top_k": 50,
                    "top_p": None,
                    "typical_p": None,
                    "temperature": 0.35,
                    "repetition_penalty": 1.1111111111111112,
                    "max_new_tokens": 1024,
                    **kwargs
                },
                "plugins": []
            }
            async with session.post("https://open-assistant.io/api/chat/assistant_message", proxy=proxy, json=data) as response:
                data = await response.json()
                if "id" in data:
                    message_id = data["id"]
                elif "message" in data:
                    raise RuntimeError(data["message"])
                else:
                    response.raise_for_status()
                    # A 2xx payload without "id" or "message" would previously
                    # leave message_id unbound (NameError below); fail clearly.
                    raise RuntimeError(f"Unexpected response: {data}")

            params = {
                'chat_id': chat_id,
                'message_id': message_id,
            }
            async with session.post("https://open-assistant.io/api/chat/events", proxy=proxy, params=params) as response:
                start = "data: "
                async for line in response.content:
                    line = line.decode("utf-8")
                    # Only SSE data lines carry events; skip everything else.
                    if line and line.startswith(start):
                        line = json.loads(line[len(start):])
                        if line["event_type"] == "token":
                            yield line["text"]

            # Clean up the server-side chat.
            params = {
                'chat_id': chat_id,
            }
            async with session.delete("https://open-assistant.io/api/chat", proxy=proxy, params=params) as response:
                response.raise_for_status()

0 commit comments

Comments
 (0)