Skip to content

Commit

Permalink
Merge branch 'main' into patch-2
Browse files Browse the repository at this point in the history
  • Loading branch information
xtekky authored Apr 30, 2023
2 parents 55990be + f7dab64 commit ef9127d
Show file tree
Hide file tree
Showing 10 changed files with 168 additions and 19 deletions.
6 changes: 6 additions & 0 deletions gpt4free/forefront/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -98,12 +98,15 @@ def create(
action_type='new',
default_persona='607e41fe-95be-497e-8e97-010a59b2e2c0', # default
model='gpt-4',
proxy=None
) -> Generator[ForeFrontResponse, None, None]:
if not token:
raise Exception('Token is required!')
if not chat_id:
chat_id = str(uuid4())

proxies = { 'http': 'http://' + proxy, 'https': 'http://' + proxy } if proxy else None

headers = {
'authority': 'chat-server.tenant-forefront-default.knative.chi.coreweave.com',
'accept': '*/*',
Expand Down Expand Up @@ -135,6 +138,7 @@ def create(
for chunk in post(
'https://chat-server.tenant-forefront-default.knative.chi.coreweave.com/chat',
headers=headers,
proxies=proxies,
json=json_data,
stream=True,
).iter_lines():
Expand Down Expand Up @@ -169,6 +173,7 @@ def create(
action_type='new',
default_persona='607e41fe-95be-497e-8e97-010a59b2e2c0', # default
model='gpt-4',
proxy=None
) -> ForeFrontResponse:
text = ''
final_response = None
Expand All @@ -179,6 +184,7 @@ def create(
action_type=action_type,
default_persona=default_persona,
model=model,
proxy=proxy
):
if response:
final_response = response
Expand Down
8 changes: 7 additions & 1 deletion gpt4free/quora/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -187,7 +187,7 @@ def create(
enable_bot_creation: bool = False,
):
client = TLS(client_identifier='chrome110')
client.proxies = {'http': f'http://{proxy}', 'https': f'http://{proxy}'} if proxy else None
client.proxies = {'http': f'http://{proxy}', 'https': f'http://{proxy}'} if proxy else {}

mail_client = Emailnator()
mail_address = mail_client.get_mail()
Expand Down Expand Up @@ -293,10 +293,13 @@ def create(
custom_model: bool = None,
prompt: str = 'hello world',
token: str = '',
proxy: Optional[str] = None
) -> Generator[PoeResponse, None, None]:
_model = MODELS[model] if not custom_model else custom_model

proxies = { 'http': 'http://' + proxy, 'https': 'http://' + proxy } if proxy else False
client = PoeClient(token)
client.proxy = proxies

for chunk in client.send_message(_model, prompt):
yield PoeResponse(
Expand Down Expand Up @@ -330,10 +333,13 @@ def create(
custom_model: str = None,
prompt: str = 'hello world',
token: str = '',
proxy: Optional[str] = None
) -> PoeResponse:
_model = MODELS[model] if not custom_model else custom_model

proxies = {'http': 'http://' + proxy, 'https': 'http://' + proxy} if proxy else False
client = PoeClient(token)
client.proxy = proxies

chunk = None
for response in client.send_message(_model, prompt):
Expand Down
37 changes: 37 additions & 0 deletions gpt4free/quora/backup-mail.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,37 @@
from requests import Session
from time import sleep
from json import loads
from re import findall
class Mail:
    """Disposable-mailbox client for etempmail.com.

    Used to receive account-verification emails during automated sign-up.
    All methods perform live HTTP requests except `get_verification_code`,
    which only parses the previously fetched message body.
    """

    def __init__(self) -> None:
        self.client = Session()
        # The first request establishes the ci_session cookie that the
        # service expects to be echoed back on subsequent calls.
        self.client.post("https://etempmail.com/")
        self.cookies = {'acceptcookie': 'true'}
        self.cookies["ci_session"] = self.client.cookies.get_dict()["ci_session"]
        self.email = None  # populated by get_mail()

    def get_mail(self):
        """Request a fresh disposable address and remember it.

        :return: the newly allocated email address string.
        """
        response = self.client.post("https://etempmail.com/getEmailAddress")
        # Parse the JSON payload with json.loads instead of eval(): eval on
        # server-controlled text is unsafe and fails on JSON literals such
        # as true/false/null.
        payload = loads(response.text)
        self.cookies["lisansimo"] = payload["recover_key"]
        self.email = payload["address"]
        return self.email

    def get_message(self):
        """Poll the inbox every 5 seconds until one message arrives.

        Stores the message's HTML body on `self.mail_context` and returns it.
        NOTE(review): blocks indefinitely if no message ever arrives —
        consider adding a timeout.
        """
        print("Waiting for message...")
        while True:
            sleep(5)
            response = self.client.post("https://etempmail.com/getInbox")
            mail_token = loads(response.text)
            print(self.client.cookies.get_dict())
            if len(mail_token) == 1:
                break

        params = {'id': '1'}
        inbox = self.client.post("https://etempmail.com/getInbox", params=params)
        self.mail_context = loads(inbox.text)[0]["body"]
        return self.mail_context

    def get_verification_code(self):
        """Extract the 6-7 digit verification code from the stored message.

        :return: the code as a string.
        :raises IndexError: if no code pattern is present in the message.
        """
        message = self.mail_context
        code = findall(r';">(\d{6,7})</div>', message)[0]
        print(f"Verification code: {code}")
        return code
11 changes: 7 additions & 4 deletions gpt4free/theb/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,7 @@
from queue import Queue, Empty
from re import findall
from threading import Thread
from typing import Generator
from typing import Generator, Optional

from curl_cffi import requests
from fake_useragent import UserAgent
Expand All @@ -19,26 +19,29 @@ class Completion:
stream_completed = False

@staticmethod
def request(prompt: str):
def request(prompt: str, proxy: Optional[str]=None):
    """Send `prompt` to chatbot.theb.ai and stream the reply.

    The HTTP response body is consumed incrementally by
    `Completion.handle_stream_response` (registered as curl_cffi's
    `content_callback`), so nothing is returned here. When the request
    finishes, the class-level `stream_completed` flag is set so the
    consumer loop in `create` knows to stop draining the message queue.

    :param prompt: user message to send.
    :param proxy: optional "host:port" string; when given, the request is
        routed through it as an HTTP proxy for both schemes.
    """
    headers = {
        'authority': 'chatbot.theb.ai',
        'content-type': 'application/json',
        'origin': 'https://chatbot.theb.ai',
        # Random user agent per request to reduce blocking by the service.
        'user-agent': UserAgent().random,
    }

    # Build a requests-style proxies mapping, or None to use no proxy.
    proxies = {'http': 'http://' + proxy, 'https': 'http://' + proxy} if proxy else None

    requests.post(
        'https://chatbot.theb.ai/api/chat-process',
        headers=headers,
        proxies=proxies,
        content_callback=Completion.handle_stream_response,
        json={'prompt': prompt, 'options': {}},
    )

    Completion.stream_completed = True

@staticmethod
def create(prompt: str) -> Generator[str, None, None]:
Thread(target=Completion.request, args=[prompt]).start()
def create(prompt: str, proxy: Optional[str]=None) -> Generator[str, None, None]:
Thread(target=Completion.request, args=[prompt, proxy]).start()

while not Completion.stream_completed or not Completion.message_queue.empty():
try:
Expand Down
4 changes: 4 additions & 0 deletions gpt4free/you/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -30,12 +30,16 @@ def create(
include_links: bool = False,
detailed: bool = False,
debug: bool = False,
proxy: Optional[str] = None
) -> PoeResponse:
if chat is None:
chat = []

proxies = { 'http': 'http://' + proxy, 'https': 'http://' + proxy } if proxy else {}

client = Session(client_identifier='chrome_108')
client.headers = Completion.__get_headers()
client.proxies = proxies

response = client.get(
f'https://you.com/api/streamingSearch',
Expand Down
29 changes: 15 additions & 14 deletions gui/query_methods.py
Original file line number Diff line number Diff line change
@@ -1,37 +1,38 @@
import os
import sys
from typing import Optional

sys.path.append(os.path.join(os.path.dirname(__file__), os.path.pardir))

from gpt4free import quora, forefront, theb, you
import random


def query_forefront(question: str) -> str:
def query_forefront(question: str, proxy: Optional[str] = None) -> str:
# create an account
token = forefront.Account.create(logging=False)
token = forefront.Account.create(logging=False, proxy=proxy)

response = ""
# get a response
try:
return forefront.Completion.create(token=token, prompt='hello world', model='gpt-4').text
return forefront.Completion.create(token=token, prompt='hello world', model='gpt-4', proxy=proxy).text
except Exception as e:
# Return error message if an exception occurs
return (
f'An error occurred: {e}. Please make sure you are using a valid cloudflare clearance token and user agent.'
)


def query_quora(question: str) -> str:
token = quora.Account.create(logging=False, enable_bot_creation=True)
return quora.Completion.create(model='gpt-4', prompt=question, token=token).text
def query_quora(question: str, proxy: Optional[str] = None) -> str:
    """Answer `question` via the quora (poe.com) provider.

    Creates a throwaway account first, then runs a gpt-4 completion with it.

    :param question: prompt to send to the model.
    :param proxy: optional "host:port" HTTP proxy forwarded to both the
        account-creation and completion calls.
    :return: the model's reply text.
    """
    token = quora.Account.create(logging=False, enable_bot_creation=True, proxy=proxy)
    return quora.Completion.create(model='gpt-4', prompt=question, token=token, proxy=proxy).text


def query_theb(question: str) -> str:
def query_theb(question: str, proxy: Optional[str] = None) -> str:
# Set cloudflare clearance cookie and get answer from GPT-4 model
response = ""
try:
return ''.join(theb.Completion.create(prompt=question))
return ''.join(theb.Completion.create(prompt=question, proxy=proxy))

except Exception as e:
# Return error message if an exception occurs
Expand All @@ -40,11 +41,11 @@ def query_theb(question: str) -> str:
)


def query_you(question: str) -> str:
def query_you(question: str, proxy: Optional[str] = None) -> str:
# Set cloudflare clearance cookie and get answer from GPT-4 model
try:
result = you.Completion.create(prompt=question)
return result["response"]
result = you.Completion.create(prompt=question, proxy=proxy)
return result.text

except Exception as e:
# Return error message if an exception occurs
Expand All @@ -66,11 +67,11 @@ def query_you(question: str) -> str:
}


def query(user_input: str, selected_method: str = "Random") -> str:
def query(user_input: str, selected_method: str = "Random", proxy: Optional[str] = None) -> str:
# If a specific query method is selected (not "Random") and the method is in the dictionary, try to call it
if selected_method != "Random" and selected_method in avail_query_methods:
try:
return avail_query_methods[selected_method](user_input)
return avail_query_methods[selected_method](user_input, proxy=proxy)
except Exception as e:
print(f"Error with {selected_method}: {e}")
return "😵 Sorry, some error occurred please try again."
Expand All @@ -89,7 +90,7 @@ def query(user_input: str, selected_method: str = "Random") -> str:
chosen_query_name = [k for k, v in avail_query_methods.items() if v == chosen_query][0]
try:
# Try to call the chosen method with the user input
result = chosen_query(user_input)
result = chosen_query(user_input, proxy=proxy)
success = True
except Exception as e:
print(f"Error with {chosen_query_name}: {e}")
Expand Down
5 changes: 5 additions & 0 deletions gui/streamlit_chat_app.py
Original file line number Diff line number Diff line change
Expand Up @@ -75,8 +75,10 @@ def exit_handler():
)
submit_button = st.button("Submit")


if (user_input and user_input != st.session_state['input_text']) or submit_button:
output = query(user_input, st.session_state['query_method'])

escaped_output = output.encode('utf-8').decode('unicode-escape')

st.session_state.current_conversation['user_inputs'].append(user_input)
Expand All @@ -95,6 +97,9 @@ def exit_handler():

st.session_state['query_method'] = st.sidebar.selectbox("Select API:", options=avail_query_methods, index=0)

# Proxy
st.session_state['proxy'] = st.sidebar.text_input("Proxy: ")

# Sidebar
st.sidebar.header("Conversation History")

Expand Down
13 changes: 13 additions & 0 deletions testing/usesless_test.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,13 @@
# Smoke test for the usesless provider: a two-turn conversation where the
# second question is threaded onto the first via parentMessageId.
import usesless

first_prompt = "Who won the world series in 2020?"
first_reply = usesless.Completion.create(prompt=first_prompt)
first_answer = first_reply["text"]
parent_id = first_reply["parentMessageId"]

second_prompt = "Where was it played?"
second_reply = usesless.Completion.create(prompt=second_prompt, parentMessageId=parent_id)
second_answer = second_reply["text"]

print(first_answer)
print(second_answer)
23 changes: 23 additions & 0 deletions unfinished/usesless/README.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,23 @@
# ai.usesless.com

To do:

- use a random user agent in the request headers
- general cleanup and refactoring of the code

### Example: `usesless` <a name="example-usesless"></a>

```python
import usesless

message_id = ""
while True:
prompt = input("Question: ")
if prompt == "!stop":
break

req = usesless.Completion.create(prompt=prompt, parentMessageId=message_id)

print(f"Answer: {req['text']}")
message_id = req["id"]
```
51 changes: 51 additions & 0 deletions unfinished/usesless/__init__.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,51 @@
import requests
import json


class Completion:
    """Minimal client for the ai.usesless.com chat endpoint."""

    # Browser-like static headers; the service rejects bare requests.
    headers = {
        "authority": "ai.usesless.com",
        "accept": "application/json, text/plain, */*",
        "accept-language": "en-US,en;q=0.5",
        "cache-control": "no-cache",
        "sec-fetch-dest": "empty",
        "sec-fetch-mode": "cors",
        "sec-fetch-site": "same-origin",
        "user-agent": "Mozilla/5.0 (X11; Linux x86_64; rv:109.0) Gecko/20100101 Firefox/112.0",
    }

    @staticmethod
    def create(
        systemMessage: str = "You are a helpful assistant",
        prompt: str = "",
        parentMessageId: str = "",
        presence_penalty: float = 1,
        temperature: float = 1,
        model: str = "gpt-3.5-turbo",
    ) -> dict:
        """Send `prompt` to the chat endpoint and return the final event.

        :param systemMessage: system instruction for the conversation.
        :param prompt: user message to send.
        :param parentMessageId: id of the previous turn, for threading.
        :param presence_penalty: sampling presence penalty.
        :param temperature: sampling temperature.
        :param model: model name to request.
        :return: parsed JSON dict of the last streamed event.
        """
        json_data = {
            "openaiKey": "",
            "prompt": prompt,
            "options": {
                "parentMessageId": parentMessageId,
                "systemMessage": systemMessage,
                "completionParams": {
                    "presence_penalty": presence_penalty,
                    "temperature": temperature,
                    "model": model,
                },
            },
        }

        url = "https://ai.usesless.com/api/chat-process"
        response = requests.post(url, headers=Completion.headers, json=json_data)
        return Completion.__response_to_json(response.content)

    @classmethod
    def __response_to_json(cls, text) -> dict:
        """Parse the last line of a newline-separated JSON event stream.

        The endpoint streams one JSON object per line; only the final line
        holds the completed message. Using index [-1] (instead of [1]) is
        correct even when the response is a single line with no newline,
        where rsplit returns a one-element list and [1] would raise
        IndexError.
        """
        decoded = str(text.decode("utf-8"))
        last_line = decoded.rsplit("\n", 1)[-1]
        return json.loads(last_line)

0 comments on commit ef9127d

Please sign in to comment.