Skip to content

Commit

Permalink
support multiple access tokens
Browse files Browse the repository at this point in the history
Signed-off-by: pengzhile <[email protected]>
  • Loading branch information
pengzhile committed Apr 13, 2023
1 parent a0d4337 commit 378677d
Show file tree
Hide file tree
Showing 11 changed files with 234 additions and 113 deletions.
5 changes: 5 additions & 0 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -110,12 +110,14 @@
* `-s` / `--server` 以`http`服务方式启动,格式:`ip:port`。
* `-a` / `--api` 使用`gpt-3.5-turbo`API请求,**你可能需要向`OpenAI`支付费用**。
* `-l` / `--local` 使用本地环境登录,**你可能需要一个合适的代理IP以避免账号被风控!**
* `--tokens_file` 指定一个存放多`Access Token`的文件,内容为`{"key": "token"}`的形式。
* `--sentry` 启用`sentry`框架来发送错误报告供作者查错,敏感信息**不会被发送**
* `-v``--verbose` 显示调试信息,且出错时打印异常堆栈信息,供查错使用。

## Docker环境变量

* `PANDORA_ACCESS_TOKEN` 指定`Access Token`字符串。
* `PANDORA_TOKENS_FILE` 指定一个存放多`Access Token`的文件路径。
* `PANDORA_PROXY` 指定代理,格式:`protocol://user:pass@ip:port`
* `PANDORA_SERVER``http`服务方式启动,格式:`ip:port`
* `PANDORA_API` 使用`gpt-3.5-turbo`API请求,**你可能需要向`OpenAI`支付费用**
Expand Down Expand Up @@ -166,6 +168,7 @@
* 执行```pip install PyMySQL```安装驱动。
* 设置环境变量:`DATABASE_URI`为类似`mysql+pymysql://user:pass@localhost/dbname`的连接字符串。
* 环境变量指定`OPENAI_EMAIL`可以替代登录输入用户名,`OPENAI_PASSWORD`则可以替代输入密码。
* 环境变量`API_SYSTEM_PROMPT`可以替换`api`模式下的系统`prompt`

## Cloud模式

Expand All @@ -180,6 +183,7 @@
* 你需要一个`Cloudflare`账号,如果没有,可以[注册](https://dash.cloudflare.com/sign-up)一个。
* 登录后,点击`Workers`,然后点击`Create a Worker`,填入服务名称后点击`创建服务`
* 点开你刚才创建的服务,点击`快速编辑`按钮,贴入下面的代码,然后点击`保存并部署`

```javascript
export default {
async fetch(request, env) {
Expand All @@ -189,6 +193,7 @@
}
}
```

* 点击`触发器`选项卡,可以添加自定义访问域名。
* 参考`高阶设置`中的环境变量使用你的服务地址进行替换。
* 这里有一个示例代理地址:`https://chat1.gateway.do`
Expand Down
4 changes: 4 additions & 0 deletions bin/startup.sh
Original file line number Diff line number Diff line change
Expand Up @@ -14,6 +14,10 @@ if [ -n "${PANDORA_ACCESS_TOKEN}" ]; then
echo "${PANDORA_ACCESS_TOKEN}" >"${USER_CONFIG_DIR}/access_token.dat"
fi

# If a multi-token file is configured via the PANDORA_TOKENS_FILE env var,
# forward its path to pandora through the --tokens_file CLI flag.
# NOTE(review): the path is interpolated unquoted into PANDORA_ARGS, so a
# path containing spaces will be word-split later — confirm callers only
# pass space-free paths.
if [ -n "${PANDORA_TOKENS_FILE}" ]; then
PANDORA_ARGS="${PANDORA_ARGS} --tokens_file ${PANDORA_TOKENS_FILE}"
fi

# If an HTTP listen address (ip:port) is configured via PANDORA_SERVER,
# forward it to pandora through the -s CLI flag.
if [ -n "${PANDORA_SERVER}" ]; then
PANDORA_ARGS="${PANDORA_ARGS} -s ${PANDORA_SERVER}"
fi
Expand Down
2 changes: 2 additions & 0 deletions doc/HTTP-API.md
Original file line number Diff line number Diff line change
@@ -1,5 +1,7 @@
# Pandora HTTP API

### 特殊说明:如果有多个`Access Token`,可以使用`X-Use-Token: token_name` 头指定使用哪个。

### `/api/models`

* **HTTP方法:** `GET`
Expand Down
2 changes: 1 addition & 1 deletion setup.py
Original file line number Diff line number Diff line change
Expand Up @@ -43,7 +43,7 @@
'Tracker': 'https://github.com/pengzhile/pandora/issues',
},
classifiers=[
'Development Status :: 4 - Beta',
'Development Status :: 5 - Production/Stable',

'Environment :: Console',
'Environment :: Web Environment',
Expand Down
2 changes: 1 addition & 1 deletion src/pandora/__init__.py
Original file line number Diff line number Diff line change
@@ -1,3 +1,3 @@
# -*- coding: utf-8 -*-

__version__ = '0.9.8'
__version__ = '1.0.0'
54 changes: 43 additions & 11 deletions src/pandora/bots/legacy.py
Original file line number Diff line number Diff line change
Expand Up @@ -35,9 +35,12 @@ def __init__(self, title=None, conversation_id=None, model_slug=None, user_promp
class ChatBot:
def __init__(self, chatgpt):
    """Interactive console chat session driven by the given ChatGPT client.

    token_key selects which of the configured access tokens to use
    (None means the default token); state holds the active conversation.
    """
    self.chatgpt = chatgpt
    self.token_key, self.state = None, None

def run(self):
self.token_key = self.__choice_token_key()

conversation_base = self.__choice_conversation()
if conversation_base:
self.__load_conversation(conversation_base['id'])
Expand Down Expand Up @@ -156,7 +159,7 @@ def __edit_choice(self):

def __print_access_token(self):
Console.warn_b('\n#### Your access token (keep it private)')
Console.warn(self.chatgpt.access_token)
Console.warn(self.chatgpt.get_access_token(token_key=self.token_key))
print()

def __clear_screen(self):
Expand Down Expand Up @@ -191,7 +194,7 @@ def __set_conversation_title(self, state: State):
Console.error('#### Title too long.')
return

if self.chatgpt.set_conversation_title(state.conversation_id, new_title):
if self.chatgpt.set_conversation_title(state.conversation_id, new_title, token=self.token_key):
state.title = new_title
Console.debug('#### Set title success.')
else:
Expand All @@ -201,7 +204,7 @@ def __clear_conversations(self):
if not Confirm.ask('Are you sure?', default=False):
return

if self.chatgpt.clear_conversations():
if self.chatgpt.clear_conversations(token=self.token_key):
self.run()
else:
Console.error('#### Clear conversations failed.')
Expand All @@ -214,7 +217,7 @@ def __del_conversation(self, state: State):
if not Confirm.ask('Are you sure?', default=False):
return

if self.chatgpt.del_conversation(state.conversation_id):
if self.chatgpt.del_conversation(state.conversation_id, token=self.token_key):
self.run()
else:
Console.error('#### Delete conversation failed.')
Expand All @@ -226,7 +229,7 @@ def __load_conversation(self, conversation_id):
self.state = State(conversation_id=conversation_id)

nodes = []
result = self.chatgpt.get_conversation(conversation_id)
result = self.chatgpt.get_conversation(conversation_id, token=self.token_key)
current_node_id = result['current_node']

while True:
Expand Down Expand Up @@ -288,14 +291,15 @@ def __talk(self, prompt):
self.state.user_prompt = ChatPrompt(prompt, parent_id=self.state.chatgpt_prompt.message_id)

status, _, generator = self.chatgpt.talk(prompt, self.state.model_slug, self.state.user_prompt.message_id,
self.state.user_prompt.parent_id, self.state.conversation_id)
self.state.user_prompt.parent_id, self.state.conversation_id,
token=self.token_key)
self.__print_reply(status, generator)

self.state.user_prompts.append(self.state.user_prompt)

if first_prompt:
new_title = self.chatgpt.gen_conversation_title(self.state.conversation_id, self.state.model_slug,
self.state.chatgpt_prompt.message_id)
self.state.chatgpt_prompt.message_id, token=self.token_key)
self.state.title = new_title
Console.debug_bh('#### Title generated: ' + new_title)

Expand All @@ -306,7 +310,7 @@ def __regenerate_reply(self, state):

status, _, generator = self.chatgpt.regenerate_reply(state.user_prompt.prompt, state.model_slug,
state.conversation_id, state.user_prompt.message_id,
state.user_prompt.parent_id)
state.user_prompt.parent_id, token=self.token_key)
print()
Console.success_b('ChatGPT:')
self.__print_reply(status, generator)
Expand All @@ -317,7 +321,7 @@ def __continue(self, state):
return

status, _, generator = self.chatgpt.goon(state.model_slug, state.chatgpt_prompt.message_id,
state.conversation_id)
state.conversation_id, token=self.token_key)
print()
Console.success_b('ChatGPT:')
self.__print_reply(status, generator)
Expand Down Expand Up @@ -354,7 +358,7 @@ def __print_reply(self, status, generator):
print('\n')

def __choice_conversation(self, page=1, page_size=20):
conversations = self.chatgpt.list_conversations((page - 1) * page_size, page_size)
conversations = self.chatgpt.list_conversations((page - 1) * page_size, page_size, token=self.token_key)
if not conversations['total']:
return None

Expand Down Expand Up @@ -383,13 +387,22 @@ def __choice_conversation(self, page=1, page_size=20):
Console.warn(' d?.\tDelete the conversation, eg: d1')
Console.warn(' dd.\t!! Clear all conversations')
Console.warn(' r.\tRefresh conversation list')

if len(self.chatgpt.list_token_keys()) > 1:
choices.append('k')
Console.warn(' k.\tChoice access token')

Console.warn(' c.\t** Start new chat')

while True:
choice = Prompt.ask('Your choice', choices=choices, show_choices=False)
if 'c' == choice:
return None

if 'k' == choice:
self.run()
return

if 'r' == choice:
return self.__choice_conversation(page, page_size)

Expand All @@ -413,8 +426,27 @@ def __choice_conversation(self, page=1, page_size=20):

return items[int(choice) - 1]

def __choice_token_key(self):
    """Prompt the user to pick one of the configured access tokens.

    Returns the chosen token key, or None when zero or one token is
    configured (callers then fall back to the default token).
    """
    tokens = self.chatgpt.list_token_keys()

    # Nothing to choose from: with zero or one token the default is used.
    # (Previously an empty token list offered only 'r' and then crashed
    # on int('r') below.)
    if len(tokens) < 2:
        return None

    choices = ['r']
    Console.info_b('Choice access token:')
    for idx, item in enumerate(tokens):
        number = str(idx + 1)
        choices.append(number)
        Console.info(' {}.\t{}'.format(number, item))

    while True:
        choice = Prompt.ask('Your choice', choices=choices, show_choices=False)
        if 'r' == choice:
            # Redisplay the token list; 'r' was previously offered but
            # unhandled, crashing with ValueError on int('r').
            continue

        return tokens[int(choice) - 1]

def __choice_model(self):
models = self.chatgpt.list_models()
models = self.chatgpt.list_models(token=self.token_key)

size = len(models)
if 1 == size:
Expand Down
29 changes: 18 additions & 11 deletions src/pandora/bots/server.py
Original file line number Diff line number Diff line change
Expand Up @@ -102,6 +102,10 @@ def __handle_error(self, e):
'message': str(e.original_exception if self.debug and hasattr(e, 'original_exception') else e.name)
}), 500)

@staticmethod
def __get_token_key():
    """Return the access-token key named by the optional X-Use-Token header.

    Returns None when the request carries no such header, which selects
    the default token downstream.
    """
    headers = request.headers
    return headers.get('X-Use-Token')

def chat(self, conversation_id=None):
query = {'chatId': [conversation_id]} if conversation_id else {}

Expand Down Expand Up @@ -179,34 +183,36 @@ def check():
return jsonify(ret)

def list_models(self):
return self.__proxy_result(self.chatgpt.list_models(True))
return self.__proxy_result(self.chatgpt.list_models(True, self.__get_token_key()))

def list_conversations(self):
offset = request.args.get('offset', '1')
limit = request.args.get('limit', '20')

return self.__proxy_result(self.chatgpt.list_conversations(offset, limit, True))
return self.__proxy_result(self.chatgpt.list_conversations(offset, limit, True, self.__get_token_key()))

def get_conversation(self, conversation_id):
return self.__proxy_result(self.chatgpt.get_conversation(conversation_id, True))
return self.__proxy_result(self.chatgpt.get_conversation(conversation_id, True, self.__get_token_key()))

def del_conversation(self, conversation_id):
return self.__proxy_result(self.chatgpt.del_conversation(conversation_id, True))
return self.__proxy_result(self.chatgpt.del_conversation(conversation_id, True, self.__get_token_key()))

def clear_conversations(self):
return self.__proxy_result(self.chatgpt.clear_conversations(True))
return self.__proxy_result(self.chatgpt.clear_conversations(True, self.__get_token_key()))

def set_conversation_title(self, conversation_id):
title = request.json['title']

return self.__proxy_result(self.chatgpt.set_conversation_title(conversation_id, title, True))
return self.__proxy_result(
self.chatgpt.set_conversation_title(conversation_id, title, True, self.__get_token_key()))

def gen_conversation_title(self, conversation_id):
payload = request.json
model = payload['model']
message_id = payload['message_id']

return self.__proxy_result(self.chatgpt.gen_conversation_title(conversation_id, model, message_id, True))
return self.__proxy_result(
self.chatgpt.gen_conversation_title(conversation_id, model, message_id, True, self.__get_token_key()))

def talk(self):
payload = request.json
Expand All @@ -218,7 +224,8 @@ def talk(self):
stream = payload.get('stream', True)

return self.__process_stream(
*self.chatgpt.talk(prompt, model, message_id, parent_message_id, conversation_id, stream), stream)
*self.chatgpt.talk(prompt, model, message_id, parent_message_id, conversation_id, stream,
self.__get_token_key()), stream)

def goon(self):
payload = request.json
Expand All @@ -228,7 +235,7 @@ def goon(self):
stream = payload.get('stream', True)

return self.__process_stream(
*self.chatgpt.goon(model, parent_message_id, conversation_id, stream), stream)
*self.chatgpt.goon(model, parent_message_id, conversation_id, stream, self.__get_token_key()), stream)

def regenerate(self):
payload = request.json
Expand All @@ -240,8 +247,8 @@ def regenerate(self):
stream = payload.get('stream', True)

return self.__process_stream(
*self.chatgpt.regenerate_reply(prompt, model, conversation_id, message_id, parent_message_id, stream),
stream)
*self.chatgpt.regenerate_reply(prompt, model, conversation_id, message_id, parent_message_id, stream,
self.__get_token_key()), stream)

@staticmethod
def __process_stream(status, headers, generator, stream):
Expand Down
Loading

0 comments on commit 378677d

Please sign in to comment.