Skip to content

Commit

Permalink
readme
Browse files Browse the repository at this point in the history
  • Loading branch information
kaqijiang committed Apr 14, 2023
1 parent 98efd26 commit bbcd01d
Show file tree
Hide file tree
Showing 28 changed files with 459 additions and 561 deletions.
1 change: 1 addition & 0 deletions README.fork.md
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
Hello, world!
373 changes: 134 additions & 239 deletions README.md

Large diffs are not rendered by default.

Binary file added demo.mp4
Binary file not shown.
8 changes: 4 additions & 4 deletions scripts/agent_manager.py
Original file line number Diff line number Diff line change
Expand Up @@ -8,7 +8,7 @@


def create_agent(task, prompt, model):
"""Create a new agent and return its key"""
"""创建新代理并返回其密钥"""
global next_key
global agents

Expand All @@ -34,7 +34,7 @@ def create_agent(task, prompt, model):


def message_agent(key, message):
"""Send a message to an agent and return its response"""
"""向代理发送消息并返回其响应"""
global agents

task, messages, model = agents[int(key)]
Expand All @@ -55,15 +55,15 @@ def message_agent(key, message):


def list_agents():
    """List every registered agent.

    Returns:
        list[tuple]: (key, task) pairs, one per agent in the registry.
    """
    global agents

    pairs = []
    for agent_key, (agent_task, _, _) in agents.items():
        pairs.append((agent_key, agent_task))
    return pairs


def delete_agent(key):
"""Delete an agent and return True if successful, False otherwise"""
"""删除代理,如果成功则返回True,否则返回False"""
global agents

try:
Expand Down
54 changes: 27 additions & 27 deletions scripts/ai_config.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,22 +5,22 @@

class AIConfig:
"""
A class object that contains the configuration information for the AI
Attributes:
ai_name (str): The name of the AI.
ai_role (str): The description of the AI's role.
ai_goals (list): The list of objectives the AI is supposed to complete.
包含AI配置信息的类对象
属性:
ai_name (str): 人工智能的名称。
ai_role (str): 人工智能角色的描述。
ai_goals (list): 人工智能应该完成的目标列表。
"""

def __init__(self, ai_name: str="", ai_role: str="", ai_goals: list=[]) -> None:
"""
Initialize a class instance
初始化一个类实例
Parameters:
ai_name (str): The name of the AI.
ai_role (str): The description of the AI's role.
ai_goals (list): The list of objectives the AI is supposed to complete.
参数:
ai_name (str): 人工智能的名称。
ai_role (str): 人工智能角色的描述。
ai_goals (list): 人工智能应该完成的目标列表。
Returns:
None
"""
Expand All @@ -35,15 +35,15 @@ def __init__(self, ai_name: str="", ai_role: str="", ai_goals: list=[]) -> None:
@classmethod
def load(cls: object, config_file: str=SAVE_FILE) -> object:
"""
Returns class object with parameters (ai_name, ai_role, ai_goals) loaded from yaml file if yaml file exists,
else returns class with no parameters.
如果存在 yaml 文件,则返回带有从 yaml 文件加载的参数(ai_nameai_roleai_goals)的类对象,
else 返回没有参数的类。
Parameters:
cls (class object): An AIConfig Class object.
config_file (int): The path to the config yaml file. DEFAULT: "../ai_settings.yaml"
参数:
cls(类对象):一个 AIConfig 类对象。
config_file (int):配置 yaml 文件的路径。 默认值:“../ai_settings.yaml
Returns:
cls (object): An instance of given cls object
返回:
cls (object):给定 cls 对象的一个实例
"""

try:
Expand All @@ -60,10 +60,10 @@ def load(cls: object, config_file: str=SAVE_FILE) -> object:

def save(self, config_file: str=SAVE_FILE) -> None:
"""
Saves the class parameters to the specified file yaml file path as a yaml file.
将类参数作为yaml文件保存到指定文件yaml文件路径。
Parameters:
config_file(str): The path to the config yaml file. DEFAULT: "../ai_settings.yaml"
参数:
config_file(str):配置 yaml 文件的路径。 默认值:“../ai_settings.yaml
Returns:
None
Expand All @@ -75,16 +75,16 @@ def save(self, config_file: str=SAVE_FILE) -> None:

def construct_full_prompt(self) -> str:
"""
Returns a prompt to the user with the class information in an organized fashion.
以有组织的方式向用户返回带有类信息的提示。
Parameters:
None
参数:
没有任何
Returns:
full_prompt (str): A string containing the initial prompt for the user including the ai_name, ai_role and ai_goals.
Returns:
full_prompt (str):包含用户初始提示的字符串,包括 ai_nameai_role ai_goals
"""

prompt_start = """Your decisions must always be made independently without seeking user assistance. Play to your strengths as an LLM and pursue simple strategies with no legal complications."""
prompt_start = """您必须始终独立做出决定,而无需寻求用户帮助。 发挥你作为法学硕士的优势,追求简单的策略,没有法律上的并发症。"""

# Construct full prompt
full_prompt = f"You are {self.ai_name}, {self.ai_role}\n{prompt_start}\n\nGOALS:\n\n"
Expand Down
6 changes: 3 additions & 3 deletions scripts/ai_functions.py
Original file line number Diff line number Diff line change
Expand Up @@ -17,7 +17,7 @@ def evaluate_code(code: str) -> List[str]:

function_string = "def analyze_code(code: str) -> List[str]:"
args = [code]
description_string = """Analyzes the given code and returns a list of suggestions for improvements."""
description_string = """分析给定的代码,并返回改进建议的列表。"""

result_string = call_ai_function(function_string, args, description_string)

Expand All @@ -39,7 +39,7 @@ def improve_code(suggestions: List[str], code: str) -> str:
"def generate_improved_code(suggestions: List[str], code: str) -> str:"
)
args = [json.dumps(suggestions), code]
description_string = """Improves the provided code based on the suggestions provided, making no other changes."""
description_string = """基于提供的建议改进提供的代码,不做其他更改。"""

result_string = call_ai_function(function_string, args, description_string)
return result_string
Expand All @@ -60,7 +60,7 @@ def write_tests(code: str, focus: List[str]) -> str:
"def create_test_cases(code: str, focus: Optional[str] = None) -> str:"
)
args = [code, json.dumps(focus)]
description_string = """Generates test cases for the existing code, focusing on specific areas if required."""
description_string = """为现有代码生成测试用例,如果需要,重点关注特定区域。"""

result_string = call_ai_function(function_string, args, description_string)
return result_string
18 changes: 9 additions & 9 deletions scripts/browse.py
Original file line number Diff line number Diff line change
Expand Up @@ -56,7 +56,7 @@ def get_response(url, headers=cfg.user_agent_header, timeout=10):


def scrape_text(url):
"""Scrape text from a webpage"""
"""从网页中抓取文本"""
response, error_message = get_response(url)
if error_message:
return error_message
Expand All @@ -75,23 +75,23 @@ def scrape_text(url):


def extract_hyperlinks(soup):
    """Collect the hyperlinks found in a parsed HTML document.

    Parameters:
        soup: a BeautifulSoup document (any object exposing ``find_all``).

    Returns:
        list[tuple]: (link text, href URL) for every ``<a>`` tag with an
        ``href`` attribute, in document order.
    """
    return [(anchor.text, anchor['href'])
            for anchor in soup.find_all('a', href=True)]


def format_hyperlinks(hyperlinks):
    """Render hyperlink pairs as human-readable strings.

    Parameters:
        hyperlinks: iterable of (text, url) tuples.

    Returns:
        list[str]: one "text (url)" string per input pair.
    """
    return [f"{text} ({url})" for text, url in hyperlinks]


def scrape_links(url):
"""Scrape links from a webpage"""
"""从网页中抓取链接"""
response, error_message = get_response(url)
if error_message:
return error_message
Expand All @@ -107,7 +107,7 @@ def scrape_links(url):


def split_text(text, max_length=8192):
"""Split text into chunks of a maximum length"""
"""将文本拆分为最大长度的块"""
paragraphs = text.split("\n")
current_length = 0
current_chunk = []
Expand All @@ -126,17 +126,17 @@ def split_text(text, max_length=8192):


def create_message(chunk, question):
"""Create a message for the user to summarize a chunk of text"""
"""为用户创建一条消息来总结一段文本"""
return {
"role": "user",
"content": f"\"\"\"{chunk}\"\"\" Using the above text, please answer the following question: \"{question}\" -- if the question cannot be answered using the text, please summarize the text."
"content": f"\"\"\"{chunk}\"\"\" 使用以上文本,请以中文回答以下问题: \"{question}\" -- 如果问题无法使用文本回答 ,请总结文本。"
}


def summarize_text(text, question):
"""Summarize text using the LLM model"""
"""使用 LLM 模型总结文本"""
if not text:
return "Error: No text to summarize"
return "Error: 没有文字可以总结"

text_length = len(text)
print(f"Text length: {text_length} characters")
Expand Down
2 changes: 1 addition & 1 deletion scripts/call_ai_function.py
Original file line number Diff line number Diff line change
Expand Up @@ -8,7 +8,7 @@
# This is a magic function that can do anything with no-code. See
# https://github.com/Torantulino/AI-Functions for more info.
def call_ai_function(function, args, description, model=None):
"""Call an AI function"""
"""调用一个AI函数"""
if model is None:
model = cfg.smart_llm_model
# For each arg, if any are None, convert to "None":
Expand Down
48 changes: 24 additions & 24 deletions scripts/chat.py
Original file line number Diff line number Diff line change
Expand Up @@ -12,14 +12,14 @@

def create_chat_message(role, content):
    """Build a single chat message for the OpenAI chat-completion API.

    Parameters:
        role (str): sender role, e.g. "system", "user", or "assistant".
        content (str): the message body text.

    Returns:
        dict: a mapping with exactly the keys "role" and "content".
    """
    message = {}
    message["role"] = role
    message["content"] = content
    return message

Expand All @@ -29,9 +29,9 @@ def generate_context(prompt, relevant_memory, full_message_history, model):
create_chat_message(
"system", prompt),
create_chat_message(
"system", f"The current time and date is {time.strftime('%c')}"),
"system", f"现在的时间和日期是 {time.strftime('%c')}"),
create_chat_message(
"system", f"This reminds you of these events from your past:\n{relevant_memory}\n\n")]
"system", f"这让你想起了你过去的某些事件:\n{relevant_memory}\n\n")]

# Add messages from the full message history until we reach the token limit
next_message_to_add_index = len(full_message_history) - 1
Expand All @@ -48,31 +48,31 @@ def chat_with_ai(
full_message_history,
permanent_memory,
token_limit):
"""Interact with the OpenAI API, sending the prompt, user input, message history, and permanent memory."""
"""OpenAI API 交互,发送提示、用户输入、消息历史记录和永久内存。"""
while True:
try:
"""
Interact with the OpenAI API, sending the prompt, user input, message history, and permanent memory.
OpenAI API 交互,发送提示、用户输入、消息历史记录和永久内存。
Args:
prompt (str): The prompt explaining the rules to the AI.
user_input (str): The input from the user.
full_message_history (list): The list of all messages sent between the user and the AI.
permanent_memory (Obj): The memory object containing the permanent memory.
token_limit (int): The maximum number of tokens allowed in the API call.
参数:
prompt (str): 向 AI 解释规则的提示。
user_input (str):来自用户的输入。
full_message_history (list):用户和AI之间发送的所有消息的列表。
permanent_memory (Obj):包含永久内存的内存对象。
token_limit (int)API 调用中允许的最大令牌数。
Returns:
str: The AI's response.
str: AI的回应.
"""
model = cfg.fast_llm_model # TODO: Change model from hardcode to argument
# Reserve 1000 tokens for the response

logger.debug(f"Token limit: {token_limit}")
logger.debug(f"Token 限制: {token_limit}")
send_token_limit = token_limit - 1000

relevant_memory = '' if len(full_message_history) ==0 else permanent_memory.get_relevant(str(full_message_history[-9:]), 10)

logger.debug(f'Memory Stats: {permanent_memory.get_stats()}')
logger.debug(f'内存状态: {permanent_memory.get_stats()}')

next_message_to_add_index, current_tokens_used, insertion_index, current_context = generate_context(
prompt, relevant_memory, full_message_history, model)
Expand Down Expand Up @@ -110,17 +110,17 @@ def chat_with_ai(
# assert tokens_remaining >= 0, "Tokens remaining is negative. This should never happen, please submit a bug report at https://www.github.com/Torantulino/Auto-GPT"

# Debug print the current context
logger.debug(f"Token limit: {token_limit}")
logger.debug(f"Send Token Count: {current_tokens_used}")
logger.debug(f"Tokens remaining for response: {tokens_remaining}")
logger.debug("------------ CONTEXT SENT TO AI ---------------")
logger.debug(f"Token 限制: {token_limit}")
logger.debug(f"发送Token 数量: {current_tokens_used}")
logger.debug(f"Tokens 剩余回应: {tokens_remaining}")
logger.debug("------------ 发送给 AI 的上下文信息 ---------------")
for message in current_context:
# Skip printing the prompt
if message["role"] == "system" and message["content"] == prompt:
continue
logger.debug(f"{message['role'].capitalize()}: {message['content']}")
logger.debug("")
logger.debug("----------- END OF CONTEXT ----------------")
logger.debug("----------- 结束上下文信息 ----------------")

# TODO: use a model defined elsewhere, so that model can contain temperature and other settings we care about
assistant_reply = create_chat_completion(
Expand All @@ -140,5 +140,5 @@ def chat_with_ai(
return assistant_reply
except openai.error.RateLimitError:
# TODO: When we switch to langchain, this is built in
print("Error: ", "API Rate Limit Reached. Waiting 10 seconds...")
print("Error: ", "已达到 API 速率限制。 等待 10 ...")
time.sleep(10)
Loading

0 comments on commit bbcd01d

Please sign in to comment.