Skip to content

Commit

Permalink
swarms addition + example usage
Browse files Browse the repository at this point in the history
  • Loading branch information
Kye committed Nov 26, 2023
1 parent 6b428af commit 262bf5e
Show file tree
Hide file tree
Showing 3 changed files with 62 additions and 2 deletions.
8 changes: 7 additions & 1 deletion README.md
Original file line number Diff line number Diff line change
Expand Up @@ -27,6 +27,7 @@ print(output)
import torch
from process_supervision.prm import PRM
from swarms.models import OpenAIChat
from process_supervision.generator import MathDataGenerator
import os
from dotenv import load_dotenv

Expand All @@ -35,8 +36,12 @@ load_dotenv()
api_key = os.getenv("OPENAI_API_KEY")

# LLM initialization
llm = OpenAIChat(api_key=api_key)
llm = OpenAIChat(openai_api_key=api_key)

# Math data generator initialization
math_datagenerator = MathDataGenerator(llm, num_iters=10)

# Device initialization
device = 0 if torch.cuda.is_available() else "cpu"

# Model initialization
Expand All @@ -59,6 +64,7 @@ sent_kwargs = {"top_k": None, "function_to_apply": "none", "batch_size": 16}

# Sample queries
queries = ["Sample query 1", "Sample query 2"]
queries = [math_datagenerator.generate_samples(query) for query in queries]

# Generate responses
responses = prm_model.generate_responses(
Expand Down
8 changes: 7 additions & 1 deletion prm_example.py
Original file line number Diff line number Diff line change
@@ -1,6 +1,7 @@
import torch
from process_supervision.prm import PRM
from swarms.models import OpenAIChat
from process_supervision.generator import MathDataGenerator
import os
from dotenv import load_dotenv

Expand All @@ -9,8 +10,12 @@
api_key = os.getenv("OPENAI_API_KEY")

# LLM initialization
llm = OpenAIChat(api_key=api_key)
llm = OpenAIChat(openai_api_key=api_key)

# Math data generator initialization
math_datagenerator = MathDataGenerator(llm, num_iters=10)

# Device initialization
device = 0 if torch.cuda.is_available() else "cpu"

# Model initialization
Expand All @@ -33,6 +38,7 @@

# Sample queries
queries = ["Sample query 1", "Sample query 2"]
queries = [math_datagenerator.generate_samples(query) for query in queries]

# Generate responses
responses = prm_model.generate_responses(
Expand Down
48 changes: 48 additions & 0 deletions process_supervision/generator.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,48 @@
import os

from dotenv import load_dotenv
from swarms.models import OpenAIChat

load_dotenv()

# Read the OpenAI key from the environment (.env files supported via python-dotenv).
api_key = os.getenv("OPENAI_API_KEY")

# LLM initialization.
# `openai_api_key` is the keyword this commit standardized on in README.md and
# prm_example.py; the previous `api_key=` keyword was inconsistent with them.
# NOTE(review): constructing the client at import time is a side effect and this
# module-level `llm` is not used by MathDataGenerator — consider removing it.
llm = OpenAIChat(openai_api_key=api_key)

class MathDataGenerator:
    """Collect multiple LLM completions for a math task.

    Args:
        llm: A callable language model; invoked as ``llm(task)`` and
            expected to return one completion per call.
        num_iters (int): Number of completions to collect per task.

    Examples:
        >>> mdg = MathDataGenerator(lambda t: t + "!", num_iters=2)
        >>> mdg.generate_samples("1 + 1 = 2")
        ['1 + 1 = 2!', '1 + 1 = 2!']
    """

    def __init__(self, llm, num_iters: int):
        self.llm = llm
        self.num_iters = num_iters

    def generate_samples(self, task: str) -> list:
        """Query the LLM ``num_iters`` times with the same task.

        Args:
            task (str): Prompt to send to the LLM on every iteration.

        Returns:
            list: The raw LLM outputs, one per iteration, in call order.
        """
        # Each call may return a different sample (LLM sampling is stochastic),
        # so the same task is submitted num_iters times.
        return [self.llm(task) for _ in range(self.num_iters)]

0 comments on commit 262bf5e

Please sign in to comment.