Commit

[FEAT][Tests for prm]
Kye committed Nov 26, 2023
1 parent 262bf5e commit 6f854ea
Showing 4 changed files with 130 additions and 11 deletions.
8 changes: 5 additions & 3 deletions prm_example.py
@@ -1,9 +1,11 @@
import os

import torch
from process_supervision.prm import PRM
from dotenv import load_dotenv
from swarms.models import OpenAIChat

from process_supervision.generator import MathDataGenerator

load_dotenv()

14 changes: 8 additions & 6 deletions process_supervision/generator.py
@@ -10,24 +10,26 @@
# LLM initialization
llm = OpenAIChat(api_key=api_key)


class MathDataGenerator:
"""
Math data generator for the LLM.
Args:
llm (OpenAIChat): LLM model.
num_iters (int): Number of iterations to run the LLM.
Returns:
list of dict: Generated samples.
Examples:
>>> llm = OpenAIChat(api_key=api_key)
>>> mdg = MathDataGenerator(llm, num_iters=10)
>>> mdg.generate_samples("1 + 1 = 2")
[{'query': '1 + 1 = 2', 'response': '1 + 1 = 2', 'score': 0.0, 'reward': 0.0}]
"""
"""

def __init__(self, llm, num_iters):
self.llm = llm
self.num_iters = num_iters
@@ -45,4 +47,4 @@ def generate_samples(self, task: str):
for _ in range(self.num_iters):
results = self.llm(task)
memory.append(results)
return memory
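
To make the docstring example above concrete, here is a minimal usage sketch. It is not part of this commit: the OPENAI_API_KEY variable name and the task string are assumptions for illustration, and the returned list simply holds whatever the LLM produced on each iteration.

import os

from dotenv import load_dotenv
from swarms.models import OpenAIChat

from process_supervision.generator import MathDataGenerator

load_dotenv()
llm = OpenAIChat(api_key=os.getenv("OPENAI_API_KEY"))  # assumed env var name

mdg = MathDataGenerator(llm, num_iters=3)
samples = mdg.generate_samples("What is 17 * 24?")  # hypothetical task string
print(len(samples))  # one raw LLM output per iteration -> 3
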
27 changes: 25 additions & 2 deletions process_supervision/prm.py
@@ -1,11 +1,34 @@
from typing import Any, Dict, List

import torch
from transformers import AutoTokenizer, pipeline
from trl import AutoModelForCausalLMWithValueHead


class PRM:
"""
PRM model class.
Args:
model_name (str): Name of the main model.
ref_model_name (str): Name of the reference model.
reward_model_name (str): Name of the reward model.
device (int or str): Device to run the model on ('cpu' or 'cuda').
Examples:
>>> prm_model = PRM(
... model_name="lvwerra/gpt2-imdb-pos-v2",
... ref_model_name="lvwerra/gpt2-imdb",
... reward_model_name="lvwerra/distilbert-imdb",
... device=device,
... )
>>> prm_model.generate_responses(
... queries, gen_len=10, gen_kwargs=gen_kwargs
... )
['Sample response 1', 'Sample response 2']
>>> prm_model.score_responses(responses, sent_kwargs)
[0.0, 0.0]
"""
def __init__(
self,
model_name: str = "lvwerra/gpt2-imdb-pos-v2",
@@ -26,7 +49,7 @@ def __init__(
self.ref_model_name = ref_model_name
self.reward_model_name = reward_model_name
self.device = device

self.model = AutoModelForCausalLMWithValueHead.from_pretrained(
model_name
).to(device)
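
A minimal end-to-end sketch of the API described in the docstring above, not taken from this commit: the gen_kwargs and sent_kwargs values mirror the keys used in the new tests, and the query strings are illustrative assumptions.

from process_supervision.prm import PRM

prm_model = PRM(
    model_name="lvwerra/gpt2-imdb-pos-v2",
    ref_model_name="lvwerra/gpt2-imdb",
    reward_model_name="lvwerra/distilbert-imdb",
    device="cpu",
)

queries = ["How are you?", "What is the weather today?"]  # illustrative queries
gen_kwargs = {"do_sample": True}    # generation settings, as used in the tests
sent_kwargs = {"truncation": True}  # reward-pipeline settings, as used in the tests

responses = prm_model.generate_responses(queries, gen_len=10, gen_kwargs=gen_kwargs)
scores = prm_model.score_responses(responses, sent_kwargs)  # one float per response
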
92 changes: 92 additions & 0 deletions tests.py
@@ -0,0 +1,92 @@
from unittest.mock import Mock

import pytest
from transformers import PreTrainedTokenizerBase
from trl import AutoModelForCausalLMWithValueHead

from process_supervision.prm import PRM


@pytest.fixture
def prm_model():
return PRM(
model_name="lvwerra/gpt2-imdb-pos-v2",
ref_model_name="lvwerra/gpt2-imdb",
reward_model_name="lvwerra/distilbert-imdb",
device="cpu",
)

def test_prm_model_init(prm_model):
assert prm_model.model_name == "lvwerra/gpt2-imdb-pos-v2"
assert prm_model.ref_model_name == "lvwerra/gpt2-imdb"
assert prm_model.reward_model_name == "lvwerra/distilbert-imdb"
assert prm_model.device == "cpu"
assert isinstance(prm_model.model, AutoModelForCausalLMWithValueHead)
assert isinstance(prm_model.ref_model, AutoModelForCausalLMWithValueHead)
    assert isinstance(prm_model.tokenizer, PreTrainedTokenizerBase)

def test_generate_responses(prm_model):
queries = ["How are you?", "What is the weather today?"]
gen_len = 10
gen_kwargs = {"do_sample": True}
responses = prm_model.generate_responses(queries, gen_len, gen_kwargs)
assert isinstance(responses, list)
assert len(responses) == len(queries)
for response in responses:
assert isinstance(response, str)

def test_score_responses(prm_model):
responses = ["I'm good.", "The weather is sunny."]
sent_kwargs = {"truncation": True}
scores = prm_model.score_responses(responses, sent_kwargs)
assert isinstance(scores, list)
assert len(scores) == len(responses)
for score in scores:
assert isinstance(score, float)

@pytest.mark.parametrize("queries, gen_len, gen_kwargs", [
(["Hello"], 5, {"do_sample": False}),
(["How are you?", "What is the weather today?"], 15, {"do_sample": True}),
])
def test_generate_responses_parametrized(prm_model, queries, gen_len, gen_kwargs):
responses = prm_model.generate_responses(queries, gen_len, gen_kwargs)
assert isinstance(responses, list)
assert len(responses) == len(queries)
for response in responses:
assert isinstance(response, str)

@pytest.mark.parametrize("responses, sent_kwargs", [
(["I'm good.", "The weather is sunny."], {"truncation": True}),
(["Great!", "It's raining."], {"truncation": False}),
])
def test_score_responses_parametrized(prm_model, responses, sent_kwargs):
scores = prm_model.score_responses(responses, sent_kwargs)
assert isinstance(scores, list)
assert len(scores) == len(responses)
for score in scores:
assert isinstance(score, float)

def test_generate_responses_with_mocked_model(prm_model, monkeypatch):
mock_generate = Mock(return_value=[[1, 2, 3]])
monkeypatch.setattr(prm_model.model, "generate", mock_generate)
queries = ["How are you?"]
gen_len = 5
gen_kwargs = {"do_sample": True}
responses = prm_model.generate_responses(queries, gen_len, gen_kwargs)
    assert isinstance(responses, list) and len(responses) == len(queries)

def test_score_responses_with_mocked_pipe(prm_model, monkeypatch):
mock_pipe = Mock(return_value=[{"score": 0.8}])
    monkeypatch.setattr(prm_model, "reward_pipe", mock_pipe)
responses = ["I'm good."]
sent_kwargs = {"truncation": True}
scores = prm_model.score_responses(responses, sent_kwargs)
assert scores == [0.8]

def test_generate_responses_exception(prm_model):
with pytest.raises(Exception):
prm_model.generate_responses("Hello", 5, {"do_sample": False})

def test_score_responses_exception(prm_model):
with pytest.raises(Exception):
prm_model.score_responses("Great!", {"truncation": False})
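
The suite runs under pytest; note that the prm_model fixture loads the three Hub checkpoints, so the first run downloads models and needs network access. Below is a minimal sketch of invoking it from Python, equivalent to running pytest tests.py -q from a shell; it is an illustration, not part of this commit.

import pytest

if __name__ == "__main__":
    # Run only the new PRM tests; -q keeps the output compact.
    raise SystemExit(pytest.main(["-q", "tests.py"]))
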
