Skip to content

Commit

Permalink
[MNT] Fix infer scripts
Browse files Browse the repository at this point in the history
  • Loading branch information
pengxiao-song committed Jun 6, 2023
1 parent 94b70a3 commit 6bd5919
Show file tree
Hide file tree
Showing 2 changed files with 72 additions and 47 deletions.
115 changes: 70 additions & 45 deletions infer.py
Original file line number Diff line number Diff line change
Expand Up @@ -12,43 +12,53 @@
device = "cuda"


def main(
load_8bit: bool = False,
base_model: str = "",
lora_weights: str = "",
infer_data_path: str = "",
prompt_template: str = "", # The prompt template to use, will default to alpaca.
):
prompter = Prompter(prompt_template)
tokenizer = LlamaTokenizer.from_pretrained(base_model)
model = LlamaForCausalLM.from_pretrained(
base_model,
load_in_8bit=load_8bit,
torch_dtype=torch.float16,
device_map="auto",
)
try:
print(f"Using lora {lora_weights}")
model = PeftModel.from_pretrained(
model,
lora_weights,
class Infer():
    """Wraps a LLaMA causal-LM (optionally with LoRA adapter weights) for inference."""

    def __init__(
        self,
        load_8bit: bool = False,
        base_model: str = "",
        lora_weights: str = "",
        prompt_template: str = "",  # The prompt template to use, will default to alpaca.
    ):
        """Load tokenizer, base model and (best-effort) LoRA weights.

        Args:
            load_8bit: load the base model with 8-bit quantization.
            base_model: HF hub id or local path of the base LLaMA model.
            lora_weights: HF hub id or local path of the LoRA adapter;
                loading failures are tolerated and the bare base model is used.
            prompt_template: name of the prompt template; empty defaults to alpaca.
        """
        prompter = Prompter(prompt_template)
        tokenizer = LlamaTokenizer.from_pretrained(base_model)
        model = LlamaForCausalLM.from_pretrained(
            base_model,
            load_in_8bit=load_8bit,
            torch_dtype=torch.float16,
            device_map="auto",
        )

        try:
            print(f"Using lora {lora_weights}")
            model = PeftModel.from_pretrained(
                model,
                lora_weights,
                torch_dtype=torch.float16,
            )
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # still propagate; missing/broken adapters fall back to base model.
        except Exception:
            print("*" * 50, "\n Attention! No Lora Weights \n", "*" * 50)

        # unwind broken decapoda-research config
        model.config.pad_token_id = tokenizer.pad_token_id = 0  # unk
        model.config.bos_token_id = 1
        model.config.eos_token_id = 2
        if not load_8bit:
            model.half()  # seems to fix bugs for some users.

        model.eval()

        if torch.__version__ >= "2" and sys.platform != "win32":
            model = torch.compile(model)

        self.base_model = base_model
        self.lora_weights = lora_weights
        self.model = model
        self.prompter = prompter
        self.tokenizer = tokenizer

def evaluate(
def generate_output(
self,
instruction,
input=None,
temperature=0.1,
Expand All @@ -58,8 +68,8 @@ def evaluate(
max_new_tokens=256,
**kwargs,
):
prompt = prompter.generate_prompt(instruction, input)
inputs = tokenizer(prompt, return_tensors="pt")
prompt = self.prompter.generate_prompt(instruction, input)
inputs = self.tokenizer(prompt, return_tensors="pt")
input_ids = inputs["input_ids"].to(device)
generation_config = GenerationConfig(
temperature=temperature,
Expand All @@ -70,42 +80,57 @@ def evaluate(
**kwargs,
)
with torch.no_grad():
generation_output = model.generate(
generation_output = self.model.generate(
input_ids=input_ids,
generation_config=generation_config,
return_dict_in_generate=True,
output_scores=True,
max_new_tokens=max_new_tokens,
)
s = generation_output.sequences[0]
output = tokenizer.decode(s)
return prompter.get_response(output)
output = self.tokenizer.decode(s)
return self.prompter.get_response(output)

def infer_from_file(self, infer_data_path):
    """Run inference over a JSON-lines file and print model output vs. ground truth.

    Each line of *infer_data_path* must be a JSON object with
    "instruction" and "output" keys; results are printed, not returned.
    """
    with open(infer_data_path) as f:
        for line in f:
            data = json.loads(line)
            instruction = data["instruction"]
            output = data["output"]
            print('=' * 100)
            print(f"Base Model: {self.base_model} Lora Weights: {self.lora_weights}")
            print("Instruction:\n", instruction)
            model_output = self.generate_output(instruction)
            print("Model Output:\n", model_output)
            print("Ground Truth:\n", output)
            print('=' * 100)


def main(
    load_8bit: bool = False,
    base_model: str = "",
    lora_weights: str = "",
    prompt_template: str = "",  # The prompt template to use, will default to alpaca.
    infer_data_path: str = "",
):
    """Batch-infer from a JSONL file, falling back to an interactive REPL.

    Args:
        load_8bit / base_model / lora_weights / prompt_template:
            forwarded to Infer.__init__.
        infer_data_path: JSON-lines file consumed by Infer.infer_from_file.
            If reading it fails for any reason, the error is printed and the
            script drops into an interactive prompt loop (Ctrl-C to exit).
    """
    infer = Infer(
        load_8bit=load_8bit,
        base_model=base_model,
        lora_weights=lora_weights,
        prompt_template=prompt_template,
    )

    try:
        infer.infer_from_file(infer_data_path)
    except Exception as e:
        print(e, "Read infer_data_path Failed! Now Interactive Mode: ")
        while True:
            print('=' * 100)
            instruction = input("请输入您的问题: ")
            print("LaWGPT:")
            print(infer.generate_output(instruction))
            print('=' * 100)


if __name__ == "__main__":
    fire.Fire(main)
4 changes: 2 additions & 2 deletions scripts/infer.sh
Original file line number Diff line number Diff line change
Expand Up @@ -3,5 +3,5 @@ python infer.py \
--load_8bit True \
--base_model 'minlik/chinese-llama-7b-merged' \
--lora_weights 'entity303/lawgpt-lora-7b' \
--infer_data_path './resources/example_infer_data.json' \
--prompt_template 'law_template'
--prompt_template 'law_template' \
--infer_data_path './resources/example_infer_data.json'

0 comments on commit 6bd5919

Please sign in to comment.