fix: adjust bigdl-llm to ipex-llm base intel cpu
ai-liuys committed Mar 28, 2024
1 parent 00b354e commit 6034531
Showing 7 changed files with 76 additions and 4 deletions.
2 changes: 1 addition & 1 deletion Intel_device_demo/README.md
@@ -10,7 +10,7 @@
- Other Intel toolkits that, in principle, support OpenVINO acceleration.

## 2. Directory Structure
- BigDl_demo: BigDL-LLM is a low-precision, lightweight large language model library built for Intel XPU (Xeon/Core/Flex/Arc/PVC). It offers broad model support, minimal latency, and a small memory footprint on Intel platforms, and this demo shows how to use it for accelerated model deployment.
- IPEX_demo: IPEX-LLM is a low-precision, lightweight large language model library built for Intel XPU (Xeon/Core/Flex/Arc/PVC). It offers broad model support, minimal latency, and a small memory footprint on Intel platforms, and this demo shows how to use it for accelerated model deployment (a minimal migration sketch follows this list).
- OpenVINO_demo: Uses the Intel OpenVINO inference acceleration framework to demonstrate accelerated model deployment.
- Pytorch_demo (not yet released): Uses Intel Pytorch Extension for development in the PyTorch environment (for Intel Arc series GPUs).
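As a minimal illustration of the IPEX_demo entry above and of the bigdl-llm to ipex-llm migration in this commit: a sketch of low-bit loading with IPEX-LLM, assuming `ipex-llm` is installed and using the Hugging Face checkpoint `THUDM/chatglm3-6b` (not part of this commit):

from ipex_llm.transformers import AutoModel   # previously: from bigdl.llm.transformers import AutoModel
from transformers import AutoTokenizer

# load_in_4bit=True converts the relevant layers to INT4 for low-memory CPU inference
model = AutoModel.from_pretrained("THUDM/chatglm3-6b", load_in_4bit=True, trust_remote_code=True)
tokenizer = AutoTokenizer.from_pretrained("THUDM/chatglm3-6b", trust_remote_code=True)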

@@ -38,7 +38,7 @@
from typing import List, Literal, Optional, Union
from loguru import logger
from pydantic import BaseModel, Field
from bigdl.llm.transformers import AutoModel
from ipex_llm.transformers import AutoModel
from transformers import AutoTokenizer
from utils import process_response, generate_chatglm3, generate_stream_chatglm3
# from sentence_transformers import SentenceTransformer
@@ -1,5 +1,5 @@
import time
from bigdl.llm.transformers import AutoModel
from ipex_llm.transformers import AutoModel
from transformers import AutoTokenizer

CHATGLM_V3_PROMPT_FORMAT = "<|user|>\n{prompt}\n<|assistant|>"
@@ -18,7 +18,7 @@

import os
import streamlit as st
from bigdl.llm.transformers import AutoModel
from ipex_llm.transformers import AutoModel
from transformers import AutoTokenizer


72 changes: 72 additions & 0 deletions Intel_device_demo/ipex_cpu_demo/generate.py
@@ -0,0 +1,72 @@
#
# Copyright 2016 The BigDL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import torch
import time
import argparse
import numpy as np

from ipex_llm.transformers import AutoModel
from modelscope import AutoTokenizer
# Note: importing AutoTokenizer from transformers as well would shadow the ModelScope
# tokenizer, which is the one needed for the ModelScope checkpoint loaded below.

# you could tune the prompt based on your own model,
# here the prompt tuning refers to https://github.com/THUDM/ChatGLM3/blob/main/PROMPT.md
CHATGLM_V3_PROMPT_FORMAT = "<|user|>\n{prompt}\n<|assistant|>"
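# For example, with the default prompt "AI是什么?" the formatted input fed to the model is:
#   <|user|>
#   AI是什么?
#   <|assistant|>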

if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Predict Tokens using `generate()` API for ModelScope ChatGLM3 model')
    parser.add_argument('--repo-id-or-model-path', type=str, default="ZhipuAI/chatglm3-6b",
                        help='The ModelScope repo id for the ChatGLM3 model to be downloaded'
                             ', or the path to the ModelScope checkpoint folder')
    parser.add_argument('--prompt', type=str, default="AI是什么?",
                        help='Prompt to infer')
    parser.add_argument('--n-predict', type=int, default=32,
                        help='Max tokens to predict')

    args = parser.parse_args()
    model_path = args.repo_id_or_model_path

    # Load the model in 4 bit, which converts the relevant layers in the model into INT4 format.
    # It is important to set `model_hub='modelscope'`; otherwise the model hub defaults to huggingface.
    model = AutoModel.from_pretrained(model_path,
                                      load_in_4bit=True,
                                      trust_remote_code=True,
                                      model_hub='modelscope')

    # Load tokenizer
    tokenizer = AutoTokenizer.from_pretrained(model_path,
                                              trust_remote_code=True)

    # Generate predicted tokens
    with torch.inference_mode():
        prompt = CHATGLM_V3_PROMPT_FORMAT.format(prompt=args.prompt)
        input_ids = tokenizer.encode(prompt, return_tensors="pt")
        st = time.time()
        # If your selected model is capable of using previous key/value attentions
        # to speed up decoding, but has `"use_cache": false` in its model config,
        # it is important to set `use_cache=True` explicitly in the `generate` call
        # to obtain optimal performance with IPEX-LLM INT4 optimizations.
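        # e.g., if the config had disabled the KV cache, one could pass it explicitly:
        # output = model.generate(input_ids, use_cache=True, max_new_tokens=args.n_predict)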
        output = model.generate(input_ids,
                                max_new_tokens=args.n_predict)
        end = time.time()
        output_str = tokenizer.decode(output[0], skip_special_tokens=True)
        print(f'Inference time: {end - st} s')
        print('-' * 20, 'Prompt', '-' * 20)
        print(prompt)
        print('-' * 20, 'Output', '-' * 20)
        print(output_str)
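Assuming `ipex-llm` and `modelscope` are installed, the new script would be run roughly as follows (the flags match the argparse definitions above):

python Intel_device_demo/ipex_cpu_demo/generate.py --repo-id-or-model-path ZhipuAI/chatglm3-6b --prompt "AI是什么?" --n-predict 32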
File renamed without changes.
