Skip to content

Commit a1048ca

Browse files
authored
Update setup.py, add new actions, and add compatible mode (#25)
* update setup.py * add new action * add compatible mode
1 parent 9df70d9 commit a1048ca

File tree

11 files changed

+188
-27
lines changed

11 files changed

+188
-27
lines changed
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,46 @@
1+
name: "llm-cli Flow Verification (Linux)"
2+
description: "Verify the llm-cli flow on linux"
3+
4+
runs:
5+
using: "composite"
6+
steps:
7+
- name: Test llama llm-cli
8+
shell: bash
9+
run: |
10+
llm-cli -t $THREAD_NUM -n 256 -x llama -m $LLAMA_INT4_CKPT_PATH -p 'Once upon a time,'
11+
12+
timeout 30s llm-cli -t $THREAD_NUM -n 256 -x llama -m $LLAMA_INT4_CKPT_PATH -i -p \
13+
'A chat between a curious user and a helpful and polite AI assistant. User:Can you tell me a story? AI:' >test.out 2>&1 || true
14+
15+
if ! grep -q 'A chat between a curious user and a helpful and polite AI assistant.' test.out ; then
16+
exit 1
17+
fi
18+
rm test.out
19+
20+
- name: Test gptneox llm-cli
21+
shell: bash
22+
run: |
23+
llm-cli -t $THREAD_NUM -n 256 -x gptneox -m $GPTNEOX_INT4_CKPT_PATH -p 'Once upon a time,'
24+
25+
timeout 30s llm-cli -t $THREAD_NUM -n 256 -x gptneox -m $GPTNEOX_INT4_CKPT_PATH -i -p \
26+
'A chat between a curious user and a helpful and polite AI assistant. User:Can you tell me a story? AI:' >test.out 2>&1 || true
27+
28+
if ! grep -q 'A chat between a curious user and a helpful and polite AI assistant.' test.out ; then
29+
exit 1
30+
fi
31+
rm test.out
32+
33+
- name: Test bloom llm-cli
34+
shell: bash
35+
run: |
36+
llm-cli -t $THREAD_NUM -n 256 -x bloom -m $BLOOM_INT4_CKPT_PATH -p 'Once upon a time,'
37+
38+
- name: Test starcoder llm-cli
39+
shell: bash
40+
run: |
41+
llm-cli -t $THREAD_NUM -n 256 -x starcoder -m $STARCODER_INT4_CKPT_PATH -p 'def check_odd('
42+
43+
# - name: Test chatglm llm-cli
44+
# shell: bash
45+
# run: |
46+
# llm-cli -t $THREAD_NUM -n 256 -x chatglm -m $CHATGLM_INT4_CKPT_PATH -p '你好'
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,25 @@
1+
name: "llm-cli Flow Verification (Windows)"
2+
description: "Verify the llm-cli flow on Windows"
3+
4+
runs:
5+
using: "composite"
6+
steps:
7+
- name: Test llama llm-cli
8+
shell: powershell
9+
run: |
10+
llm-cli.ps1 -t $env:THREAD_NUM -n 256 -x llama -m $env:LLAMA_INT4_CKPT_PATH -p 'Once upon a time,'
11+
12+
- name: Test gptneox llm-cli
13+
shell: powershell
14+
run: |
15+
llm-cli.ps1 -t $env:THREAD_NUM -n 256 -x gptneox -m $env:GPTNEOX_INT4_CKPT_PATH -p 'Once upon a time,'
16+
17+
- name: Test bloom llm-cli
18+
shell: powershell
19+
run: |
20+
llm-cli.ps1 -t $env:THREAD_NUM -n 256 -x bloom -m $env:BLOOM_INT4_CKPT_PATH -p 'Once upon a time,'
21+
22+
# - name: Test starcoder llm-cli
23+
# shell: powershell
24+
# run: |
25+
# llm-cli.ps1 -t $env:THREAD_NUM -x starcoder -m $env:STARCODER_INT4_CKPT_PATH -p 'def check_odd('
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,43 @@
1+
name: "BigDL-LLM convert tests"
2+
description: "BigDL-LLM convert test, including downloading original models"
3+
4+
runs:
5+
using: "composite"
6+
steps:
7+
- name: Download original models (LLaMA)
8+
shell: bash
9+
run: |
10+
if [ ! -d $LLAMA_ORIGIN_PATH ]; then
11+
echo "Directory $LLAMA_ORIGIN_PATH not found. Downloading from FTP server..."
12+
wget -r -nH --no-verbose --cut-dirs=1 $LLM_FTP_URL/llm/llama-7b-hf -P $ORIGIN_DIR
13+
fi
14+
15+
- name: Download original models (GPT-NeoX)
16+
shell: bash
17+
run: |
18+
if [ ! -d $GPTNEOX_ORIGIN_PATH ]; then
19+
echo "Directory $GPTNEOX_ORIGIN_PATH not found. Downloading from FTP server..."
20+
wget -r -nH --no-verbose --cut-dirs=1 $LLM_FTP_URL/llm/gptneox-7b-redpajama-bf16 -P $ORIGIN_DIR
21+
fi
22+
23+
- name: Download original models (BLOOM)
24+
shell: bash
25+
run: |
26+
if [ ! -d $BLOOM_ORIGIN_PATH ]; then
27+
echo "Directory $BLOOM_ORIGIN_PATH not found. Downloading from FTP server..."
28+
wget -r -nH --no-verbose --cut-dirs=1 $LLM_FTP_URL/llm/bloomz-7b1 -P $ORIGIN_DIR
29+
fi
30+
31+
- name: Download original models (StarCoder)
32+
shell: bash
33+
run: |
34+
if [ ! -d $STARCODER_ORIGIN_PATH ]; then
35+
echo "Directory $STARCODER_ORIGIN_PATH not found. Downloading from FTP server..."
36+
wget -r -nH --no-verbose --cut-dirs=1 $LLM_FTP_URL/llm/gpt_bigcode-santacoder -P $ORIGIN_DIR
37+
fi
38+
39+
- name: Convert test
40+
shell: bash
41+
run: |
42+
echo "Running the convert models tests..."
43+
python -m pytest -s python/llm/test/convert/test_convert_model.py
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,14 @@
1+
name: 'BigDL-LLM example tests'
2+
description: 'BigDL-LLM example tests'
3+
4+
runs:
5+
using: "composite"
6+
steps:
7+
- name: Test LLAMA2
8+
shell: bash
9+
env:
10+
INT4_CKPT_DIR: ./llm/ggml-actions/stable
11+
LLM_DIR: ./llm
12+
13+
run: |
14+
bash python/llm/dev/test/run-example-tests.sh

apps/ipynb2py.sh

+21
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,21 @@
1+
#!/bin/bash
2+
3+
## Usage ################################
4+
# ./ipynb2py <file-name without extension>
5+
# Example:
6+
# ipynb2py notebooks/neural_networks/rnn
7+
#########################################
8+
if [ $# -ne "1" ]; then
9+
echo "Usage: ./nb2script <file-name without extension>"
10+
else
11+
cp $1.ipynb $1.tmp.ipynb
12+
sed -i 's/%%/#/' $1.tmp.ipynb
13+
sed -i 's/%pylab/#/' $1.tmp.ipynb
14+
15+
jupyter nbconvert $1.tmp.ipynb --to python
16+
17+
mv $1.tmp.py $1.py
18+
sed -i '1i# -*- coding: utf-8 -*-' $1.py
19+
sed -i '#!/usr/bin/python' $1.py
20+
rm $1.tmp.ipynb
21+
fi

python/llm/scripts/check.py

+4-4
Original file line numberDiff line numberDiff line change
@@ -50,10 +50,10 @@ def check_torch_version():
5050
except:
5151
print("PyTorch is not installed.")
5252

53-
def check_bigdl_version():
53+
def check_ipex_llm_version():
5454
import os
55-
if os.system("pip show bigdl-llm")!=0:
56-
print("BigDL is not installed")
55+
if os.system("pip show ipex-llm")!=0:
56+
print("ipex-llm is not installed")
5757

5858

5959
def check_ipex_version():
@@ -71,7 +71,7 @@ def main():
7171
print("-----------------------------------------------------------------")
7272
check_torch_version()
7373
print("-----------------------------------------------------------------")
74-
check_bigdl_version()
74+
check_ipex_llm_version()
7575
print("-----------------------------------------------------------------")
7676
check_ipex_version()
7777

python/llm/scripts/env-check.sh

+5-5
Original file line numberDiff line numberDiff line change
@@ -51,11 +51,11 @@ check_torch()
5151
fi
5252
}
5353

54-
check_bigdl()
54+
check_ipex_llm()
5555
{
5656
echo "-----------------------------------------------------------------"
57-
echo -n 'BigDL '
58-
pip show bigdl-llm | grep Version:
57+
echo -n 'ipex-llm '
58+
pip show ipex-llm | grep Version:
5959
}
6060

6161
check_cpu_info()
@@ -135,10 +135,10 @@ main()
135135
exit -1
136136
fi
137137

138-
# check site packages version, such as transformers, pytorch, bigdl
138+
# check site packages version, such as transformers, pytorch, ipex_llm
139139
check_transformers
140140
check_torch
141-
check_bigdl
141+
check_ipex_llm
142142
check_ipex
143143

144144
# verify hardware (how many gpu availables, gpu status, cpu info, memory info, etc.)

python/llm/scripts/bigdl-llm-init renamed to python/llm/scripts/ipex-llm-init

+9-9
Original file line numberDiff line numberDiff line change
@@ -1,9 +1,9 @@
11
#!/bin/bash
22

33
## Usage #############################
4-
# source bigdl-llm-init
4+
# source ipex-llm-init
55
# Example:
6-
# source bigdl-llm-init
6+
# source ipex-llm-init
77
######################################
88

99
function enable_iomp {
@@ -60,10 +60,10 @@ function display-var {
6060
}
6161

6262
function display-help {
63-
echo "Usage: source bigdl-llm-init [-o] [--option]"
63+
echo "Usage: source ipex-llm-init [-o] [--option]"
6464
echo ""
65-
echo "bigdl-llm-init is a tool to automatically configure and run the subcommand under"
66-
echo "environment variables for accelerating BigDL-LLM."
65+
echo "ipex-llm-init is a tool to automatically configure and run the subcommand under"
66+
echo "environment variables for accelerating IPEX-LLM."
6767
echo ""
6868
echo "Optional options:"
6969
echo " -h, --help Display this help message and exit."
@@ -157,25 +157,25 @@ done
157157

158158
shift $((OPTIND -1))
159159

160-
# Find bigdl-llm-init dir
160+
# Find ipex-llm-init dir
161161
if [ ! -z $BASH_SOURCE ]; then
162162
# using bash
163163
if [ "$BASH_SOURCE" = "$0" ]; then
164-
echo "Error: Incorrect usage: bigdl-llm-init must be sourced."
164+
echo "Error: Incorrect usage: ipex-llm-init must be sourced."
165165
exit 1
166166
fi
167167
BIN_DIR="$(dirname $BASH_SOURCE)"
168168
else
169169
# using zsh
170170
if [ "$zsh_eval_context" = "toplevel" ]; then
171-
echo "Error: Incorrect usage: bigdl-llm-init must be sourced."
171+
echo "Error: Incorrect usage: ipex-llm-init must be sourced."
172172
exit 1
173173
fi
174174
BIN_DIR="$(dirname ${(%):-%N})"
175175
fi
176176

177177
LIB_DIR=$(dirname ${BIN_DIR})/lib
178-
LLM_DIR=$(dirname $(python3 -c "import bigdl; print(bigdl.__file__)"))/llm
178+
LLM_DIR=$(dirname $(python3 -c "import ipex_llm; print(ipex_llm.__file__)"))
179179

180180
if [ "${ENABLE_IOMP}" -eq 1 ]; then
181181
file="${LIB_DIR}/libiomp5.so"

python/llm/setup.py

+9-9
Original file line numberDiff line numberDiff line change
@@ -39,16 +39,16 @@
3939
from setuptools import setup
4040

4141
long_description = '''
42-
BigDL LLM
42+
IPEX LLM
4343
'''
4444

4545
exclude_patterns = ["*__pycache__*", "*ipynb_checkpoints*"]
46-
BIGDL_PYTHON_HOME = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
47-
VERSION = open(os.path.join(BIGDL_PYTHON_HOME,
48-
'version.txt'), 'r').read().strip()
46+
IPEX_LLM_PYTHON_HOME = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
47+
VERSION = open(os.path.join(IPEX_LLM_PYTHON_HOME,
48+
'./llm/version.txt'), 'r').read().strip()
4949
llm_home = os.path.join(os.path.dirname(os.path.abspath(__file__)), "src")
5050
github_artifact_dir = os.path.join(llm_home, '../llm-binary')
51-
libs_dir = os.path.join(llm_home, "bigdl", "llm", "libs")
51+
libs_dir = os.path.join(llm_home, "ipex_llm", "libs")
5252
CONVERT_DEP = ['numpy == 1.26.4', # latest 2.0.0b1 will cause error
5353
'torch',
5454
'transformers == 4.31.0', 'sentencepiece', 'tokenizers == 0.13.3',
@@ -145,7 +145,7 @@
145145

146146
def get_llm_packages():
147147
llm_packages = []
148-
for dirpath, _, _ in os.walk(os.path.join(llm_home, "bigdl")):
148+
for dirpath, _, _ in os.walk(os.path.join(llm_home, "ipex_llm")):
149149
print(dirpath)
150150
package = dirpath.split(llm_home + os.sep)[1].replace(os.sep, '.')
151151
if any(fnmatch.fnmatchcase(package, pat=pattern)
@@ -299,7 +299,7 @@ def setup_package():
299299

300300

301301
metadata = dict(
302-
name='bigdl-llm',
302+
name='ipex_llm',
303303
version=VERSION,
304304
description='Large Language Model Develop Toolkit',
305305
long_description=long_description,
@@ -330,8 +330,8 @@ def setup_package():
330330
'Programming Language :: Python :: 3.9',
331331
'Programming Language :: Python :: Implementation :: CPython'],
332332
scripts={
333-
'Linux': ['src/bigdl/llm/cli/llm-cli', 'src/bigdl/llm/cli/llm-chat', 'scripts/bigdl-llm-init'],
334-
'Windows': ['src/bigdl/llm/cli/llm-cli.ps1', 'src/bigdl/llm/cli/llm-chat.ps1'],
333+
'Linux': ['src/ipex_llm/cli/llm-cli', 'src/ipex_llm/cli/llm-chat', 'scripts/ipex-llm-init'],
334+
'Windows': ['src/ipex_llm/cli/llm-cli.ps1', 'src/ipex_llm/cli/llm-chat.ps1'],
335335
}[platform_name],
336336
platforms=['windows']
337337
)

python/llm/src/ipex_llm/__init__.py

+11
Original file line numberDiff line numberDiff line change
@@ -23,10 +23,21 @@
2323
from .optimize import optimize_model
2424
import os
2525
from .llm_patching import llm_patch, llm_unpatch
26+
import sys
27+
import types
2628

2729
# Default is true; set to false to disable auto-importing Intel Extension for PyTorch.
2830
BIGDL_IMPORT_IPEX = os.getenv("BIGDL_IMPORT_IPEX", 'True').lower() in ('true', '1', 't')
2931
if BIGDL_IMPORT_IPEX:
3032
# Import Intel Extension for PyTorch as ipex if XPU version is installed
3133
from .utils.ipex_importer import ipex_importer
3234
ipex_importer.import_ipex()
35+
36+
# Default is true; set to false to disable auto-patching bigdl-llm to ipex_llm.
37+
BIGDL_COMPATIBLE_MODE = os.getenv("BIGDL_COMPATIBLE_MODE", 'True').lower() in ('true', '1', 't')
38+
if BIGDL_COMPATIBLE_MODE:
39+
# Allow applications written against the previous bigdl-llm package to run unmodified via this patch
40+
# Avoid ModuleNotFoundError of 'bigdl', map 'bigdl' to a dummy module
41+
sys.modules['bigdl'] = types.ModuleType('_ipex_llm_dummy')
42+
# Map 'bigdl.llm' to 'ipex_llm'
43+
sys.modules['bigdl.llm'] = sys.modules['ipex_llm']

python/llm/version.txt

+1
Original file line numberDiff line numberDiff line change
@@ -0,0 +1 @@
1+
2.1.0.dev0

0 commit comments

Comments
 (0)