Commit 86e4ab1

change ref everywhere
1 parent a87e1cf commit 86e4ab1

11 files changed (+44 -44 lines)


Makefile (+19 -19)

@@ -1394,36 +1394,36 @@ llama-gen-docs: examples/gen-docs/gen-docs.cpp \
 	$(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<)
 	$(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS)
 
-libllava.a: tools/llava/llava.cpp \
-	tools/llava/llava.h \
-	tools/llava/clip.cpp \
-	tools/llava/clip.h \
+libllava.a: tools/mtmd/llava.cpp \
+	tools/mtmd/llava.h \
+	tools/mtmd/clip.cpp \
+	tools/mtmd/clip.h \
 	common/stb_image.h \
 	common/base64.hpp \
 	$(OBJ_ALL)
 	$(CXX) $(CXXFLAGS) -static -fPIC -c $< -o $@ -Wno-cast-qual
 
-llama-llava-cli: tools/llava/llava-cli.cpp \
-	tools/llava/llava.cpp \
-	tools/llava/llava.h \
-	tools/llava/clip.cpp \
-	tools/llava/clip.h \
+llama-llava-cli: tools/mtmd/llava-cli.cpp \
+	tools/mtmd/llava.cpp \
+	tools/mtmd/llava.h \
+	tools/mtmd/clip.cpp \
+	tools/mtmd/clip.h \
 	$(OBJ_ALL)
 	$(CXX) $(CXXFLAGS) $< $(filter-out %.h $<,$^) -o $@ $(LDFLAGS) -Wno-cast-qual
 
-llama-minicpmv-cli: tools/llava/minicpmv-cli.cpp \
-	tools/llava/llava.cpp \
-	tools/llava/llava.h \
-	tools/llava/clip.cpp \
-	tools/llava/clip.h \
+llama-minicpmv-cli: tools/mtmd/minicpmv-cli.cpp \
+	tools/mtmd/llava.cpp \
+	tools/mtmd/llava.h \
+	tools/mtmd/clip.cpp \
+	tools/mtmd/clip.h \
 	$(OBJ_ALL)
 	$(CXX) $(CXXFLAGS) $< $(filter-out %.h $<,$^) -o $@ $(LDFLAGS) -Wno-cast-qual
 
-llama-qwen2vl-cli: tools/llava/qwen2vl-cli.cpp \
-	tools/llava/llava.cpp \
-	tools/llava/llava.h \
-	tools/llava/clip.cpp \
-	tools/llava/clip.h \
+llama-qwen2vl-cli: tools/mtmd/qwen2vl-cli.cpp \
+	tools/mtmd/llava.cpp \
+	tools/mtmd/llava.h \
+	tools/mtmd/clip.cpp \
+	tools/mtmd/clip.h \
 	$(OBJ_ALL)
 	$(CXX) $(CXXFLAGS) $< $(filter-out %.h $<,$^) -o $@ $(LDFLAGS) -Wno-cast-qual
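Only the prerequisite paths change here; the target names stay the same. A minimal smoke test, assuming a llama.cpp checkout where the Makefile build is already set up (target names are taken from the hunk above):

```sh
# Rebuild only the targets whose prerequisites moved from tools/llava to tools/mtmd.
make llama-llava-cli llama-minicpmv-cli llama-qwen2vl-cli
```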

common/arg.cpp (+2 -2)

@@ -2211,14 +2211,14 @@ common_params_context common_params_parser_init(common_params & params, llama_ex
     ).set_examples({LLAMA_EXAMPLE_SERVER}).set_env("LLAMA_ARG_NO_CONT_BATCHING"));
     add_opt(common_arg(
         {"--mmproj"}, "FILE",
-        "path to a multimodal projector file. see tools/llava/README.md",
+        "path to a multimodal projector file. see tools/mtmd/README.md",
         [](common_params & params, const std::string & value) {
             params.mmproj.path = value;
         }
     ).set_examples(mmproj_examples));
     add_opt(common_arg(
         {"--mmproj-url"}, "URL",
-        "URL to a multimodal projector file. see tools/llava/README.md",
+        "URL to a multimodal projector file. see tools/mtmd/README.md",
         [](common_params & params, const std::string & value) {
             params.mmproj.url = value;
         }
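The `--mmproj` flag itself is unchanged; only the README pointer in its help text moves. A hedged usage sketch (the model, projector, and image file names below are placeholders, not part of this commit):

```sh
# Pass a multimodal projector alongside the language model;
# --mmproj is the option registered above.
./llama-llava-cli -m llava-v1.5-7b.Q4_K_M.gguf \
    --mmproj mmproj-model-f16.gguf \
    --image ./demo.png \
    -p "Describe this image."
```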

common/common.h (+1 -1)

@@ -340,7 +340,7 @@ struct common_params {
 
     common_conversation_mode conversation_mode = COMMON_CONVERSATION_MODE_AUTO;
 
-    // multimodal models (see tools/llava)
+    // multimodal models (see tools/mtmd)
     struct common_params_model mmproj;
     bool mmproj_use_gpu = true; // use GPU for multimodal model
     bool no_mmproj = false;     // explicitly disable multimodal model

docs/multimodal/MobileVLM.md (+6 -6)

@@ -33,21 +33,21 @@ git clone https://huggingface.co/openai/clip-vit-large-patch14-336
 2. Use `llava_surgery.py` to split the LLaVA model to LLaMA and multimodel projector constituents:
 
 ```sh
-python ./tools/llava/llava_surgery.py -m path/to/MobileVLM-1.7B
+python ./tools/mtmd/llava_surgery.py -m path/to/MobileVLM-1.7B
 ```
 
 3. Use `convert_image_encoder_to_gguf.py` with `--projector-type ldp` (for **V2** please use `--projector-type ldpv2`) to convert the LLaVA image encoder to GGUF:
 
 ```sh
-python ./tools/llava/convert_image_encoder_to_gguf.py \
+python ./tools/mtmd/convert_image_encoder_to_gguf.py \
     -m path/to/clip-vit-large-patch14-336 \
     --llava-projector path/to/MobileVLM-1.7B/llava.projector \
     --output-dir path/to/MobileVLM-1.7B \
     --projector-type ldp
 ```
 
 ```sh
-python ./tools/llava/convert_image_encoder_to_gguf.py \
+python ./tools/mtmd/convert_image_encoder_to_gguf.py \
     -m path/to/clip-vit-large-patch14-336 \
     --llava-projector path/to/MobileVLM-1.7B_V2/llava.projector \
     --output-dir path/to/MobileVLM-1.7B_V2 \
@@ -69,10 +69,10 @@ Now both the LLaMA part and the image encoder is in the `MobileVLM-1.7B` directo
 
 ## Android compile and run
 ### compile
-refer to `tools/llava/android/build_64.sh`
+refer to `tools/mtmd/android/build_64.sh`
 ```sh
-mkdir tools/llava/android/build_64
-cd tools/llava/android/build_64
+mkdir tools/mtmd/android/build_64
+cd tools/mtmd/android/build_64
 ../build_64.sh
 ```
 ### run on Android

docs/multimodal/glmedge.md (+2 -2)

@@ -25,13 +25,13 @@ git clone https://huggingface.co/THUDM/glm-edge-v-5b or https://huggingface.co/T
 2. Use `glmedge-surgery.py` to split the GLMV-EDGE model to LLM and multimodel projector constituents:
 
 ```sh
-python ./tools/llava/glmedge-surgery.py -m ../model_path
+python ./tools/mtmd/glmedge-surgery.py -m ../model_path
 ```
 
 4. Use `glmedge-convert-image-encoder-to-gguf.py` to convert the GLMV-EDGE image encoder to GGUF:
 
 ```sh
-python ./tools/llava/glmedge-convert-image-encoder-to-gguf.py -m ../model_path --llava-projector ../model_path/glm.projector --output-dir ../model_path
+python ./tools/mtmd/glmedge-convert-image-encoder-to-gguf.py -m ../model_path --llava-projector ../model_path/glm.projector --output-dir ../model_path
 ```
 
 5. Use `examples/convert_hf_to_gguf.py` to convert the LLM part of GLMV-EDGE to GGUF:

docs/multimodal/llava.md (+6 -6)

@@ -37,19 +37,19 @@ git clone https://huggingface.co/openai/clip-vit-large-patch14-336
 2. Install the required Python packages:
 
 ```sh
-pip install -r tools/llava/requirements.txt
+pip install -r tools/mtmd/requirements.txt
 ```
 
 3. Use `llava_surgery.py` to split the LLaVA model to LLaMA and multimodel projector constituents:
 
 ```sh
-python ./tools/llava/llava_surgery.py -m ../llava-v1.5-7b
+python ./tools/mtmd/llava_surgery.py -m ../llava-v1.5-7b
 ```
 
 4. Use `convert_image_encoder_to_gguf.py` to convert the LLaVA image encoder to GGUF:
 
 ```sh
-python ./tools/llava/convert_image_encoder_to_gguf.py -m ../clip-vit-large-patch14-336 --llava-projector ../llava-v1.5-7b/llava.projector --output-dir ../llava-v1.5-7b
+python ./tools/mtmd/convert_image_encoder_to_gguf.py -m ../clip-vit-large-patch14-336 --llava-projector ../llava-v1.5-7b/llava.projector --output-dir ../llava-v1.5-7b
 ```
 
 5. Use `examples/convert_legacy_llama.py` to convert the LLaMA part of LLaVA to GGUF:
@@ -69,12 +69,12 @@ git clone https://huggingface.co/liuhaotian/llava-v1.6-vicuna-7b
 2) Install the required Python packages:
 
 ```sh
-pip install -r tools/llava/requirements.txt
+pip install -r tools/mtmd/requirements.txt
 ```
 
 3) Use `llava_surgery_v2.py` which also supports llava-1.5 variants pytorch as well as safetensor models:
 ```console
-python tools/llava/llava_surgery_v2.py -C -m ../llava-v1.6-vicuna-7b/
+python tools/mtmd/llava_surgery_v2.py -C -m ../llava-v1.6-vicuna-7b/
 ```
 - you will find a llava.projector and a llava.clip file in your model directory
 
@@ -88,7 +88,7 @@ curl -s -q https://huggingface.co/cmp-nct/llava-1.6-gguf/raw/main/config_vit.jso
 
 5) Create the visual gguf model:
 ```console
-python ./tools/llava/convert_image_encoder_to_gguf.py -m vit --llava-projector vit/llava.projector --output-dir vit --clip-model-is-vision
+python ./tools/mtmd/convert_image_encoder_to_gguf.py -m vit --llava-projector vit/llava.projector --output-dir vit --clip-model-is-vision
 ```
 - This is similar to llava-1.5, the difference is that we tell the encoder that we are working with the pure vision model part of CLIP

docs/multimodal/minicpmo2.6.md (+2 -2)

@@ -29,8 +29,8 @@ cmake --build build --config Release
 Convert PyTorch model to gguf files (You can also download the converted [gguf](https://huggingface.co/openbmb/MiniCPM-o-2_6-gguf) by us)
 
 ```bash
-python ./tools/llava/minicpmv-surgery.py -m ../MiniCPM-o-2_6
-python ./tools/llava/minicpmv-convert-image-encoder-to-gguf.py -m ../MiniCPM-o-2_6 --minicpmv-projector ../MiniCPM-o-2_6/minicpmv.projector --output-dir ../MiniCPM-o-2_6/ --image-mean 0.5 0.5 0.5 --image-std 0.5 0.5 0.5 --minicpmv_version 4
+python ./tools/mtmd/minicpmv-surgery.py -m ../MiniCPM-o-2_6
+python ./tools/mtmd/minicpmv-convert-image-encoder-to-gguf.py -m ../MiniCPM-o-2_6 --minicpmv-projector ../MiniCPM-o-2_6/minicpmv.projector --output-dir ../MiniCPM-o-2_6/ --image-mean 0.5 0.5 0.5 --image-std 0.5 0.5 0.5 --minicpmv_version 4
 python ./convert_hf_to_gguf.py ../MiniCPM-o-2_6/model
 
 # quantize int4 version

docs/multimodal/minicpmv2.5.md (+2 -2)

@@ -28,8 +28,8 @@ cmake --build build --config Release
 Convert PyTorch model to gguf files (You can also download the converted [gguf](https://huggingface.co/openbmb/MiniCPM-Llama3-V-2_5-gguf) by us)
 
 ```bash
-python ./tools/llava/minicpmv-surgery.py -m ../MiniCPM-Llama3-V-2_5
-python ./tools/llava/minicpmv-convert-image-encoder-to-gguf.py -m ../MiniCPM-Llama3-V-2_5 --minicpmv-projector ../MiniCPM-Llama3-V-2_5/minicpmv.projector --output-dir ../MiniCPM-Llama3-V-2_5/ --image-mean 0.5 0.5 0.5 --image-std 0.5 0.5 0.5 --minicpmv_version 2
+python ./tools/mtmd/minicpmv-surgery.py -m ../MiniCPM-Llama3-V-2_5
+python ./tools/mtmd/minicpmv-convert-image-encoder-to-gguf.py -m ../MiniCPM-Llama3-V-2_5 --minicpmv-projector ../MiniCPM-Llama3-V-2_5/minicpmv.projector --output-dir ../MiniCPM-Llama3-V-2_5/ --image-mean 0.5 0.5 0.5 --image-std 0.5 0.5 0.5 --minicpmv_version 2
 python ./convert_hf_to_gguf.py ../MiniCPM-Llama3-V-2_5/model
 
 # quantize int4 version

docs/multimodal/minicpmv2.6.md (+2 -2)

@@ -28,8 +28,8 @@ cmake --build build --config Release
 Convert PyTorch model to gguf files (You can also download the converted [gguf](https://huggingface.co/openbmb/MiniCPM-V-2_6-gguf) by us)
 
 ```bash
-python ./tools/llava/minicpmv-surgery.py -m ../MiniCPM-V-2_6
-python ./tools/llava/minicpmv-convert-image-encoder-to-gguf.py -m ../MiniCPM-V-2_6 --minicpmv-projector ../MiniCPM-V-2_6/minicpmv.projector --output-dir ../MiniCPM-V-2_6/ --image-mean 0.5 0.5 0.5 --image-std 0.5 0.5 0.5 --minicpmv_version 3
+python ./tools/mtmd/minicpmv-surgery.py -m ../MiniCPM-V-2_6
+python ./tools/mtmd/minicpmv-convert-image-encoder-to-gguf.py -m ../MiniCPM-V-2_6 --minicpmv-projector ../MiniCPM-V-2_6/minicpmv.projector --output-dir ../MiniCPM-V-2_6/ --image-mean 0.5 0.5 0.5 --image-std 0.5 0.5 0.5 --minicpmv_version 3
 python ./convert_hf_to_gguf.py ../MiniCPM-V-2_6/model
 
 # quantize int4 version

requirements/requirements-all.txt (+1 -1)

@@ -1,4 +1,4 @@
--r ../tools/llava/requirements.txt
+-r ../tools/mtmd/requirements.txt
 -r ../tools/server/bench/requirements.txt
 -r ../tools/server/tests/requirements.txt
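Since `-r` includes in a pip requirements file are resolved relative to that file's own directory, the updated include line keeps working unchanged from the repo root; a quick check (sketch):

```sh
# Installs everything, pulling in tools/mtmd/requirements.txt via the -r include above.
pip install -r requirements/requirements-all.txt
```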

tools/CMakeLists.txt (+1 -1)

@@ -27,7 +27,7 @@ else()
     add_subdirectory(run)
     add_subdirectory(tokenize)
     add_subdirectory(tts)
-    add_subdirectory(llava)
+    add_subdirectory(mtmd)
    if (GGML_RPC)
        add_subdirectory(rpc)
    endif()
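A hedged way to confirm the commit really did change the ref everywhere (the search paths below are just the directories touched by this commit; adjust as needed):

```sh
# Should print no matches once every reference points at tools/mtmd.
grep -rn "tools/llava" Makefile common docs requirements tools/CMakeLists.txt
```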
