Skip to content

Commit e6ddb15

Browse files
committed
cleanup
1 parent 1b71752 commit e6ddb15

File tree

4 files changed

+10
-2
lines changed

4 files changed

+10
-2
lines changed

otherarch/gpt2_v3.cpp

+1
Original file line numberDiff line numberDiff line change
@@ -347,6 +347,7 @@ ModelLoadResult gpt2_model_load(const std::string & fname, gpt2_model & model, g
347347

348348
//gpu offload
349349
#if defined(GGML_USE_CLBLAST)
350+
if(gpulayers>0)
350351
{
351352
const auto & hparams = model.hparams;
352353
size_t vram_total = 0;

otherarch/gptj_v3.cpp

+1
Original file line numberDiff line numberDiff line change
@@ -335,6 +335,7 @@ ModelLoadResult gptj_model_load(const std::string & fname, gptj_model & model, g
335335

336336
//gpu offload
337337
#if defined(GGML_USE_CLBLAST)
338+
if(gpulayers>0)
338339
{
339340
const auto & hparams = model.hparams;
340341
size_t vram_total = 0;

otherarch/mpt_v3.cpp

+4 −1
Original file line numberDiff line numberDiff line change
@@ -15,7 +15,9 @@
1515

1616
#include "model_adapter.h"
1717

18-
18+
#if defined(GGML_USE_CLBLAST)
19+
#include "ggml-opencl.h"
20+
#endif
1921

2022
// load the model's weights from a file
2123
bool mpt_model_load(const std::string & fname, mpt_model & model, gpt_vocab & vocab, int gpulayers) {
@@ -280,6 +282,7 @@ bool mpt_model_load(const std::string & fname, mpt_model & model, gpt_vocab & vo
280282

281283
//gpu offload
282284
#if defined(GGML_USE_CLBLAST)
285+
if(gpulayers>0)
283286
{
284287
const auto & hparams = model.hparams;
285288
size_t vram_total = 0;

otherarch/neox_v3.cpp

+4 −1
Original file line numberDiff line numberDiff line change
@@ -13,7 +13,9 @@
1313
#include <vector>
1414
#include <iostream>
1515

16-
16+
#if defined(GGML_USE_CLBLAST)
17+
#include "ggml-opencl.h"
18+
#endif
1719

1820
// load the model's weights from a file
1921
ModelLoadResult gpt_neox_model_load(const std::string & fname, gpt_neox_model & model, gpt_vocab & vocab, FileFormat file_format, int gpulayers) {
@@ -320,6 +322,7 @@ ModelLoadResult gpt_neox_model_load(const std::string & fname, gpt_neox_model &
320322

321323
//gpu offload
322324
#if defined(GGML_USE_CLBLAST)
325+
if(gpulayers>0)
323326
{
324327
const auto & hparams = model.hparams;
325328
size_t vram_total = 0;

0 commit comments

Comments (0)