Enable gpu support in bench (#581)
cjpais authored Oct 11, 2024
1 parent a76bf80 commit 726f6e8
Showing 1 changed file with 2 additions and 2 deletions.
llama.cpp/llama-bench/llama-bench.cpp (4 changes: 2 additions & 2 deletions)
@@ -450,6 +450,7 @@ static cmd_params parse_cmd_params(int argc, char ** argv) {
                 invalid_param = true;
                 break;
             }
+            FLAG_gpu = LLAMAFILE_GPU_AUTO;
             auto p = split<int>(argv[i], split_delim);
             params.n_gpu_layers.insert(params.n_gpu_layers.end(), p.begin(), p.end());
         } else if (arg == "-sm" || arg == "--split-mode") {
@@ -1357,8 +1358,6 @@ __attribute__((__constructor__(101))) static void init(void) {
 }
 
 int main(int argc, char ** argv) {
-    FLAG_gpu = LLAMAFILE_GPU_DISABLE; // [jart]
-
     ShowCrashReports();
 
     // try to set locale for unicode characters in markdown
@@ -1382,6 +1381,7 @@ int main(int argc, char ** argv) {
 #endif
 
     cmd_params params = parse_cmd_params(argc, argv);
+    FLAGS_READY = true;
 
     // initialize llama.cpp
     if (!params.verbose) {
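In effect, llama-bench no longer force-disables GPU support at startup; supplying a GPU layer count via -ngl / --n-gpu-layers now switches FLAG_gpu to auto-detection, and FLAGS_READY is set once argument parsing completes. A hypothetical invocation after this change (the binary path, model file, and layer count below are illustrative, not taken from the commit):

    llama-bench -m model.gguf -ngl 35   # -ngl triggers FLAG_gpu = LLAMAFILE_GPU_AUTO per the first hunk above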
