.PHONY: style quality

# make sure to test the local checkout in scripts and not the pre-installed one (don't use quotes!)
export PYTHONPATH = src

check_dirs := src

style:
	black --line-length 119 --target-version py310 $(check_dirs) setup.py
	isort $(check_dirs) setup.py

quality:
	black --check --line-length 119 --target-version py310 $(check_dirs) setup.py
	isort --check-only $(check_dirs) setup.py
	flake8 --max-line-length 119 $(check_dirs) setup.py
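
# Usage:
#   make style    # rewrite $(check_dirs) and setup.py in place with black and isort
#   make quality  # check-only; fails if black, isort, or flake8 report problems
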
# Evaluation
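#
# Variables:
#   MODEL     Hugging Face model ID to evaluate (required)
#   TASK      custom lighteval task defined in src/open_r1/evaluate.py, e.g. aime24 (required)
#   PARALLEL  optional; "data" runs one model replica per GPU and shards the eval data,
#             "tensor" splits the model weights across GPUs (vLLM then needs the spawn
#             multiprocessing method, which the recipe exports below)
#   NUM_GPUS  number of GPUs to use when PARALLEL is set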
evaluate:
	$(eval PARALLEL_ARGS := $(if $(PARALLEL),$(shell \
		if [ "$(PARALLEL)" = "data" ]; then \
			echo "data_parallel_size=$(NUM_GPUS)"; \
		elif [ "$(PARALLEL)" = "tensor" ]; then \
			echo "tensor_parallel_size=$(NUM_GPUS)"; \
		fi \
	),))
	$(if $(filter tensor,$(PARALLEL)),export VLLM_WORKER_MULTIPROC_METHOD=spawn &&,) \
	MODEL_ARGS="pretrained=$(MODEL),dtype=float16,$(PARALLEL_ARGS),max_model_length=32768,gpu_memory_utilisation=0.8" && \
	lighteval vllm $$MODEL_ARGS "custom|$(TASK)|0|0" \
		--custom-tasks src/open_r1/evaluate.py \
		--use-chat-template \
		--system-prompt="Please reason step by step, and put your final answer within \boxed{}." \
		--output-dir data/evals/$(MODEL)

# Example usage:
# Single GPU:
# make evaluate MODEL=deepseek-ai/DeepSeek-R1-Distill-Qwen-32B TASK=aime24
# Data parallel:
# make evaluate MODEL=deepseek-ai/DeepSeek-R1-Distill-Qwen-32B TASK=aime24 PARALLEL=data NUM_GPUS=8
# Tensor parallel:
# make evaluate MODEL=deepseek-ai/DeepSeek-R1-Distill-Qwen-32B TASK=aime24 PARALLEL=tensor NUM_GPUS=8
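#
# For the tensor-parallel example, the recipe expands to roughly:
#   export VLLM_WORKER_MULTIPROC_METHOD=spawn && \
#   MODEL_ARGS="pretrained=deepseek-ai/DeepSeek-R1-Distill-Qwen-32B,dtype=float16,tensor_parallel_size=8,max_model_length=32768,gpu_memory_utilisation=0.8" && \
#   lighteval vllm $MODEL_ARGS "custom|aime24|0|0" --custom-tasks src/open_r1/evaluate.py \
#     --use-chat-template --system-prompt="..." --output-dir data/evals/deepseek-ai/DeepSeek-R1-Distill-Qwen-32B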