run-mctaco-llama.sh
#!/bin/bash
# `conda activate` fails in non-interactive shells unless the conda hook is loaded first.
eval "$(conda shell.bash hook)"
conda activate llama-hf
model_path="/PATH/TO/YOUR/LLAMA/MODEL"
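# Hypothetical guard (not in the original script): fail fast if the placeholder
# path above was not replaced with a real LLaMA checkpoint directory.
if [ ! -d "$model_path" ]; then
    echo "Error: model_path '$model_path' does not exist; edit it first." >&2
    exit 1
fi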
# model_name="llama-7b"
# model_name="llama-13b"
# model_name="llama-33b"
# model_name="llama-65b"
# model_name="llama-7b-alpaca"
# model_name="Llama-2-7b-hf"
# model_name="Llama-2-13b-hf"
# model_name="Llama-2-70b-hf"
# model_name="Llama-2-7b-chat-hf"
# model_name="Llama-2-13b-chat-hf"
model_name="Llama-2-70b-chat-hf"
prompt_style="qa"
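
# Create the output directory up front (assumption: mctaco-llama.py may not create it itself).
mkdir -p llama-output/mctaco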
### Run MC-TACO
## Zero-shot experiment
for prompt in 1 2 3
do
    python3 mctaco-llama.py \
        --model_name "$model_name" \
        --model_path "$model_path" \
        --output_path "llama-output/mctaco/mctaco-${prompt_style}-zs-pt${prompt}-output" \
        --temperature 0 \
        --top_p 1.0 \
        --max_events_length 3600 \
        --max_new_decoding_tokens 0 \
        --mctaco_eval \
        --max_batch_size 1 \
        --prompt_style "$prompt_style" \
        --prompt_template "${prompt}"
done
## Few-shot ICL experiment
for prompt in 1 2 3
do
    python3 mctaco-llama.py \
        --model_name "$model_name" \
        --model_path "$model_path" \
        --output_path "llama-output/mctaco/mctaco-${prompt_style}-fs-pt${prompt}-output" \
        --temperature 0 \
        --top_p 1.0 \
        --max_events_length 3600 \
        --max_new_decoding_tokens 0 \
        --mctaco_eval \
        --do_in_context_learning \
        --max_batch_size 1 \
        --prompt_style "$prompt_style" \
        --prompt_template "${prompt}"
done
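
## Usage: bash run-mctaco-llama.sh
## Each loop sweeps prompt templates 1-3; outputs land under llama-output/mctaco/
## with -zs- (zero-shot) or -fs- (few-shot in-context learning) in the file name.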