-
Notifications
You must be signed in to change notification settings - Fork 0
/
get_animal_video_to_content_Hippopotamus_pigeon_sandmouse_Wyvern.sh
93 lines (52 loc) · 2.79 KB
/
get_animal_video_to_content_Hippopotamus_pigeon_sandmouse_Wyvern.sh
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
#!/bin/bash
# Run chat_videoV2.py sequentially over several animal video sub-directories,
# waiting for GPU 0's memory to be released between runs so each model load
# starts with a clean card.
set -uo pipefail

# Root directory containing one sub-directory of videos per animal.
readonly main_directory="/code/Video-LLaMA-main/data/zoo_300k/zoo_300k/zoo_300k"
readonly cfg_path="eval_configs/video_llama_eval_only_vl.yaml"
readonly gpu_id=0
readonly mem_threshold_mib=1000   # GPU is considered "free" below this usage

#######################################
# Block until GPU memory usage on the target GPU drops below the threshold,
# then pause briefly to make sure memory cleanup is complete.
# Globals: gpu_id, mem_threshold_mib (read)
#######################################
wait_for_gpu() {
  local memory_used
  echo "Checking GPU memory usage..."
  while true; do
    # Pin the query to the GPU we actually use; without --id a multi-GPU host
    # returns one line per card and breaks the numeric comparison below.
    memory_used=$(nvidia-smi --id="${gpu_id}" --query-gpu=memory.used --format=csv,noheader,nounits)
    if [ "$memory_used" -lt "$mem_threshold_mib" ]; then
      break
    fi
    echo "Waiting for GPU memory to clear..."
    sleep 60   # re-check after a while
  done
  # Extra delay to ensure memory cleanup is complete.
  sleep 30
}

#######################################
# Run the video-to-content model over one animal's directory.
# Arguments: $1 - animal sub-directory name (also passed as --animal_name)
# A failed run is reported but does not abort the remaining batch,
# matching the original script's behavior.
#######################################
run_animal() {
  local animal=$1
  if ! python chat_videoV2.py --cfg-path "$cfg_path" --model_type "llama_v2" \
      --gpu-id "$gpu_id" --video_root "${main_directory}/${animal}" \
      --animal_name "$animal"; then
    echo "warning: run for ${animal} failed" >&2
  fi
}

run_animal "Hippopotamus"
# Long fixed delay preserved from the original schedule before the next run.
sleep 6h
run_animal "HorseALL"
wait_for_gpu

run_animal "Hound"
wait_for_gpu

# run_animal "Isopetra"
# run_animal "Lynx"
run_animal "flamingo"
wait_for_gpu

run_animal "leopard"