sub_vasp_version_slurm
#!/bin/bash
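#
# Generates a SLURM batch script (<jobname>.sbatch) for a VASP run and,
# unless -g is given, submits it with sbatch (see usage() below).
#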
NAME=vasp
upper_NAME=$(echo ${NAME} | tr '[a-z]' '[A-Z]')
EXE_NAME=vasp
VERSION=5.3.2
VERSIONSUFFIX=
MPI_CMD=srun
MPI_ARGS="--mpi=pmi2"
SUBMIT_JOB=1
TOOLCHAIN=ictcet-4.5.0
EXTRA_MODULE_NAME1=use.restricted
EXTRA_MODULE_NAME2=
MODULE_NAME=${upper_NAME}/${VERSION}-${TOOLCHAIN}${VERSIONSUFFIX}
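# Site-wide helper library: the slurm_*, load_modules and get_scratch_dir
# functions used below are expected to come from this file; their exact
# behaviour is assumed from their names, as the library is not part of this script.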
source /hpc-common/software/sub_scripts/common_sub_fns.sh
#source /home/garvct/my_sw_development/pbs_job_submit/common_sub_fns.sh
function usage ()
{
echo
echo "sub_vasp_${VERSION}"
echo "usage: sub_vasp_${VERSION} [<-n mpiprocs>] [<-w HH:MM:SS>] <-P project> [<-q queue>] [<-N mpipernode>] [<-j jobname>] [<-W depends_on_jobid>] [<-m memory_per_process>] [<-F job_array_filename>] [<-J job_array_range>] [<-s scratch,ramdisk,localdisk>] [<-t cputype>] [<-D>] [<-l>] [<-c>] [<-g>] [<-h>]"
echo "Submits a VASP ${VERSION} job to the Fission cluster via SLURM."
echo " Where:"
echo " -n mpiprocs : Selects the number of mpi parallel processes to use.( Default: 1)"
echo " -w HH:MM:SS : Selects the Job Walltime.( Default: 24:00:00)"
echo " -P project : Selects the project this job is associated with.( Default: none)"
echo " -q queue : Selects the queue to use.( Default: general)"
echo " -N mpipernode : Selects the maximum number of mpi processes to use in each node.( Default: ${MPI_PER_NODE})"
echo " -j jobname : Selects the name of Job.( Default: vasp)"
echo " -W depend_on_jobid : This job runs after jobid is complete. (Default: none)"
echo " -m memory_per_process : Selects the Memory per process.( Default: SLURM default)"
echo " -F job_array_filename : Selects the job array Filename. Each line of the file contains"
echo " the full path to the directory containing the input files. (Default: None)"
echo " -J job_array_range : Selects the job array range (e.g 1-10)( Default: None)"
echo " -s scratch,ramdisk,localdisk : Job runs in scratch(panasas parallel filesystem),"
echo " ramdisk(memory where available) or localdisk (locally attached disk where available). (Default: Run from current location)"
echo " -t cpu_type : Applicable on falcon cluster only, requests the cpu type (\"haswell\" requests haswell processors, \"broadwell\" requests broadwell processors (default "haswell")"
echo " -D : Selects the debug option (performance basic system checks, tracks memory usage, process placement etc)( Default: off)"
echo " -l : Selects special MD vasp executable (LVDW=.true.).( Default: vasp)"
echo " -c : Selects Non-collinear vasp executable.( Default: vasp)"
echo " -g : Generate Job script only, but do not submit (Default: Generate and submit)"
echo " -h : Selects help.( Default: none)"
echo
echo " To add extra SLURM options (e.g --mail-type=BEGIN,END --mail-user=garvct), use the environmental variable SLURM_EXTRA_ARGS"
echo 'e.g. export SLURM_EXTRA_ARGS="--mail-type=BEGIN,END --mail-user=garvct"'
}
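# Parse command-line options; see usage() above for the meaning of each flag.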
while getopts "w:P:n:q:N:j:W:m:F:J:s:t:DlcghX" opt; do
case $opt in
w)
WALLTIME=$OPTARG
;;
P)
PROJECT=$OPTARG
;;
n)
MPIPROCS=$OPTARG
;;
q)
QUEUE=$OPTARG
;;
N)
MPI_PER_NODE=$OPTARG
MPI_PER_NODE_SET=$OPTARG
;;
j)
JOBNAME=$OPTARG
;;
W)
DEPENDS_ON_JOBID=$OPTARG
;;
m)
MEM_PER_PROCESS=$OPTARG
;;
J)
JOB_ARRAY_RANGE=$OPTARG
;;
F)
JOB_ARRAY_FILENAME=$OPTARG
;;
s)
SCRATCH_TYPE=$OPTARG
;;
t)
CPU_TYPE=$OPTARG
;;
D)
DEBUG_JOB=1
;;
l)
EXE_NAME=vasp_tbdyn
;;
c)
EXE_NAME=vasp_no_ngzhalf
;;
g)
SUBMIT_JOB=0
;;
h)
HELP=1
usage
exit 0
;;
X)
exit 0
;;
\?)
echo "Invalid option: -$OPTARG" >&2
usage
exit 1
;;
esac
done
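# Post-parse setup: these helpers come from the sourced common_sub_fns.sh and
# are assumed to validate the arguments, pick a default job name, and resolve
# the scratch directory when -s is used.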
slurm_check_command_args
set_default_jobname
#calc_chunk_new
if [ -n "$SCRATCH_TYPE" ]
then
get_scratch_dir $SCRATCH_TYPE
fi
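# Build the batch script: the slurm_* helpers are assumed to append the
# #SBATCH directives (tasks, defaults, dependencies, job array) to $JOBNAME.sbatch.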
echo "#!/bin/bash" > $JOBNAME.sbatch
slurm_tasks $JOBNAME.sbatch
slurm_set_defaults $JOBNAME.sbatch
slurm_depends_on $JOBNAME.sbatch
slurm_jobarray $JOBNAME.sbatch
echo "#" >> $JOBNAME.sbatch
echo "echo \$SLURM_JOB_NODELIST" >> $JOBNAME.sbatch
echo "#" >> $JOBNAME.sbatch
load_modules $JOBNAME.sbatch
echo "#" >> $JOBNAME.sbatch
#echo "export I_MPI_PMI_LIBRARY=/usr/lib64/libpmi.so" >> $JOBNAME.sbatch
echo "export OMP_NUM_THREADS=1" >> $JOBNAME.sbatch
echo "#" >> $JOBNAME.sbatch
echo "echo \"The following SLURM extra args (SLURM_EXTRA_ARGS) will be used: $SLURM_EXTRA_ARGS\"" >> $JOBNAME.sbatch
echo "echo \"The following VASP extra args (VASP_EXTRA_ARGS) will be used: $VASP_EXTRA_ARGS\"" >> $JOBNAME.sbatch
echo "#" >> $JOBNAME.sbatch
slurm_create_nodefile $JOBNAME.sbatch
slurm_debug_job $JOBNAME.sbatch
echo "#" >> $JOBNAME.sbatch
slurm_setup_run_in_scratch $JOBNAME.sbatch
echo "#" >> $JOBNAME.sbatch
slurm_setup_jobarray $JOBNAME.sbatch
echo "#" >> $JOBNAME.sbatch
if [ -z "$DEBUG_JOB" ]
then
echo "${MPI_CMD} ${MPI_ARGS} ${EXE_NAME} ${EXE_ARGS} ${VASP_EXTRA_ARGS}" >> $JOBNAME.sbatch
else
echo "job_tracker.py \"${MPI_CMD} ${MPI_ARGS} ${EXE_NAME} ${EXE_ARGS} ${VASP_EXTRA_ARGS}\"" >> $JOBNAME.sbatch
fi
echo "#" >> $JOBNAME.sbatch
slurm_finish_jobarray_and_scratch $JOBNAME.sbatch
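# Submit the generated script unless -g was given; any extra sbatch options
# are passed through via SLURM_EXTRA_ARGS.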
if [ $SUBMIT_JOB -eq 1 ]
then
sbatch $SLURM_EXTRA_ARGS $JOBNAME.sbatch
fi
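#
# Example invocation (illustrative only; the project name, process counts and
# walltime below are hypothetical values, not defaults from this script):
#   export SLURM_EXTRA_ARGS="--mail-type=BEGIN,END --mail-user=garvct"
#   sub_vasp_5.3.2 -n 32 -N 16 -w 12:00:00 -P my_project -j my_vasp_run
# This would generate my_vasp_run.sbatch and submit it with the extra sbatch options.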