Samples of PBS batch scripts for applications
HFSS (ANSYS Electronics Desktop)
#!/bin/bash
#PBS -l walltime=168:00:00
#PBS -l select=1:ncpus=32:mpiprocs=32:ompthreads=1:mem=70gb
#PBS -q @pbsServer
#
# REF: https://wiki.hpcc.msu.edu/display/~colbrydi@msu.edu/2011/06/08/HFSS+script
#
. /etc/profile.d/modules.sh
export NNODES=`sort $PBS_NODEFILE | uniq | wc -l`
export NPROCS=`wc -l < $PBS_NODEFILE`
cd $PBS_O_WORKDIR
module load hfss
export ANSYSEM_HOST_FILE=$PBS_NODEFILE
# export AnsTempDir=/wrk/$USER/tmp   # scratch folder; setting this did not work
ansysedt -distributed -machinelist numcores=$NPROCS -auto -monitor -ng -batchsolve -batchoptions "HFSS/HPCLicenseType=pool tempdirectory=/scratch/myscratch" coaxial.aedt | tee pbs.log
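The script is submitted and monitored in the usual way; assuming it is saved as hfss.pbs:

qsub hfss.pbs    # submit the batch job
qstat -u $USER   # check its queue status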
ANSYS Mechanical (MAPDL)
#!/bin/bash
#PBS -l walltime=168:00:00
#PBS -l select=1:ncpus=32:mpiprocs=32:ompthreads=1:mem=18gb
#PBS -q @pbsServer
#PBS -N ansysMECH
#
. /etc/profile.d/modules.sh
export NNODES=`sort $PBS_NODEFILE | uniq | wc -l`
export NPROCS=`wc -l < $PBS_NODEFILE`
cd $PBS_O_WORKDIR
mkdir -p $WORK/$PBS_JOBID
module load ansys
ansys191 -i YOURINPUT.dat -dir $WORK/$PBS_JOBID -dis -p ansys -np ${NPROCS} -o file.out -s read -l en-us -b -usessh
# The scratch folder location is given by -dir; alternatively, set the TMPDIR variable in the shell.
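A minimal sketch of the TMPDIR alternative mentioned above, assuming $WORK points at a scratch filesystem (the path is illustrative):

export TMPDIR=$WORK/$PBS_JOBID   # per-job scratch directory (hypothetical path)
mkdir -p $TMPDIR
ansys191 -i YOURINPUT.dat -dis -p ansys -np ${NPROCS} -o file.out -b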
ANSYS Fluent
#!/bin/bash
#PBS -l walltime=168:00:00
#PBS -l select=10:ncpus=32:mpiprocs=32:ompthreads=1:mem=100gb
#PBS -q @servername
export NNODES=`sort $PBS_NODEFILE | uniq | wc -l`
export NPROCS=`wc -l < $PBS_NODEFILE`
. /etc/profile.d/modules.sh
cd $PBS_O_WORKDIR
export OMP_NUM_THREADS=1
module load ansys
echo " Running case now"
lfs setstripe -S 1m -c 8 . # Lustre setup
fluent 3ddp -mpi=intel -g -ssh -t${NPROCS} -cnf=${PBS_NODEFILE} -i input.jou -pib -feature_parallel_preferred=anshpc_pack > log.out
# Use -feature_parallel_preferred=anshpc for the anshpc license; choose anshpc_pack for very large-scale runs.
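The -i input.jou argument names a Fluent journal file of TUI commands. A minimal sketch that writes one from the batch script via a heredoc; the case/data file names and iteration count are placeholders:

cat > input.jou <<'EOF'
; read the case and data, iterate, write results, exit
/file/read-case-data mycase.cas.gz
/solve/iterate 500
/file/write-case-data result.cas.gz
/exit yes
EOF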
Abaqus
#!/bin/bash
#PBS -l select=1:ncpus=32:mpiprocs=32:ompthreads=1:mem=150gb
#PBS -l walltime=168:00:00
#PBS -q @server
#PBS -N ABQ
cd $PBS_O_WORKDIR
export NNODES=`sort $PBS_NODEFILE | uniq | wc -l`
export NPROCS=`wc -l < $PBS_NODEFILE`
. /etc/profile.d/modules.sh
module load abaqus
abaqus job=__inp_file_name__ cpus=$NPROCS interactive scratch=$WORK/tmp
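__inp_file_name__ is a placeholder for the input deck name. One way to fill it in and submit, assuming the script above is saved as abq.pbs and the deck is beam.inp:

sed "s/__inp_file_name__/beam/" abq.pbs > abq_beam.pbs
qsub abq_beam.pbs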
COMSOL Multiphysics
#!/bin/bash
#PBS -N Q1
#PBS -l walltime=168:00:00
#PBS -l select=1:ncpus=32:mpiprocs=8:ompthreads=4:mem=150gb
#PBS -q @serverName
export NNODES=`sort $PBS_NODEFILE | uniq | wc -l`
export NPROCS=`wc -l < $PBS_NODEFILE`
. /etc/profile.d/modules.sh
cd $PBS_O_WORKDIR
module load comsol
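# Note: -np below relies on OMP_NUM_THREADS; PBS Pro exports it from ompthreads=4
# in the select line above. If your scheduler does not, set it explicitly:
# export OMP_NUM_THREADS=4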
comsol batch -nn $NPROCS -f $PBS_NODEFILE -np $OMP_NUM_THREADS -tmpdir $WORK/tmp -study std4 -inputfile ${JNAME}.mph -outputfile output.mph -batchlog log.txt
RSoft FullWAVE
#!/bin/bash
#PBS -l select=1:ncpus=16:mpiprocs=16:ompthreads=1
#PBS -l walltime=2:00:00
#PBS -N rsoft_fullwave
#PBS -q @servername
# Ref: https://opus.nci.org.au/display/Help/FullWAVE
# May need to edit ~/.xbcad_ini
#
cd $PBS_O_WORKDIR
#
export NNODES=`sort $PBS_NODEFILE | uniq | wc -l`
export NPROCS=`wc -l < $PBS_NODEFILE`
#
module load rsoft
rslmd -start                       # start the RSoft license-manager daemon
export P4_RSHCOMMAND=rshlocal      # remote-shell command used by the MPICH launcher
fwmpirun -np $NPROCS wg.ind
rslmd -stop
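If the solver can abort mid-run, a shell trap ensures the license daemon is still stopped; a minimal sketch using only the commands above:

rslmd -start
trap 'rslmd -stop' EXIT   # stop the daemon on any exit, normal or not
fwmpirun -np $NPROCS wg.ind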
Torque/Moab scripts for running a single-node job across all compute nodes
A bash script submitting the job to each of nodes compute001 through compute300:
for i in {001..300}
do
    echo -n "$i "
    sed "s/_hostname_/compute${i}/" single.template > $1.pbs
    msub $1.pbs
done
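The loop takes the job base name as its first argument; assuming it is saved as submit_all.sh:

./submit_all.sh mytest   # writes mytest.pbs for each node and submits it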
A sample template job script (single.template):
#!/bin/bash
#MSUB -l nodes=_hostname_:ppn=32
#MSUB -l walltime=00:15:00
#MSUB -N test_single_node._hostname_
cd $PBS_O_WORKDIR
cat $PBS_NODEFILE
NCPU=$(wc -l < $PBS_NODEFILE)
export MODULEPATH+=:/opt/modulefiles
module load gcc/9.3 mvapich/2.3
export Mexe=/opt/myexecutable
mpirun -np $NCPU -hostfile $PBS_NODEFILE bash -c "ulimit -s unlimited && $Mexe" |& tee log.${NCPU}.${HOSTNAME}
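Each run leaves a log.${NCPU}.computeNNN file in the working directory. A quick, hypothetical sanity check over the sweep:

grep -il "error" log.*.compute*   # list any node logs that mention errors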