
Useful Scripts at HPC




filename: subserial.sh – for single-CPU, low-I/O jobs

Submission script for a single-CPU job

Number of CPU threads: 1

Ideal for CPU-only serial codes


#!/bin/sh
# specifies number of nodes
#SBATCH -N 1
# specifies tasks (cores) per node
#SBATCH --ntasks-per-node=1
# specifies maximum duration of run
#SBATCH --time=1:00:00
# specifies job name
#SBATCH --job-name=JOBNAME
# specifies error file name
#SBATCH --error=job.%J.err_node
# specifies output file name
#SBATCH --output=job.%J.out_node_48
# specifies queue name
#SBATCH --partition=serial
# To run job in the directory from where it is submitted
cd $SLURM_SUBMIT_DIR
# include your desired executable name
time ./a.out >res.dat
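
Each of the scripts on this page is submitted the same way: hand the file to the scheduler with sbatch and monitor the job with the standard SLURM commands. As a quick reference (the job ID below is a placeholder for the number printed by sbatch):

sbatch subserial.sh          # submit the script; prints "Submitted batch job <jobid>"
squeue -u $USER              # list your queued and running jobs
scancel <jobid>              # cancel a job if needed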








filename: subcpu.sh – for parallel CPU, low-I/O jobs

Submission script for parallel CPU jobs

Number of CPU threads: 12

Ideal for MPI/OpenMP/serial CPU-only codes

#!/bin/sh
# specifies number of nodes
#SBATCH -N 1
# specifies tasks (cores) per node
#SBATCH --ntasks-per-node=12
# specifies maximum duration of run
#SBATCH --time=1:00:00
# specifies job name
#SBATCH --job-name=JOBNAME
# specifies error file name
#SBATCH --error=job.%J.err_node
# specifies output file name
#SBATCH --output=job.%J.out_node_48
# specifies queue name
#SBATCH --partition=day
# To run job in the directory from where it is submitted
cd $SLURM_SUBMIT_DIR
# keep the number of OpenMP threads consistent with the allocation
# (SLURM_CPUS_PER_TASK is only set when --cpus-per-task is requested,
#  so fall back to the tasks-per-node value requested above)
export OMP_NUM_THREADS=${SLURM_CPUS_PER_TASK:-$SLURM_NTASKS_PER_NODE}
# include your desired executable name
time ./a.out >res.dat
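
The run line above starts the executable directly, which is what a serial or OpenMP binary needs. If your code is an MPI program, it is normally launched through SLURM's launcher instead; a minimal variant of the last line, assuming srun (or an mpirun provided by a loaded MPI module) is available on the cluster:

# MPI variant of the run line: start one rank per requested task
time srun ./a.out > res.dat
# or, with an MPI module that provides mpirun:
# time mpirun -np $SLURM_NTASKS ./a.out > res.dat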








filename: subgpu.sh – for GPU, low-I/O jobs

Submission script for GPU jobs

Number of CPU threads: 1

Number of GPUs: 2

Ideal for GPU-only codes (CUDA, etc.)

#!/bin/sh
# specifies number of nodes
#SBATCH -N 1
# specifies tasks (cores) per node
#SBATCH --ntasks-per-node=1
# specifies maximum duration of run
#SBATCH --time=1:00:00
# specifies job name
#SBATCH --job-name=JOBNAME
# specifies error file name
#SBATCH --error=job.%J.err_node
# specifies output file name
#SBATCH --output=job.%J.out_node_48
# specifies queue name
#SBATCH --partition=day
# Generic resources
#SBATCH --gres=gpu:2
# To run job in the directory from where it is submitted
cd $SLURM_SUBMIT_DIR
# include your desired executable name
time ./a.out >res.dat
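
Before starting the real run it can be useful to confirm which GPUs the scheduler actually handed to the job. A minimal check, assuming NVIDIA GPUs with nvidia-smi installed on the compute nodes (SLURM typically exports CUDA_VISIBLE_DEVICES when --gres=gpu is requested), is to add these lines just before the executable is started:

# sanity check: show the GPUs SLURM assigned to this job
echo "GPUs visible to this job: $CUDA_VISIBLE_DEVICES"
nvidia-smi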








filename: subcpuscr.sh – for parallel CPU, high-I/O jobs

Submission script for CPU jobs using scratch space

Number of CPU threads: 12

Ideal for MPI/OpenMP CPU codes with large I/O

#!/bin/sh
# specifies number of nodes
#SBATCH -N 1
# specifies tasks (cores) per node
#SBATCH --ntasks-per-node=12
# specifies maximum duration of run
#SBATCH --time=1:00:00
# specifies job name
#SBATCH --job-name=JOBNAME
# specifies error file name
#SBATCH --error=job.%J.err_node
# specifies output file name
#SBATCH --output=job.%J.out_node_48
# specifies queue name
#SBATCH --partition=day
# keep the number of OpenMP threads the same as the allocation
# (SLURM_CPUS_PER_TASK is only set when --cpus-per-task is requested,
#  so fall back to the tasks-per-node value requested above)
export OMP_NUM_THREADS=${SLURM_CPUS_PER_TASK:-$SLURM_NTASKS_PER_NODE}
## Scratch space usage (faster for large I/O)
# unique scratch directory name: <username><jobid>
drn=$SLURM_JOB_USER$SLURM_JOB_ID
scrdir=/scr/usr
mkdir -p $scrdir/$drn
cd $scrdir/$drn
# record the job id and copy the input files from the submit directory to scratch
echo $SLURM_JOB_ID > jobid.txt
rsync -a ${SLURM_SUBMIT_DIR}/ $scrdir/$drn/
# note which nodes and which scratch location this job used
echo ${SLURM_JOB_NODELIST} > nodes.txt
echo " location: $scrdir/$drn" >> nodes.txt
echo
echo "Program Output begins:"
time ./a.out >res.dat
echo "end of the program ready for clean up"
rsync -a $scrdir/$drn/ ${SLURM_SUBMIT_DIR}/
cd
rm -fr $scrdir/$drn
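
One caveat: if the job fails or is killed at its time limit, the copy-back and cleanup above never run and the results stay in scratch on the compute node. A minimal way to harden this, assuming the same $scrdir and $drn variables as in the script, is to register the cleanup with a trap right after the scratch directory is created (SLURM sends SIGTERM before killing a job that exceeds its time limit):

# run the copy-back and cleanup even if the script exits early
cleanup () {
    rsync -a "$scrdir/$drn/" "${SLURM_SUBMIT_DIR}/"
    cd
    rm -fr "$scrdir/$drn"
}
trap cleanup EXIT TERM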









filename: subgpuscr.sh – for parallel CPU + GPU, high-I/O jobs

Submission script for heterogeneous jobs (CPU + GPU) using scratch space

Number of CPU threads: 12

Number of GPUs: 2

Ideal for MPI/OpenMP + CUDA jobs with large I/O


#!/bin/sh
# specifies number of nodes
#SBATCH -N 1
# specifies tasks (cores) per node
#SBATCH --ntasks-per-node=12
# specifies maximum duration of run
#SBATCH --time=1:00:00
# specifies job name
#SBATCH --job-name=JOBNAME
# specifies error file name
#SBATCH --error=job.%J.err_node
# specifies output file name
#SBATCH --output=job.%J.out_node_48
# specifies queue name
#SBATCH --partition=day
# Generic resources
#SBATCH --gres=gpu:2
# keep the number of OpenMP threads the same as the allocation
# (SLURM_CPUS_PER_TASK is only set when --cpus-per-task is requested,
#  so fall back to the tasks-per-node value requested above)
export OMP_NUM_THREADS=${SLURM_CPUS_PER_TASK:-$SLURM_NTASKS_PER_NODE}
## Scratch space usage (faster for large I/O)
# unique scratch directory name: <username><jobid>
drn=$SLURM_JOB_USER$SLURM_JOB_ID
scrdir=/scr/usr
mkdir -p $scrdir/$drn
cd $scrdir/$drn
# record the job id and copy the input files from the submit directory to scratch
echo $SLURM_JOB_ID > jobid.txt
rsync -a ${SLURM_SUBMIT_DIR}/ $scrdir/$drn/
# note which nodes and which scratch location this job used
echo ${SLURM_JOB_NODELIST} > nodes.txt
echo " location: $scrdir/$drn" >> nodes.txt
echo
echo "Program Output begins:"
time ./a.out >res.dat
echo "end of the program ready for clean up"
rsync -a $scrdir/$drn/ ${SLURM_SUBMIT_DIR}/
cd
rm -fr $scrdir/$drn
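
In all of the scripts above, ./a.out is just a stand-in for your own program. A typical adaptation of the run line, assuming the cluster provides environment modules (the module and executable names below are placeholders, not actual names on the system), would look like this:

# load the toolchain your binary was built with (names are examples only)
module load cuda
module load openmpi
# replace a.out with your actual executable and input file
time srun ./my_simulation input.dat > res.dat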