#!/bin/bash
#PBS -q gpuq
#PBS -o out.o
#PBS -e out.e
#PBS -N conda
#PBS -l nodes=n81.cluster.iitmandi.ac.in:ppn=1
#PBS -l walltime=00:02:00
#PBS -V
cd ${PBS_O_WORKDIR} # to make sure that we are in the right dir on compute node
echo "Running on: " # on standard output
cat ${PBS_NODEFILE} # env variable for file name containing node details
cat $PBS_NODEFILE > machines.list # also on machines.list file
echo "Program Output begins: "
source ~/miniconda3/bin/activate
python python_script.py
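The script above is submitted and tracked with the standard Torque/PBS client commands. A minimal sketch follows, assuming the script is saved as submit_gpu.sh (the file name is only an example); the job id printed by qsub will differ on the cluster.
qsub submit_gpu.sh            # submit; prints a job id
qstat -u $USER                # list your queued and running jobs
qstat -f <jobid>              # full details of one job (replace <jobid>)
cat out.o out.e               # stdout/stderr end up in the files named by -o/-e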
#!/bin/bash
#PBS -j oe
#specify queue name
#PBS -q serial
#specify job name
#PBS -N my_serial_job
#specify time required for job completion
#PBS -l walltime=10:00:00
#to locate where it is running
echo "Running on: "
cat ${PBS_NODEFILE}
echo
echo "Program Output begins: "
cd ${PBS_O_WORKDIR}
./a.out
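The serial example ends by running ./a.out, so that executable has to be built before the job is submitted. A minimal sketch, assuming a C source file named program.c and that gcc is available (substitute your own source file and the compiler actually installed on the cluster):
gcc -O2 program.c -o a.out    # produces the a.out run by the script above
qsub serial_job.sh            # serial_job.sh is an assumed name for the script above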
#!/bin/bash
#PBS -q batch
#PBS -o out.o
#PBS -e out.e
#PBS -N your_job_name
#PBS -l nodes=2:ppn=8
#PBS -V
cd ${PBS_O_WORKDIR}
echo "Running on: "
cat ${PBS_NODEFILE}
cat $PBS_NODEFILE > machines.list
echo "Program Output begins: "
/opt/openmpi_intel/bin/mpirun -np 16 -machinefile machines.list ./a.out # replace ./a.out with your MPI executable
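The mpirun line above expects an MPI executable built against the same OpenMPI installation. A compile sketch, assuming mpicc sits next to the mpirun used above and a source file named hello_mpi.c (both are assumptions):
/opt/openmpi_intel/bin/mpicc -O2 hello_mpi.c -o a.out
# -np 16 matches nodes=2:ppn=8 (2 nodes x 8 cores per node)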
#!/bin/bash
#PBS -j oe
#specify queue name
#PBS -q day
#specify job name
#PBS -N jobname
#specify number of cpus to be assigned
#PBS -l ncpus=8
#PBS -V
#to locate where it is running
echo "Running on: "
cat ${PBS_NODEFILE}
echo
echo "Program Output begins: "
cd ${PBS_O_WORKDIR}
#number of MPI processes (-np) should equal ncpus
mpiexec -np 8 ./a.out # replace ./a.out with your MPI executable
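Before submitting, the available queues and nodes can be inspected with the usual Torque/PBS client commands; a short sketch, assuming the client tools are in PATH:
qstat -q                      # list the queues (serial, day, week, batch, gpuq above)
pbsnodes -a                   # state and properties of every compute node
qdel <jobid>                  # remove a job that is no longer needed (replace <jobid>)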
#!/bin/bash
#PBS -j oe
#PBS -q batch
#PBS -N your_job_name
# it is desirable to request for all processors of
# a node if you have multinode jobs
#PBS -l nodes=2:ppn=8
#PBS -V
cd ${PBS_O_WORKDIR}
echo "Running on: "
cat ${PBS_NODEFILE}
cat ${PBS_NODEFILE}|uniq > node.txt
/opt/mpich2_intel/bin/mpdboot -n 2 -f node.txt
echo
echo "Program Output begins: "
/opt/mpich2_intel/bin/mpiexec -np 16 ./a.out # replace ./a.out with your MPI executable
/opt/mpich2_intel/bin/mpdallexit
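The mpdboot call above starts an MPD ring on the two allocated nodes before mpiexec runs, and mpdallexit tears it down afterwards. A small check that can be added between those two steps, assuming mpdtrace lives in the same directory as mpdboot:
/opt/mpich2_intel/bin/mpdtrace    # should print the host names listed in node.txt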
#!/bin/bash
#PBS -j oe
#specify queue name
#PBS -q day
#specify job name
#PBS -N jobname
#specify time required for job completion
#PBS -l walltime=10:00:00
#specify number of cpus to be assigned
#PBS -l ncpus=8
#PBS -V
#Set the number of OpenMP threads
#if you are using OpenMP
#number of threads == ncpus
export OMP_NUM_THREADS=8
#to locate where it is running
echo "Running on: "
cat ${PBS_NODEFILE}
echo
echo "Program Output begins: "
cd ${PBS_O_WORKDIR}
./a.out # replace ./a.out with your OpenMP executable
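The executable run at the end of the OpenMP example must be compiled with OpenMP support, and the thread count set in OMP_NUM_THREADS above has to match ncpus. A compile sketch, assuming a source file named omp_prog.c and the GNU or Intel compilers (file name and compiler choice are assumptions):
gcc -O2 -fopenmp omp_prog.c -o a.out       # GNU compiler
# icc -O2 -openmp omp_prog.c -o a.out      # older Intel compilers spell the flag -openmp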
#!/bin/bash
#PBS -j oe
#PBS -q day
#PBS -N cuda
## Presently only one node n81 has NVIDIA cards in it
## In future when there is a down time for the cluster
## GPU support will be added explicitly in the scheduler
#PBS -l nodes=n81.cluster.iitmandi.ac.in:ppn=20
#PBS -V
cd $PBS_O_WORKDIR
echo
echo "Program Output begins: "
pwd
#-------------------- do not modify --------------------
#include your desired nvcc-compiled executable name
#or it can be any package that can run on GPU cards
./a.out
#-------------------- do not modify --------------------
echo "end of the program, ready for clean up"
#-------------------- do not modify --------------------
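The a.out between the "do not modify" markers is expected to be a CUDA executable. A build-and-check sketch, assuming a source file named kernel.cu and that nvcc and nvidia-smi are in PATH on n81 (all assumptions):
nvcc -O2 kernel.cu -o a.out
nvidia-smi                    # confirms the NVIDIA cards on n81 are visible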
#!/bin/bash
#PBS -j oe
#specify queue name
#PBS -q week
#specify job name
#PBS -N jobname
#specify time required for job completion
#PBS -l walltime=00:30:00
#specify number of cpus to be assigned
#PBS -l ncpus=4
#Set the same number of shared-memory threads (%NProcShared) in the Gaussian input as ncpus
#PBS -V
export GAUSS_SCRDIR=/scr/usr
#Set the number of SMP threads
#number of threads == ncpus
cd ${PBS_O_WORKDIR}
g09 formyl.com
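When run as above, g09 normally writes its output to formyl.log next to formyl.com, and GAUSS_SCRDIR must point to a scratch directory you can write to. A monitoring sketch using standard shell tools:
tail -f formyl.log                       # follow the calculation while it runs
grep "Normal termination" formyl.log     # appears once the job has finished cleanly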