// Slurm Example Scripts
Serial Job Script
#!/usr/bin/bash
#SBATCH --job-name=Serial_Test_Job
#SBATCH --ntasks=1 --constraint=hasw
#SBATCH --time=1:00:00
#SBATCH -o output.%j
#SBATCH -e error.%j
#SBATCH --qos=debug
#SBATCH --account=xxxx
#SBATCH --workdir=/discover/nobackup/myuserid
./myexec
exit 0
By default, Slurm executes your job from the directory where you submitted it. You can change the working directory either by cd-ing to it inside the script, or by specifying the --workdir option (on the sbatch command line or as an #SBATCH directive).
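For example, a minimal sketch of both approaches (the script name myjob.sh is a placeholder; the path matches the examples above):
# Override the working directory at submission time
sbatch --workdir=/discover/nobackup/myuserid myjob.sh
# Or change directory inside the script itself, before running the executable
cd /discover/nobackup/myuserid
./myexec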
OpenMP Job Script
#!/usr/bin/bash
#SBATCH -J Test_Slurm_Job
#SBATCH --ntasks=1 --cpus-per-task=6 --constraint=hasw
#SBATCH --time=1:00:00
#SBATCH -o output.%j
#SBATCH --account=xxxx
#SBATCH --workdir=/discover/nobackup/myuserid
export OMP_NUM_THREADS=6
# the line above is optional if "--cpus-per-task=" is set
export OMP_STACKSIZE=1G
export KMP_AFFINITY=scatter
./Test_omp_executable
exit 0
Note: The option "--cpus-per-task=n" advises the Slurm controller that job steps run within the allocation will require "n" processors per task. Without this option, the controller allocates just one processor per task. Even when "--cpus-per-task" is set, you can still set OMP_NUM_THREADS explicitly to a different value, as long as it does not exceed the requested resources.
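For example, a short sketch based on the script above: the job requests 6 CPUs per task but runs only 4 OpenMP threads, which is allowed because 4 does not exceed the requested 6.
#SBATCH --ntasks=1 --cpus-per-task=6 --constraint=hasw
export OMP_NUM_THREADS=4   # fewer threads than --cpus-per-task is fine; more is not
./Test_omp_executable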
MPI/OpenMP Hybrid Job Script
#!/usr/bin/csh
#SBATCH -J Test_Job
#SBATCH --nodes=4 --ntasks=24 --cpus-per-task=2 --ntasks-per-node=6
#SBATCH --constraint=hasw
#SBATCH --time=12:00:00
#SBATCH -o output.%j
#SBATCH --account=xxxx
source /usr/share/modules/init/csh
module purge
module load comp/intel-13.1.3.192 mpi/impi-4.1.0.024
cd $SLURM_SUBMIT_DIR
setenv OMP_NUM_THREADS 2
setenv OMP_STACKSIZE 1G
setenv KMP_AFFINITY compact
setenv I_MPI_PIN_DOMAIN auto
# Launch 24 MPI ranks, 6 per node, each rank running 2 OpenMP threads
mpirun -perhost 6 -np 24 ./Test_executable
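Whichever example you use, the script is submitted with sbatch and can be monitored with standard Slurm commands. A brief usage sketch (test_job.sh and the job ID are placeholders):
sbatch test_job.sh        # submit the batch script; Slurm prints the job ID
squeue -u myuserid        # list your pending and running jobs
scancel 123456            # cancel a job by ID if needed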