Submission Script Examples¶
Basic¶
Single-threaded programs¶
#!/bin/bash
# Serial job: one task on one core of a single node.
#SBATCH --job-name=my_job              # Job name shown in the queue
#SBATCH --account=my_project_account   # Project account to charge
#SBATCH --partition=processing         # Partition (queue) to submit to
#SBATCH --nodes=1                      # Run on a single node
#SBATCH --ntasks=1                     # One task (process)
#SBATCH --cpus-per-task=1              # One core for that task
#SBATCH --time=10:00                   # Walltime limit (10 minutes)

# NOTE(review): the executable is named hello.omp although this example
# requests a single core — presumably a serial build; confirm the name.
./hello.omp
Multi-threaded programs¶
#!/bin/bash
# OpenMP job: a single task that uses 4 cores on one node.
#SBATCH --job-name=omp_job             # Job name shown in the queue
#SBATCH --account=my_project_account   # Project account to charge
#SBATCH --partition=processing         # Partition (queue)
#SBATCH --output=omp_job.txt           # File for job output
#SBATCH --ntasks=1                     # One process...
#SBATCH --cpus-per-task=4              # ...with four cores
#SBATCH --time=10:00                   # Walltime limit (10 minutes)

# Size the OpenMP thread pool to the cores Slurm allocated.
export OMP_NUM_THREADS="$SLURM_CPUS_PER_TASK"

./hello.omp
Multi-process programs¶
#!/bin/bash
# MPI job: four ranks launched with mpirun.
#SBATCH --job-name=mpi                 # Job name
#SBATCH --account=my_project_account   # Project account
#SBATCH --partition=processing         # Partition (queue)
#SBATCH --output=mpi_job.txt           # File for job output
#SBATCH --ntasks=4                     # Four MPI ranks
#SBATCH --time=10:00                   # Walltime limit (10 minutes)

# mpirun picks up the rank count from the Slurm allocation.
mpirun hello.mpi
Hybrid (MPI+OpenMP) programs¶
#!/bin/bash
# Hybrid MPI+OpenMP job: 8 MPI ranks with 5 OpenMP threads each,
# spread across 2 nodes.
#SBATCH --job-name=hybrid
#SBATCH --account=my_project_account
#SBATCH --partition=processing
#SBATCH --output=hybrid_job.txt        # fixed typo: was "hydrid_job.txt"
#SBATCH --ntasks=8                     # MPI ranks
#SBATCH --cpus-per-task=5              # OpenMP threads per rank
#SBATCH --nodes=2
#SBATCH --time=10:00

# Match the OpenMP thread count to the cores allocated per MPI rank.
export OMP_NUM_THREADS="$SLURM_CPUS_PER_TASK"

mpirun hello_hybrid.mpi
Simple Python job (CPU)¶
This example shows how to run a basic Python script using CPU only.
#!/bin/bash
# Serial Python job: one task on one core, CPU only.
#SBATCH --job-name=python_analysis     # Job name
#SBATCH --account=my_project_account   # Project account
#SBATCH --partition=processing         # Partition (queue)
#SBATCH --output=python_job_%j.out     # stdout file (%j = job ID)
#SBATCH --error=python_job_%j.err      # stderr file (%j = job ID)
#SBATCH --time=01:00:00                # Walltime limit (HH:MM:SS)
#SBATCH --nodes=1                      # Single node
#SBATCH --ntasks=1                     # Single task
#SBATCH --cpus-per-task=1              # Single core

# Make the Python interpreter available.
module load python/3.10

# Launch the analysis script.
python my_script.py
Python job with virtual environment (CPU)¶
For projects using specific packages, create a virtual environment:
#!/bin/bash
# Python job using a pre-built virtual environment; input and output live
# in the project's scratch area.
#SBATCH --job-name=data_analysis
#SBATCH --account=my_project_account
#SBATCH --partition=processing
#SBATCH --output=/scratch/projetos/<my_project>/logs/job_%j.out
#SBATCH --error=/scratch/projetos/<my_project>/logs/job_%j.err
#SBATCH --time=02:00:00
#SBATCH --nodes=1
#SBATCH --ntasks=1
#SBATCH --cpus-per-task=4

# Interpreter first, then the project's virtualenv on top of it.
module load python/3.10
source /home/$USER/my_env/bin/activate

# Make sure the results directory exists before the script writes to it.
mkdir -p /scratch/projetos/<my_project>/results

# Run the analysis over the project's data directory.
python analysis.py --input /scratch/projetos/<my_project>/data/ --output /scratch/projetos/<my_project>/results/
Python job with multiple CPUs¶
For parallel processing using multiprocessing or libraries like NumPy/Pandas:
#!/bin/bash
# Parallel Python job: 16 cores on a single node for multiprocessing or
# threaded numeric libraries.
#SBATCH --job-name=parallel_processing
#SBATCH --account=my_project_account
#SBATCH --partition=processing
#SBATCH --output=/scratch/projetos/<my_project>/logs/parallel_%j.out
#SBATCH --error=/scratch/projetos/<my_project>/logs/parallel_%j.err
#SBATCH --time=04:00:00
#SBATCH --nodes=1
#SBATCH --ntasks=1
#SBATCH --cpus-per-task=16
#SBATCH --mem=32G

# Interpreter plus the scientific virtualenv.
module load python/3.10
source ~/venv_science/bin/activate

# Cap the thread pools of the common OpenMP/BLAS backends at the allocated
# core count so the libraries do not oversubscribe the node.
export OMP_NUM_THREADS="$SLURM_CPUS_PER_TASK"
export MKL_NUM_THREADS="$SLURM_CPUS_PER_TASK"
export OPENBLAS_NUM_THREADS="$SLURM_CPUS_PER_TASK"

# One worker per allocated core.
python parallel_processing.py --workers "$SLURM_CPUS_PER_TASK"
Python job with Conda¶
#!/bin/bash
# Python job running inside a Conda environment.
#SBATCH --job-name=conda_job
#SBATCH --account=my_project_account
#SBATCH --partition=processing
#SBATCH --output=conda_%j.out
#SBATCH --time=02:00:00
#SBATCH --cpus-per-task=8

# Make the 'conda' shell function available (adjust the path to match
# your installation), then switch to the target environment.
source ~/miniconda3/etc/profile.d/conda.sh
conda activate my_env

# Launch the analysis.
python statistical_analysis.py
Python job with job array¶
To run the same script with different parameters:
#!/bin/bash
# Job array: runs the same script once per array index (1..10).
#SBATCH --job-name=array_python
#SBATCH --account=my_project_account
#SBATCH --partition=processing
#SBATCH --output=/scratch/projetos/<my_project>/logs/array_%A_%a.out
#SBATCH --error=/scratch/projetos/<my_project>/logs/array_%A_%a.err
#SBATCH --array=1-10                   # %A = array job ID, %a = array index
#SBATCH --time=01:00:00
#SBATCH --cpus-per-task=2

module load python/3.10
source ~/venv/bin/activate

# Each array element processes the dataset matching its own index.
python process_dataset.py --id "$SLURM_ARRAY_TASK_ID"
Job with Python virtual environment and GPU¶
#!/bin/bash
# GPU job running a Python deep-learning script from a Conda environment.
#SBATCH --job-name=deep_learn
#SBATCH --partition=gpu                # GPU partition
#SBATCH --account=my_project_account
#SBATCH --output=gpu_job.txt
#SBATCH --ntasks=1
#SBATCH --time=10:00

module load cuda/11.8

# Activate the Conda environment. 'source activate <env>' has been
# deprecated since conda 4.4 — initialize conda from conda.sh and use
# 'conda activate' instead (adjust the path to your installation),
# matching the Conda example above.
source ~/miniconda3/etc/profile.d/conda.sh
conda activate pytorch

python my_app.py
Job with Python virtual environment, GPU and output/error files¶
#!/bin/bash
# GPU job with separate stdout/stderr files.
#SBATCH --job-name=deep_learn
#SBATCH --partition=gpu
#SBATCH --account=my_project_account
#SBATCH --time=0-00:10 # Runtime in D-HH:MM, minimum of 10 minutes
#SBATCH -o my_output_%j.out # File to which STDOUT will be written, %j inserts jobid
#SBATCH -e my_errors_%j.err # File to which STDERR will be written, %j inserts jobid

# load modules
module load cuda/11.8

# Activate the Conda environment. 'source activate <env>' has been
# deprecated since conda 4.4 — initialize conda from conda.sh and use
# 'conda activate' instead (adjust the path to your installation).
source ~/miniconda3/etc/profile.d/conda.sh
conda activate pytorch

# run code (fixed: removed the stray trailing quote after my_app.py)
python my_app.py
Job with Intel MPI¶
#!/bin/bash
# Intel MPI job: 10 nodes, 2 ranks per node, 20 cores per rank,
# launched through srun with the PMI2 interface.
#SBATCH --job-name=3D_ABC              # Job name
#SBATCH --account=my_project_account   # Project account (was "exemple"; same placeholder as the other examples)
#SBATCH --cpus-per-task=20             # Number of cores per task
#SBATCH --export=ALL                   # Export all environment variables
#SBATCH --nodes=10                     # Number of nodes
#SBATCH --ntasks-per-node=2            # Number of tasks per node
#SBATCH --output=slurm-%j.out          # Output file name
#SBATCH --partition=standard           # Partition (queue)

# Start from a clean module environment, then load the Intel toolchain.
module purge
module load intel/oneapi/mpi
module load intel/oneapi/compiler
module load ucx/1.14.0-intel

# Shared memory within a node, libfabric (mlx provider) between nodes.
export I_MPI_FABRICS=shm:ofi
export I_MPI_OFI_PROVIDER=mlx

# Point Intel MPI at the system PMI2 library and launch via srun.
# Replace the angle-bracket placeholders with your program and arguments.
I_MPI_PMI_LIBRARY=/lib64/libpmi2.so srun --mpi=pmi2 </path/to/program> <arguments>