#!/bin/bash
#
#SBATCH --job-name=oggm_tests
#SBATCH --ntasks=1
#SBATCH --cpus-per-task=4

# Abort whenever a single step fails. Without this, bash will just continue on errors.
set -e

# We create a sub-directory for this job to store its runtime data at.
WORKDIR="/work/$USER/oggm_tests_$SLURM_JOB_ID"
mkdir -p "$WORKDIR"
cd "$WORKDIR"
echo "Workdir for this run: $WORKDIR"

# Export the WORKDIR as environment variable so our benchmark script can use it
# to find its working directory.
export WORKDIR

# Export the global read-only download-cache-dir
export OGGM_DOWNLOAD_CACHE=/home/data/download
export OGGM_DOWNLOAD_CACHE_RO=1

# All commands in the EOF block run inside of the container.
# Adjust container version to your needs, they are guaranteed to never change
# after their respective day has passed.
srun -n 1 -c "${SLURM_JOB_CPUS_PER_NODE}" singularity exec docker://oggm/oggm:20181110 bash -s <<EOF
  # TODO(review): the heredoc body was truncated in this copy of the file —
  # restore the commands that should run inside the container here.
EOF