#!/bin/bash
#- MPI Test
#- See: https://mpitutorial.com/tutorials/mpi-hello-world/
#- Follow these steps to get the executable:
#- >>> git clone https://github.com/mpitutorial/mpitutorial
#- >>> cd mpitutorial/tutorials/mpi-hello-world/code
#- >>> make
#
#SBATCH --job-name=mpitest
#SBATCH --output=log.mpitest
#
#SBATCH --nodes=2
#SBATCH --ntasks-per-node=2
#SBATCH --time=0-00:10:00
#SBATCH --ear=off
#SBATCH --mem=93G

echo "#############################"
echo "User:" $USER
echo "Date:" `date`
echo "Host:" `hostname`
echo "Directory:" `pwd`
echo "SLURM_JOBID:" $SLURM_JOBID
echo "SLURM_SUBMIT_DIR:" $SLURM_SUBMIT_DIR
echo "SLURM_JOB_NODELIST:" $SLURM_JOB_NODELIST
echo "#############################"

#- Ensure only the owner can read the output
umask 0077

export SLURM_COMP_VERBOSE=3
export SLURM_LOADER_VERBOSE=3

#- Clean up logs from previous runs
rm -rf log.run*

#- Number of MPI ranks for each participant
N=2
M=2

#- Generate hostfiles: write each allocated host once per task slot.
#- ${SLURM_TASKS_PER_NODE%%(*} strips the "(xN)" repetition suffix,
#- e.g. "2(x2)" becomes "2".
rm -f hosts.ompi # hosts.intel
for host in `scontrol show hostname $SLURM_JOB_NODELIST`; do
    for j in $(seq 1 ${SLURM_TASKS_PER_NODE%%(*}); do
        echo $host >> hosts.ompi
    done
done

#- Partition the hosts for each participant
sed -n "1,${N}p" hosts.ompi > hosts.runOne
sed -n "$(($N+1)),$(($N+$M))p" hosts.ompi > hosts.runTwo

#- Load modules
module purge

#- Activate the Spack environment, which also provides mpirun
spack env activate foamPrecice

#- Abort if any command fails
set -e

#- Group the solvers to stop all of them if one fails
#- NOTE: The same code is executed twice to mimic the preCICE framework
set -m
(
    mpirun -n $N -hostfile hosts.runOne ./mpi_hello_world &> log.runOne &
    mpirun -n $M -hostfile hosts.runTwo ./mpi_hello_world &> log.runTwo &

    #- Wait for every solver to finish
    wait
)
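
#- Usage sketch (the file name mpitest.sbatch is an assumption; the executable
#- built in the steps above must sit next to this script before submitting):
#- >>> cp mpitutorial/tutorials/mpi-hello-world/code/mpi_hello_world .
#- >>> sbatch mpitest.sbatch
#- Once the job finishes, inspect log.mpitest, log.runOne and log.runTwo.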