#!/bin/bash
#
#SBATCH --job-name=cpp.coupled
#SBATCH --output=log.sim.slurm
#
#SBATCH --nodes=2
#SBATCH --ntasks-per-node=2
#SBATCH --time=0-00:30:00
#SBATCH --ear=off
#SBATCH --mem=93G

echo "#############################"
echo "User:" $USER
echo "Date:" `date`
echo "Host:" `hostname`
echo "Directory:" `pwd`
echo "SLURM_JOBID:" $SLURM_JOBID
echo "SLURM_SUBMIT_DIR:" $SLURM_SUBMIT_DIR
echo "SLURM_JOB_NODELIST:" $SLURM_JOB_NODELIST
echo "#############################"

#- Ensure only the owner can read the output
umask 0077

export SLURM_COMP_VERBOSE=3
export SLURM_LOADER_VERBOSE=3

#- Number of MPI ranks per participant (N + M must fit within nodes * ntasks-per-node)
N=2
M=2

#- Generate the hostfile: one line per MPI rank on each allocated node
rm -f hosts.ompi
for host in `scontrol show hostname $SLURM_JOB_NODELIST`; do
  for j in $(seq 1 ${SLURM_TASKS_PER_NODE%%(*}); do
    echo $host >> hosts.ompi
  done
done

#- Partition the hosts between the two participants
sed -n "1,${N}p" hosts.ompi > hosts.SolverOne
sed -n "$(($N+1)),$(($N+$M))p" hosts.ompi > hosts.SolverTwo

#- Load modules
module purge
spack env activate foamPrecice

#- Exit immediately if a command fails
set -e

#- Group the solvers so that all of them can be stopped if one fails
set -m
(
  mpirun -n $N -hostfile hosts.SolverOne ./solverdummy-cpp-parallel ../precice-config-parallel.xml SolverOne MeshOne &> log.runOne &
  mpirun -n $M -hostfile hosts.SolverTwo ./solverdummy-cpp-parallel ../precice-config-parallel.xml SolverTwo MeshTwo &> log.runTwo &

  #- Wait for every solver to finish
  wait
)
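
#- Illustrative only (assumed workflow, not executed by this script): the job
#- is typically submitted with sbatch from the directory containing the
#- solverdummy binaries, and the per-participant logs can be followed while it
#- runs. The script filename below is a placeholder.
#-   sbatch <this-script>.slurm
#-   tail -f log.runOne log.runTwo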