#!/bin/bash
#
#SBATCH --job-name=precice.partitioned-pipe
#SBATCH --output=%j.out
#SBATCH --error=%j.err
#
#SBATCH --nodes=2
#SBATCH --ntasks-per-node=2
#SBATCH --time=0-04:00:00

#- Optional: exit on errors and undefined variables (currently disabled)
# set -e -u

#- Generate hostfiles
rm -f hosts.ompi # hosts.intel
for host in $(scontrol show hostname "$SLURM_JOB_NODELIST"); do
    # SLURM_TASKS_PER_NODE may look like "2(x2)"; strip the "(x...)" suffix
    for j in $(seq 1 "${SLURM_TASKS_PER_NODE%%(*}"); do
        echo "$host" >> hosts.ompi
    done
done

#- Number of processors per participant
nprocs1=2
nprocs2=2

#- Partition the hosts for each participant
sed -n "1,${nprocs1}p" hosts.ompi > fluid1-openfoam-pimplefoam/hosts.fluid1
sed -n "$((nprocs1 + 1)),$((nprocs1 + nprocs2))p" hosts.ompi > fluid2-openfoam-pimplefoam/hosts.fluid2

#- Source the bash profile, load the modules, and set the OpenFOAM environment variables
echo "Sourcing the bash profile, loading modules, and setting the OpenFOAM environment variables..."
source /gpfs/home/nkumar001/.bash_profile
module purge
module load gcc/7.3.0
module load openmpi/2.1.6

#- Activate the Spack environment and source the OpenFOAM bashrc
spack env activate foamPrecice
source /gpfs/home/nkumar001/tools/spack/opt/spack/linux-centos7-skylake_avx512/gcc-7.3.0/openfoam-org-6-ssbibujof7mzwyo2gsjdlsoskflx5axr/etc/bashrc

parentDIR=$(pwd)

#- Group the runs so that a failure does not waste resources
set -m
(
    #- Launch solver for participant Fluid1
    cd "$parentDIR/fluid1-openfoam-pimplefoam"
    ./run.fluid1.sh &

    #- Launch solver for participant Fluid2
    cd "$parentDIR/fluid2-openfoam-pimplefoam"
    ./run.fluid2.sh &

    #- Wait for every solver to finish
    wait
)

echo "All participants succeeded"
# ----------------------------------------------------------------- end-of-file
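
# -----------------------------------------------------------------------------
# Note: the grouped subshell above always falls through to the success message,
# even if one of the solvers fails. A minimal, hedged sketch of a variant that
# propagates a solver failure to the job's exit status is shown below. It
# assumes that run.fluid1.sh and run.fluid2.sh exit non-zero on failure; the
# names pid1/pid2/status1/status2 are introduced here for illustration only.
#
#   (
#       cd "$parentDIR/fluid1-openfoam-pimplefoam" && ./run.fluid1.sh &
#       pid1=$!    # PID of the Fluid1 background job
#       cd "$parentDIR/fluid2-openfoam-pimplefoam" && ./run.fluid2.sh &
#       pid2=$!    # PID of the Fluid2 background job
#       wait "$pid1"; status1=$?
#       wait "$pid2"; status2=$?
#       [ "$status1" -eq 0 ] && [ "$status2" -eq 0 ]
#   ) || { echo "A participant failed" >&2; exit 1; }
#
# Because each cd is part of the backgrounded command list, it only affects
# that solver's own subshell, so the two launches stay independent.
# -----------------------------------------------------------------------------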