|
#!/bin/bash --login
#SBATCH --job-name=e3_GE-abaqus-mpi
#SBATCH --partition=workq
#SBATCH --account=pawsey0001
#SBATCH --cpus-per-task=16
#SBATCH --nodes=2
#SBATCH --time=01:00:00
# The double '##' disables the two directives below; SLURM then falls back
# to its default slurm-%j.out output file. Remove one '#' to re-enable.
##SBATCH --output e3_abaqus-mpi.%j.out
##SBATCH --error e3_abaqus-mpi.%j.err
| 10 | + |
# Load the Abaqus environment module.
# NOTE(review): the '--login' shebang is presumably what makes the 'module'
# command available here — confirm against the site's module setup.
module load abaqus

# leave in, it lists the environment loaded by the modules
#module list

# Note: SLURM_JOBID is a unique number for every job.
# Generic job variables; quoted so paths with unusual characters survive.
INPUT=e3
SRC_DIR="${SLURM_SUBMIT_DIR}"
EXECUTABLE=abaqus
SCRATCH="$MYSCRATCH/run_abaqus-mpi/$SLURM_JOBID"
RESULTS="$MYGROUP/abaqus-mpi-results_zeus/$SLURM_JOBID"
# NOTE(review): Abaqus appears to misbehave when SLURM_GTIDS is set under
# SLURM — verify against the site/Abaqus documentation before removing.
unset SLURM_GTIDS
###############################################
# Create a unique directory in the SCRATCH area for this job to run in.
# mkdir -p is idempotent, so the former '[ ! -d ]' guard is unnecessary.
mkdir -p "$SCRATCH"
echo "SCRATCH is $SCRATCH"

###############################################
# Create a unique directory in the GROUP area for this job's results.
mkdir -p "$RESULTS"
echo "the results directory is $RESULTS"
| 39 | + |
################################################
# Name stem for the Abaqus output/log files of this run.
OUTPUT=abaqus_${INPUT}_mpi

#############################################
# Copy the input deck to $SCRATCH, then run from there.
cp "${INPUT}.inp" "$SCRATCH"

# Abort if the scratch directory is unusable — everything after this point
# (including the final cleanup 'rm -r') assumes we are inside $SCRATCH.
cd "$SCRATCH" || exit 1
| 51 | + |
######################################
# Name of the abaqus_v6.env file written later in the script.
envFile=abaqus_v6.env

# Cores per node, straight from SLURM (no need for the old `echo` subshell).
ncpus=$SLURM_CPUS_PER_TASK
echo cpu per task $SLURM_CPUS_PER_TASK
echo "number of cores per nodes ${ncpus}"
# Total number of cores = nodes * cores-per-node. Shell arithmetic treats
# unset names as 0, so this also degrades gracefully outside SLURM.
echo "total number of nodes $SLURM_NNODES"
ncores=$((SLURM_NNODES * ncpus))
echo "total number of cores is $ncores"
# File holding the list of allocated node hostnames, one per line.
node_list=ABAQUS_NODES

srun hostname | sort > "$node_list"
# Fix: this message was single-quoted before, so $node_list never expanded.
echo "the node list is $node_list"

#######################################
# Build the Abaqus mp_host_list value from a node file.
# Arguments: $1 - file with one hostname per line
#            $2 - number of cores per node
# Outputs:   e.g. [['node1', 16],['node2', 16]] on stdout ([] if file empty)
#######################################
build_host_list() {
  local node_file=$1
  local cores=$2
  local list="["
  local host
  while IFS= read -r host || [[ -n "$host" ]]; do
    [[ -n "$host" ]] || continue
    list="${list}['$host', $cores],"
  done < "$node_file"
  # Trim the trailing comma and close the bracket.
  printf '%s]\n' "${list%,}"
}

mp_host_list=$(build_host_list "$node_list" "$ncpus")
echo host list ${mp_host_list}
| 77 | + |
# Exported so child processes can also see the host list.
export mp_host_list

# Write the Abaqus environment file in a single heredoc (same bytes as the
# former chain of echo-appends). mp_host_list is expanded; the rest is literal.
cat > "${envFile}" <<EOF
import os
os.environ['ABA_BATCH_OVERRIDE'] = '1'
verbose=1
mp_host_list=${mp_host_list}
mp_mpi_implementation=PMPI
EOF
| 86 | + |
# Launch the MPI-parallel Abaqus analysis; 'interactive' keeps the command
# in the foreground until the analysis completes.
abaqus_cmd=(
  abaqus
  job="$OUTPUT"
  input="$INPUT"
  cpus="$ncores"
  standard_parallel=all
  mp_mode=mpi
  interactive
)
"${abaqus_cmd[@]}"
| 89 | + |
#############################################
# Move result artifacts from the scratch directory to the unique results
# directory (unmatched glob patterns stay literal and make mv warn — this
# is deliberately best-effort).
mv *.dat *.abq *.m* *.odb *.res *.p* *.s* "${RESULTS}"

cd "$HOME"

###########################
# Clean up $SCRATCH — only when the variable is non-empty and names a real
# directory, so an unset variable can never turn this into removing the
# wrong path.
if [[ -n "$SCRATCH" && -d "$SCRATCH" ]]; then
  rm -r -- "$SCRATCH"
fi

echo "abaqus-mpi job finished at $(date)"
0 commit comments