diff --git a/Defaultslurm b/Defaultslurm
index b42a946..1f789ed 100755
--- a/Defaultslurm
+++ b/Defaultslurm
@@ -1,7 +1,7 @@
 #!/bin/bash
 #!
-#! Example SLURM job script for Darwin (Sandy Bridge, ConnectX3)
-#! Last updated: Sat Apr 18 13:05:53 BST 2015
+#! Example SLURM job script for Peta4-Skylake (Skylake CPUs, OPA)
+#! Last updated: Mon 13 Nov 12:25:17 GMT 2017
 #!

 #!#############################################################
@@ -12,29 +12,33 @@
 #! Name of the job:
 #SBATCH -J Tadalafil
 #! Which project should be charged:
-#SBATCH -A GOODMAN
+#SBATCH -A GOODMAN-SL3-CPU
 #! How many whole nodes should be allocated?
 #SBATCH --nodes=1
-#! How many (MPI) tasks will there be in total? (<= nodes*16)
-#SBATCH --ntasks=6
+#! How many (MPI) tasks will there be in total? (<= nodes*32)
+#! The skylake/skylake-himem nodes have 32 CPUs (cores) each.
+#SBATCH --ntasks=32
 #! How much wallclock time will be required?
-#SBATCH --time=01:00:00
+#SBATCH --time=02:00:00
 #! What types of email messages do you wish to receive?
 #SBATCH --mail-type=FAIL
 #! Uncomment this to prevent the job from being requeued (e.g. if
 #! interrupted by node failure or system downtime):
-##SBATCH --no-requeue
+#SBATCH --no-requeue

-#! Do not change:
-#SBATCH -p sandybridge
+#! For 6GB per CPU, set "-p skylake"; for 12GB per CPU, set "-p skylake-himem":
+#SBATCH -p skylake

 #! sbatch directives end here (put any additional directives above this line)

 #! Notes:
-#! Charging is determined by node number*walltime. Allocation is in entire nodes.
+#! Charging is determined by core number*walltime.
 #! The --ntasks value refers to the number of tasks to be launched by SLURM only. This
 #! usually equates to the number of MPI tasks launched. Reduce this from nodes*16 if
 #! demanded by memory requirements, or if OMP_NUM_THREADS>1.
+#! Each task is allocated 1 core by default, and each core is allocated 5990MB (skylake)
+#! and 12040MB (skylake-himem). If this is insufficient, also specify
+#! --cpus-per-task and/or --mem (the latter specifies MB per node).

 #! Number of nodes and tasks per node allocated by SLURM (do not change):
 numnodes=$SLURM_JOB_NUM_NODES
@@ -48,14 +52,14 @@ mpi_tasks_per_node=$(echo "$SLURM_TASKS_PER_NODE" | sed -e 's/^\([0-9][0-9]*\).
 #! (note that SLURM reproduces the environment at submission irrespective of ~/.bashrc):
 . /etc/profile.d/modules.sh                # Leave this line (enables the module command)
 module purge                               # Removes all modules still loaded
-module load default-impi                   # REQUIRED - loads the basic environment
+module load rhel7/default-peta4            # REQUIRED - loads the basic environment
 #! Insert additional module load commands after this line if needed:
 module load gaussian/09

 #! Full path to application executable:
-application="g09"
-
+application="$GAUSS_EXEDIR/g09"
+export GAUSS_SCRDIR=/home/ke291/rds/hpc-work
 #! Run options for the application:
 options="< Tadalafil1ginp001.com > Tadalafil1ginp001.out"
@@ -118,6 +122,6 @@ fi

 echo -e "\nnumtasks=$numtasks, numnodes=$numnodes, mpi_tasks_per_node=$mpi_tasks_per_node (OMP_NUM_THREADS=$OMP_NUM_THREADS)"

-echo -e "\nExecuting command:\n==================\n$CMD\n"
+echo -e "\nExecuting commands:\n==================\n\n"
diff --git a/Gaussian.py b/Gaussian.py
index 883ba03..ace27be 100755
--- a/Gaussian.py
+++ b/Gaussian.py
@@ -583,25 +583,31 @@ def WriteSlurm(GausJobs, settings, index=''):
     slurmf = open(filename, 'r+')
     slurm = slurmf.readlines()
     slurm[12] = '#SBATCH -J ' + settings.Title + '\n'
-    slurm[18] = '#SBATCH --ntasks=' + str(len(GausJobs)) + '\n'
-    slurm[20] = '#SBATCH --time=' + format(settings.TimeLimit,"02") +\
+    slurm[19] = '#SBATCH --ntasks=' + str(len(GausJobs)) + '\n'
+    slurm[21] = '#SBATCH --time=' + format(settings.TimeLimit,"02") +\
         ':00:00\n'

     if (not settings.DFTOpt) and (not settings.PM6Opt) and (not settings.HFOpt)\
         and (not settings.M06Opt):
         for f in GausJobs:
-            slurm.append('srun --exclusive -n 1 $application < ' + f[:-3] + \
+            slurm.append('srun --exclusive -n1 $application < ' + f[:-3] + \
                 'com > ' + f[:-3] + 'out 2> error &\n')
+            #slurm.append('$application < ' + f[:-3] + \
+            #    'com > ' + f[:-3] + 'out 2> error &\n')
         slurm.append('wait\n')
     else:
         for f in GausJobs:
-            slurm.append('srun --exclusive -n 1 $application < ' + f[:-4] + \
+            slurm.append('srun --exclusive -n1 $application < ' + f[:-4] + \
                 'a.com > ' + f[:-4] + 'temp.out 2> error &\n')
+            #slurm.append('$application < ' + f[:-4] + \
+            #    'a.com > ' + f[:-4] + 'temp.out 2> error &\n')
         slurm.append('wait\n')

         for f in GausJobs:
-            slurm.append('srun --exclusive -n 1 $application < ' + f[:-4] + \
-                'b.com > ' + f[:-4] + '.out 2> error &\n')
+            slurm.append('srun --exclusive -n1 $application < ' + f[:-4] + \
+                'b.com > ' + f[:-4] + '.out 2> error &\n')
+            #slurm.append('$application < ' + f[:-4] + \
+            #    'b.com > ' + f[:-4] + '.out 2> error &\n')
         slurm.append('wait\n')

     slurmf.truncate(0)
diff --git a/PyDP4.py b/PyDP4.py
index fbbf7ef..0c95dd8 100755
--- a/PyDP4.py
+++ b/PyDP4.py
@@ -58,7 +58,7 @@ class Settings:
     DFT = 'z'
     Rot5Cycle = False
     Title = 'DP4molecule'
-    DarwinNodeSize = 16
+    DarwinNodeSize = 32
     RingAtoms = []
     ConfPrune = True
     GenDS = True
@@ -93,7 +93,7 @@ class Settings:
     MMfactor = 2500 # nsteps = MMfactor*degrees of freedom
     HardConfLimit = 10000
     MaxConcurrentJobs = 75
-    MaxConcurrentJobsDarwin = 256
+    MaxConcurrentJobsDarwin = 320
     PerStructConfLimit = 100
     StrictConfLimit = True
     InitialRMSDcutoff = 0.75
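
Note (not part of the patch): the updated WriteSlurm packs the Gaussian jobs onto a single Skylake node by emitting one backgrounded, single-core "srun --exclusive -n1" task per input file, followed by a final "wait". Below is a minimal standalone sketch of that generated pattern; the pack_gaussian_jobs helper, the placeholder conformer file names, and the 32-task count are illustrative assumptions, not code taken from the repository.

# Sketch only: mirrors the script body that WriteSlurm appends to Defaultslurm,
# assuming --ntasks=32 so up to 32 one-core Gaussian runs execute concurrently.
def pack_gaussian_jobs(com_files):
    """Return SLURM script lines: one 'srun --exclusive -n1 ... &' per input."""
    lines = []
    for f in com_files:
        # f[:-3] drops the trailing 'com', matching the f[:-3] + 'out'
        # output-naming pattern used in Gaussian.py
        lines.append('srun --exclusive -n1 $application < ' + f +
                     ' > ' + f[:-3] + 'out 2> error &\n')
    lines.append('wait\n')  # keep the allocation alive until all tasks finish
    return lines

if __name__ == '__main__':
    # Example with 32 hypothetical conformer inputs (placeholder names).
    for line in pack_gaussian_jobs(['conf%03d.com' % i for i in range(1, 33)]):
        print(line, end='')

With --exclusive, each srun task is pinned to its own core within the job allocation, so the final 'wait' returns only after all concurrently running Gaussian processes have finished.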