Commit

Changes to run jobs on the new Darwin cluster
KristapsE committed Nov 29, 2017
1 parent 03146fb commit c288622
Showing 3 changed files with 32 additions and 22 deletions.
Defaultslurm: 32 changes (18 additions, 14 deletions)
@@ -1,7 +1,7 @@
 #!/bin/bash
 #!
-#! Example SLURM job script for Darwin (Sandy Bridge, ConnectX3)
-#! Last updated: Sat Apr 18 13:05:53 BST 2015
+#! Example SLURM job script for Peta4-Skylake (Skylake CPUs, OPA)
+#! Last updated: Mon 13 Nov 12:25:17 GMT 2017
 #!

 #!#############################################################
@@ -12,29 +12,33 @@
 #! Name of the job:
 #SBATCH -J Tadalafil
 #! Which project should be charged:
-#SBATCH -A GOODMAN
+#SBATCH -A GOODMAN-SL3-CPU
 #! How many whole nodes should be allocated?
 #SBATCH --nodes=1
-#! How many (MPI) tasks will there be in total? (<= nodes*16)
-#SBATCH --ntasks=6
+#! How many (MPI) tasks will there be in total? (<= nodes*32)
+#! The skylake/skylake-himem nodes have 32 CPUs (cores) each.
+#SBATCH --ntasks=32
 #! How much wallclock time will be required?
-#SBATCH --time=01:00:00
+#SBATCH --time=02:00:00
 #! What types of email messages do you wish to receive?
 #SBATCH --mail-type=FAIL
 #! Uncomment this to prevent the job from being requeued (e.g. if
 #! interrupted by node failure or system downtime):
-##SBATCH --no-requeue
+#SBATCH --no-requeue

-#! Do not change:
-#SBATCH -p sandybridge
+#! For 6GB per CPU, set "-p skylake"; for 12GB per CPU, set "-p skylake-himem":
+#SBATCH -p skylake

 #! sbatch directives end here (put any additional directives above this line)

 #! Notes:
-#! Charging is determined by node number*walltime. Allocation is in entire nodes.
+#! Charging is determined by core number*walltime.
 #! The --ntasks value refers to the number of tasks to be launched by SLURM only. This
 #! usually equates to the number of MPI tasks launched. Reduce this from nodes*16 if
 #! demanded by memory requirements, or if OMP_NUM_THREADS>1.
+#! Each task is allocated 1 core by default, and each core is allocated 5990MB (skylake)
+#! and 12040MB (skylake-himem). If this is insufficient, also specify
+#! --cpus-per-task and/or --mem (the latter specifies MB per node).

 #! Number of nodes and tasks per node allocated by SLURM (do not change):
 numnodes=$SLURM_JOB_NUM_NODES
@@ -48,14 +52,14 @@ mpi_tasks_per_node=$(echo "$SLURM_TASKS_PER_NODE" | sed -e 's/^\([0-9][0-9]*\).
 #! (note that SLURM reproduces the environment at submission irrespective of ~/.bashrc):
 . /etc/profile.d/modules.sh # Leave this line (enables the module command)
 module purge # Removes all modules still loaded
-module load default-impi # REQUIRED - loads the basic environment
+module load rhel7/default-peta4 # REQUIRED - loads the basic environment

 #! Insert additional module load commands after this line if needed:
 module load gaussian/09

 #! Full path to application executable:
-application="g09"
-
+application="$GAUSS_EXEDIR/g09"
+export GAUSS_SCRDIR=/home/ke291/rds/hpc-work
 #! Run options for the application:
 options="< Tadalafil1ginp001.com > Tadalafil1ginp001.out"

@@ -118,6 +122,6 @@ fi

 echo -e "\nnumtasks=$numtasks, numnodes=$numnodes, mpi_tasks_per_node=$mpi_tasks_per_node (OMP_NUM_THREADS=$OMP_NUM_THREADS)"

-echo -e "\nExecuting command:\n==================\n$CMD\n"
+echo -e "\nExecuting commands:\n==================\n\n"

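Per the notes added above, each skylake core carries 5990MB, so a full 32-core node offers roughly 187GB; a job that needs more memory per task trades cores for it. A minimal sketch of the directives those notes describe, not part of this commit and left double-commented so SLURM ignores them:

#! Sketch only (not in this commit): give each task two cores, doubling its
#! memory share on the skylake partition (2 x 5990MB, roughly 12GB per task):
##SBATCH --cpus-per-task=2
#! Alternatively, request memory per node explicitly (value in MB):
##SBATCH --mem=100000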
Gaussian.py: 18 changes (12 additions, 6 deletions)
@@ -583,25 +583,31 @@ def WriteSlurm(GausJobs, settings, index=''):
     slurmf = open(filename, 'r+')
     slurm = slurmf.readlines()
     slurm[12] = '#SBATCH -J ' + settings.Title + '\n'
-    slurm[18] = '#SBATCH --ntasks=' + str(len(GausJobs)) + '\n'
-    slurm[20] = '#SBATCH --time=' + format(settings.TimeLimit,"02") +\
+    slurm[19] = '#SBATCH --ntasks=' + str(len(GausJobs)) + '\n'
+    slurm[21] = '#SBATCH --time=' + format(settings.TimeLimit,"02") +\
         ':00:00\n'

     if (not settings.DFTOpt) and (not settings.PM6Opt) and (not settings.HFOpt)\
         and (not settings.M06Opt):

         for f in GausJobs:
-            slurm.append('srun --exclusive -n 1 $application < ' + f[:-3] + \
+            slurm.append('srun --exclusive -n1 $application < ' + f[:-3] + \
                 'com > ' + f[:-3] + 'out 2> error &\n')
+            #slurm.append('$application < ' + f[:-3] + \
+            #    'com > ' + f[:-3] + 'out 2> error &\n')
         slurm.append('wait\n')
     else:
         for f in GausJobs:
-            slurm.append('srun --exclusive -n 1 $application < ' + f[:-4] + \
+            slurm.append('srun --exclusive -n1 $application < ' + f[:-4] + \
                 'a.com > ' + f[:-4] + 'temp.out 2> error &\n')
+            #slurm.append('$application < ' + f[:-4] + \
+            #    'a.com > ' + f[:-4] + 'temp.out 2> error &\n')
         slurm.append('wait\n')
         for f in GausJobs:
-            slurm.append('srun --exclusive -n 1 $application < ' + f[:-4] + \
-                'b.com > ' + f[:-4] + '.out 2> error &\n')
+            slurm.append('srun --exclusive -n1 $application < ' + f[:-4] + \
+                'b.com > ' + f[:-4] + '.out 2> error &\n')
+            #slurm.append('$application < ' + f[:-4] + \
+            #    'b.com > ' + f[:-4] + '.out 2> error &\n')
         slurm.append('wait\n')

     slurmf.truncate(0)
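Two things are happening in this hunk. The template indices shift (slurm[18] to slurm[19], slurm[20] to slurm[21]) because the new Peta4 template inserts an extra comment line ahead of the --ntasks directive. And each Gaussian input is launched as "srun --exclusive -n1 ... &", packing many single-core runs into one allocation, with "wait" holding the script until the whole batch finishes. The hard-coded indices remain fragile against further template edits; a minimal sketch of a prefix-based alternative, where set_directive is a hypothetical helper not present in this commit:

# Hypothetical helper (not in this commit): locate a directive by its
# prefix instead of a hard-coded line number in the template.
def set_directive(lines, prefix, value):
    for i, line in enumerate(lines):
        if line.startswith(prefix):
            lines[i] = prefix + value + '\n'
            return
    raise ValueError('Directive not found: ' + prefix)

# Equivalent to the fixed-index assignments above:
# set_directive(slurm, '#SBATCH --ntasks=', str(len(GausJobs)))
# set_directive(slurm, '#SBATCH --time=', format(settings.TimeLimit, "02") + ':00:00')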
PyDP4.py: 4 changes (2 additions, 2 deletions)
@@ -58,7 +58,7 @@ class Settings:
     DFT = 'z'
     Rot5Cycle = False
     Title = 'DP4molecule'
-    DarwinNodeSize = 16
+    DarwinNodeSize = 32
     RingAtoms = []
     ConfPrune = True
     GenDS = True
@@ -93,7 +93,7 @@ class Settings:
     MMfactor = 2500 # nsteps = MMfactor*degrees of freedom
     HardConfLimit = 10000
     MaxConcurrentJobs = 75
-    MaxConcurrentJobsDarwin = 256
+    MaxConcurrentJobsDarwin = 320
     PerStructConfLimit = 100
     StrictConfLimit = True
     InitialRMSDcutoff = 0.75
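The two new constants appear to be chosen together: presumably the concurrency cap is ten full nodes of the new 32-core size, though the commit does not state this relationship.

# Presumed relationship (not stated in the commit):
DarwinNodeSize = 32                            # cores per Peta4-Skylake node
MaxConcurrentJobsDarwin = 10 * DarwinNodeSize  # = 320, ten full nodes' worth of jobs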
