Commit e8cd4d48 authored by adelmann

reflect newest Bebop setting

parent ae586ce3
@@ -40,7 +40,7 @@ class Simulation:
os.mkdir(self.dirname)
return True
- def run(self,N, baseFileName, restart_step, inputfilePath, tmplFile, oinpFile, restart_file, doTest, doBlock, doKeep, doNobatch, info, queue):
+ def run(self,N, baseFileName, restart_step, inputfilePath, tmplFile, oinpFile, restart_file, doTest, doBlock, doKeep, doNobatch, info, queue, hypert):
# make directory name indicating changed values
self.dirname = baseFileName
if N >= 0:
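For reference, a minimal sketch of how the extended run() signature might be invoked after this change. Apart from the new hypert keyword, every value below (including the Simulation constructor arguments) is an illustrative assumption, not taken from this commit:

# Hypothetical call site; per the comment later in this diff,
# hypert == 0 -> no hyper-threading, hypert == 1 -> twice as many tasks per node.
sim = Simulation(...)   # constructor arguments omitted / assumed
sim.run(N=-1, baseFileName='FODO', restart_step=None,
        inputfilePath='./', tmplFile='FODO.tmpl', oinpFile='FODO.in',
        restart_file=None, doTest=False, doBlock=True, doKeep=False,
        doNobatch=False, info=2, queue='knl', hypert=1)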
@@ -158,7 +158,7 @@ class Simulation:
elif (hostname.startswith("bebop")):
batchsys = 'SLURM'
runfile = 'run.bebop'
- self.WriteBebop(opalexe, oinpFile, CORES, time, ram, info, runfile, queue)
+ self.WriteBebop(opalexe, oinpFile, CORES, time, ram, info, runfile, queue, hypert)
elif (hostname.startswith("cori")):
batchsys = 'SLURM'
@@ -427,7 +427,7 @@ class Simulation:
myfile.write(s1)
myfile.close()
- def WriteBebop(self, opalexe, oinpFile, cores, time, ram, info, name, queue):
+ def WriteBebop(self, opalexe, oinpFile, cores, time, ram, info, name, queue, hypert):
# BDW and KNL Compute Nodes at ANL
# http://www.lcrc.anl.gov/for-users/using-lcrc/running-jobs/running-jobs-on-bebop/
if type(cores) is str:
@@ -438,10 +438,10 @@ class Simulation:
#Adjusting number of cores for specified queue
if (queue=='bdw' or queue=='bdwall' or queue=='bdwd'):
print('Running on BDW')
- coresPerNode = 36
+ coresPerNode = 36 * (hypert+1) # hypert == 0 -> no hyper threading
elif (queue=='knl' or queue=='knlall' or queue=='knld'):
print('Running on KNL')
- coresPerNode = 64
+ coresPerNode = 32 * (hypert+1)
else:
print('You have picked a non-valid queue!! Your run will fail!!')
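To make the hyper-threading factor concrete, here is a small self-contained sketch of the cores-per-node arithmetic this hunk introduces. The helper names and the node-count calculation are illustrative assumptions; only the per-queue values and the (hypert+1) factor come from the diff:

import math

def cores_per_node(queue, hypert):
    # hypert == 0 -> physical cores only; hypert == 1 -> one extra hardware
    # thread per core, doubling the usable slots per node (values per this diff).
    if queue in ('bdw', 'bdwall', 'bdwd'):
        return 36 * (hypert + 1)   # Broadwell nodes
    elif queue in ('knl', 'knlall', 'knld'):
        return 32 * (hypert + 1)   # KNL nodes, as set in this commit
    raise ValueError('unknown Bebop queue: ' + queue)

def nodes_needed(total_cores, queue, hypert):
    # Assumed to mirror how the class turns a requested core count into nodes.
    return math.ceil(total_cores / cores_per_node(queue, hypert))

print(nodes_needed(144, 'bdw', hypert=0))  # -> 4
print(nodes_needed(144, 'bdw', hypert=1))  # -> 2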
@@ -477,7 +477,8 @@ class Simulation:
#s1 += "#SBATCH --cpus-per-task=1 \n"
s1 += "export I_MPI_SLURM_EXT=0 \n"
s1 += "export I_MPI_FABRICS=shm:tmi \n"
if (queue=='knl' or queue=='knlall' or queue=='knld'):
s1 += "#SBATCH -C knl,quad,cache \n"
if int(nodes) > 1:
s1 += "#SBATCH --ntasks-per-node=" + str(tasks_per_node) + " \n"
s1 += "mpirun -n $SLURM_NTASKS "+ opalexe + " " + oinpFile + "\n"