Here is an example of a configuration (.ac) file for the Green cluster:
enable_mpi="yes"
enable_mpi_io="yes"
with_mpi_prefix="/home/pcpm/buildbot/openmpi_intel"
with_fft_flavor="fftw3"
with_fft_libs="-L/usr/local/intel/Compiler/11.1/current/mkl/lib/em64t -Wl,--start-group -lmkl_intel_lp64 -lmkl_sequential -lmkl_core -Wl,--end-group -lpthread"
with_linalg_flavor="mkl"
with_linalg_libs="-L/usr/local/intel/Compiler/11.1/current/mkl/lib/em64t -Wl,--start-group -lmkl_intel_lp64 -lmkl_sequential -lmkl_core -Wl,--end-group -lpthread"
enable_clib="yes"
enable_gw_dpc="yes"
enable_memory_profiling="no"
enable_maintainer_checks="no"
enable_test_timeout="yes"
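These options are meant to be read by ABINIT's configure script. The lines below are a minimal sketch of how such a file could be used, assuming the options above are saved as green.ac next to an abinit-7.3.3 source tree (both names are illustrative); check ./configure --help for the exact name of the config-file option on your version.

#!/bin/sh
# Minimal out-of-source build using the Green options file (illustrative paths).
cd abinit-7.3.3
mkdir -p build
cd build
# Read the build options from the .ac file; verify the option name
# with ../configure --help on your ABINIT version.
../configure --with-config-file=$HOME/green.ac
make -j 8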
Green uses the SGE (Sun Grid Engine) batch submission system.
Here is an example submission script:
#!/bin/sh
#
# On old Green node
#$ -l nb=false
#
# request a parallel environment (pe): snode or openmpi
# snode = same node, where shared-memory communication is fastest
#$ -pe openmpi 16
# -pe snode8 8
# keep current working directory
#$ -cwd
# give a name to your job
#$ -N name_of_job
# keep all the defined variables
#$ -V
#$ -l nb=false
# not mandatory: highmem=true (hm=true) for a 32 GB node
# or hm=false for a 16 GB node
# if no hm argument is given, the node RAM size (16 or 32 GB) is not taken into account
# -l hm=true
# important: specify the mem_free (mf) needed
# h_vmem can also be set, but mf is mandatory
# max 31G on hm=true nodes and 15G on hm=false nodes
#$ -l mf=4G
# specify the requested time
# 240:00:00 is 10 days
#$ -l h_rt=40:59:59
# to be informed by e-mail (b = begin, e = end, s = suspend, a = abort)
#$ -M your_e_mail@blabla.com
#$ -m besa
# path to the abinit executable
PROGS=/home/naps/sponce/Software/7.3.3-private/build/src/98_main/abinit
echo "Got $NSLOTS slots."
echo "Temp dir is $TMPDIR"
echo "Node file is:"
cat $TMPDIR/machines
# MPI launcher; NSLOTS is set by SGE to the number of granted slots
MPI=mpirun
MPIOPT="-np $NSLOTS"
# record start time (seconds since the epoch)
INI=`date +%s`
# run abinit in parallel; redirect stdout and stderr to the log file
${MPI} ${MPIOPT} ${PROGS} < input.files > log 2>&1
FIN=`date +%s`
echo INI $INI
echo FIN $FIN
echo TIME-OF-CALCULATION `expr $FIN - $INI`
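Once the script (here assumed to be saved as job.sge, an illustrative name) and the ABINIT files file input.files are in the working directory, the job is handled with the usual SGE commands:

#!/bin/sh
# Submit the job and follow it (job.sge is an illustrative file name).
qsub job.sge
# list your own pending and running jobs
qstat -u $USER
# after completion, inspect the log written by the mpirun line
tail log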