#!/bin/bash
#
# Validate the five positional arguments and fall back to safe defaults
# for anything unrecognised:
#
#   $1  beam     "e+" or "e-"             (default: e-)
#   $2  lattice  "e", "csr" or "simple"   (default: simple)
#   $3  wakes    "1" to enable            (default: 0)
#   $4  csr      "1" to enable            (default: 0)
#   $5  isr      "1" to enable            (default: 0)
beam="$1"
lattice="$2"
wakes="$3"
csr="$4"
isr="$5"

# Flags: anything other than the literal "1" is treated as disabled.
if [ "$wakes" != "1" ] ; then
  wakes="0"
fi
if [ "$csr" != "1" ] ; then
  csr="0"
fi
if [ "$isr" != "1" ] ; then
  isr="0"
fi
# Lattice: only "e" and "csr" are recognised, everything else maps to
# "simple".  The deprecated and ambiguous `-a` operator of test(1) is
# replaced by two separate tests joined with &&.
if [ "$lattice" != "e" ] && [ "$lattice" != "csr" ] ; then
  lattice="simple"
fi
# Beam: only "e+" is recognised, everything else maps to "e-".
if [ "$beam" != "e+" ] ; then
  beam="e-"
fi

#
# submit using bsub -q <queuename> -n <numnodes> -m <hosts> -R <resources> mpilsfjob.sh
#
# for jobs on LXSCD, <hosts> has to be "LXSCD"
# simple example: bsub -q 1nd -n 7 -m "LXSCD" mpilsfjob.sh
#     start the job on 7 nodes of LXSCD using the queue 1nd (equivalent to 1day total CPU time on 1 standard CPU)
#
# advanced example: bsub -q 1nd -n 7 -m "LXSCD PARC" -R "select [type==SLC4_64]" mpilsfjob.sh
#     start the job on 7 nodes of LXSCD or PARC, use nodes only which are 64bit SLC4 
#
# ATTENTION: There seems to be a limit imposed by the batch system on the number of nodes one job can use!
#            
# Quoting $(date) preserves the padding in date's own output.
echo "Job started: " "$( date )"

# MPI installation shipped in the PARC AFS area.
export MPI=/afs/cern.ch/project/parc/mpi/
export MPIBIN=$MPI/bin
export MPILIB=$MPI/lib
# if the binary was compiled using Intel compilers
#export INTELFORTRAN=/afs/cern.ch/sw/IntelSoftware/linux/x86_64/fce/10.0.023/
#export IFORTLIB=$INTELFORTRAN/lib

export PATH=$MPIBIN:$PATH
# Append the old value only when it was non-empty: an unconditional
# ":$LD_LIBRARY_PATH" leaves a trailing ":" when the variable is unset,
# which the dynamic loader interprets as "also search the current directory".
export LD_LIBRARY_PATH=$MPILIB${LD_LIBRARY_PATH:+:$LD_LIBRARY_PATH}
#export LD_LIBRARY_PATH=$MPILIB:$IFORTLIB:$LD_LIBRARY_PATH

# LS_SUBCWD is provided by LSF automatically and points at the directory
# from which the job was submitted.  Without this cd, the working directory
# would be an LSF scratch area such as /pool/lsf/<jobid> and the input files
# would have to be copied there by hand.
# The cd is guarded: an unquoted, unset LS_SUBCWD would silently cd to $HOME
# instead of failing, and continuing in the wrong directory would scatter
# the run's files.
if [ -n "${LS_SUBCWD:-}" ] ; then
  cd "$LS_SUBCWD" || { echo "cannot cd to $LS_SUBCWD" >&2 ; exit 1 ; }
fi

# Per-run output directory: the name encodes every switch so that runs with
# different configurations never clobber each other.
outputdir="./simrun_${beam}_${lattice}_wakes=${wakes}_csr=${csr}_isr=${isr}"
# -p so a re-run with the same switches does not abort on the existing dir.
mkdir -p "$outputdir" || { echo "cannot create $outputdir" >&2 ; exit 1 ; }

# Generate the lattice for the requested switches, then stage all input
# files the run needs into the output directory.
# NOTE(review): the staging commands below are deliberately left unchecked
# (matching the original behaviour) — a missing file only produces a cp/mv
# warning here; consider adding explicit error checks.
./create_rtml_lte.sh "$beam" "$wakes" "$csr" "$isr"
cp extractdata.sh "$outputdir"
mv "rtml_${beam}_wakes=${wakes}_csr=${csr}_isr=${isr}.lte" "$outputdir/rtml.lte"
cp "rtml_$lattice.ele" "$outputdir/rtml.ele"
cp wake_bc1.sdds "$outputdir/wake_bc1.sdds"
cp wake_bc2.sdds "$outputdir/wake_bc2.sdds"
cp wake_booster.sdds "$outputdir/wake_booster.sdds"
# If the cd fails, everything that follows (hostfile, mpirun, cleanup)
# would run in the submission directory — abort instead.
cd "$outputdir" || { echo "cannot cd to $outputdir" >&2 ; exit 1 ; }
#

# LSF automatically provides the variable LSB_HOSTS containing the assigned
# node names (space separated).  Write one host per line into a job-private
# machine file and count them in nnodes.
hostfile="LSF_nodes_${LSB_JOBID}"
rm -f -- "$hostfile"
nnodes=0
for node in $LSB_HOSTS   # intentionally unquoted: split the list on whitespace
do
  echo "$node" >> "$hostfile"
  nnodes=$(( nnodes + 1 ))
done
#

# now we run the mpi job
mpirun --prefix "$MPI" -np "$nnodes" --hostfile "./$hostfile" Pelegant_old rtml.ele > Pelegant.out 2> Pelegant.err
#
# e.g. mpirun -np $nnodes --hostfile ./LSF_nodes_$LSB_JOBID ./csrt12_openmpi_intel > csrt_$LSB_JOBID.log 2> csrt_$LSB_JOBID.err
# if you do not have ssh access to the nodes which you want to use you have to use the following line
#mpirun --mca pls_rsh_agent blaunch -np $nnodes --hostfile ./LSF_nodes_$LSB_JOBID <your job command>
#
# -f: when LSB_HOSTS was empty the hostfile was never created and a plain rm
# would print a spurious error here.
rm -f -- "$hostfile"
echo "Job finished: " "$( date )"
