====== Magnus cluster ======
===== Info =====
Workload manager - [[https://wiki.storion.ru/linux/slurm|SLURM]]
[[http://cest-cms-wiki.skoltech.ru/doku.php?id=start|Wiki for users]]
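Jobs are submitted to SLURM with sbatch and monitored with the standard SLURM tools; a minimal sketch of the usual workflow (the script name run_vasp.sh and the job ID are placeholders):
sbatch run_vasp.sh        # submit a job script; prints "Submitted batch job <jobid>"
squeue -u $USER           # show your pending and running jobs
scancel <jobid>           # cancel a job by its ID
sacct -j <jobid>          # accounting information for a finished job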
===== Run script examples =====
=== VASP ===
#!/bin/bash
#SBATCH -J my_job_LiMnO2 #name of job
#SBATCH -N 1 #number of nodes
#SBATCH -n 16 #number of cores
#SBATCH -o %x.e%j #out file path
#SBATCH -e %x.e%j.err #err file path
#SBATCH --mem-per-cpu=7675 #memory limit
#SBATCH -p AMG-medium #type of schedule: AMG (24 h limit), AMG-medium (72 h), AMG-long (336 h)
module load Compiler/Intel/17u8 Q-Ch/VASP/5.4.4_OPT ScriptLang/python/3.6i_2018u3
ulimit -s unlimited
# cd /home/user/project_folder # uncomment and set the path if the script is not submitted from the project folder
mpirun vasp_std >log
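After submission the run can be followed through the files VASP writes to the project folder; a small sketch, assuming the script above is saved as run_vasp.sh (a placeholder name) and submitted from that folder:
sbatch run_vasp.sh    # submit the VASP job
tail -f log           # follow the stdout that mpirun redirects to "log"
grep TOTEN OUTCAR     # total free energy after each electronic/ionic step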
=== VASP with Occupation Matrix Control ===
The same as the previous script; the only difference is that the Q-Ch/VASP/5.4.4_OMC module is loaded instead of Q-Ch/VASP/5.4.4_OPT.
#!/bin/bash
#SBATCH -J my_job_LiMnO2 #name of job
#SBATCH -N 1 #number of nodes
#SBATCH -n 16 #number of cores
#SBATCH -o %x.e%j #out file path
#SBATCH -e %x.e%j.err #err file path
#SBATCH --mem-per-cpu=7675 #memory limit
#SBATCH -p AMG-medium #type of schedule: AMG (24 h limit), AMG-medium (72 h), AMG-long (336 h)
module load Compiler/Intel/17u8 Q-Ch/VASP/5.4.4_OMC ScriptLang/python/3.6i_2018u3
ulimit -s unlimited
# cd /home/user/project_folder # uncomment and set the path if the script is not submitted from the project folder
mpirun vasp_std >log
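Which VASP builds (and other software) are installed can be checked with the standard module commands before editing the script, for example:
module avail Q-Ch/VASP    # list the VASP modules available on the cluster
module list               # show the modules loaded in the current session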
=== LAMMPS ===
#!/bin/bash
#SBATCH -J meltcool10Kpps # job name, should coincide with input filename
#SBATCH -N 1
#SBATCH -n 8 # number of cores
#SBATCH -o %x.e%j
#SBATCH -e %x.e%j.err
#SBATCH --mem-per-cpu=8gb
export OMP_NUM_THREADS=${SLURM_CPUS_PER_TASK:-1}   # default to 1 thread if -c is not set
echo "SLURM_NTASKS = $SLURM_NTASKS"
module load Compiler/Intel/18u4
module load Q-Ch/LAMMPS/20Nov2019/intel/2018u4
mpirun -np ${SLURM_NTASKS} lammps -sf opt -pk omp ${OMP_NUM_THREADS} -in $SLURM_JOB_NAME.lam -log $SLURM_JOB_NAME.out >$SLURM_JOB_NAME.stdout
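Since all file names are derived from the job name, the LAMMPS input script has to match it; a minimal usage sketch, assuming the script above is saved as run_lammps.sh (a placeholder name):
ls meltcool10Kpps.lam     # the input script must sit in the submission folder
sbatch run_lammps.sh      # writes meltcool10Kpps.out (LAMMPS log) and meltcool10Kpps.stdout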
=== Quantum ESPRESSO ===
#!/bin/bash
#SBATCH -J JOBNAME # job name
#SBATCH -N 1 # number of nodes
#SBATCH -n 16 # number of cores
#SBATCH -o %x.e%j.out # std output file
#SBATCH -e %x.e%j.err # std error file
#SBATCH --mem-per-cpu=8gb # memory limit
module load Compiler/Intel/19u5
module load Q-Ch/QE/6.4.1/intel/2019u5
exe=pw.x # any of the QE executables (pw.x, ph.x, pp.x, ...)
mpirun $exe < scf.in > scf.out
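The exe variable selects which Quantum ESPRESSO binary is run, so for another type of calculation only that line and the input/output names need to change; a sketch with placeholder file names:
exe=ph.x                       # e.g. the phonon code instead of pw.x
mpirun $exe < ph.in > ph.out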