-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy path sub_openmm.py
38 lines (30 loc) · 896 Bytes
/
sub_openmm.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
#!/bin/bash
## --------------------------------------------------------
## Information
## @email: [email protected] || [email protected]
## @lab: Dr. Isborn
## @place: UC Merced
## @date: July.10.2021
## @author: Ajay Khanna
## --------------------------------------------------------
# submit_array.sh — SLURM batch script: runs an OpenMM MD job on 2 GPUs
# while logging GPU memory usage in the background.
#
# NOTE: #SBATCH directives must stay before the first executable command;
# SLURM stops parsing them at the first non-comment line.
#SBATCH --nodes=1
#SBATCH --ntasks=2
#SBATCH --partition gpu
#SBATCH --mem=10G
#SBATCH --time=03-00:00:00 # 03 Days
#SBATCH --output=openmm_log.out
#SBATCH --open-mode=append # Append to Outfile
#SBATCH --job-name=OpenMMMD
##SBATCH --exclude=gnode003
#SBATCH --export=ALL
#SBATCH --gres gpu:2

# Fail fast if environment setup breaks (wrong module, missing env), so the
# job doesn't silently run against the wrong toolchain. -u is deliberately
# omitted: conda's activation scripts reference unset vars (e.g. PS1).
set -eo pipefail

whoami
module load cuda/10.2.89
module list

# 'conda activate' is not available in non-interactive batch shells until
# the conda shell hook is sourced; without this line activation fails with
# "Your shell has not been properly configured to use 'conda activate'".
source "$(conda info --base)/etc/profile.d/conda.sh"
conda activate openmm_env

export CUDA_VISIBLE_DEVICES=0,1

# Work around slurm arraying: sample GPU memory usage every 4.7 s in the
# background for the lifetime of the MD run. Record the PID so the monitor
# can be stopped when the job finishes (otherwise --loop runs forever and
# can hold the allocation until walltime).
nvidia-smi --query-gpu=index,memory.used --format=csv --loop=4.7 >> OpenMM_Job_GPUsage.data &
monitor_pid=$!

python full_openmm.py >> output.log

# Stop the GPU monitor; tolerate it having already exited.
kill "$monitor_pid" 2>/dev/null || true
echo "All Done"