Remove sbatch*.sh

This commit is contained in:
Cadene 2024-04-10 15:58:10 +00:00
parent e8622154f8
commit daecc3b64c
3 changed files with 3 additions and 42 deletions

3
.gitignore vendored
View File

@@ -11,6 +11,9 @@ rl
nautilus/*.yaml
*.key
# Slurm
sbatch*.sh
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]

View File

@@ -1,25 +0,0 @@
#!/bin/bash
# SLURM batch script: run a user-supplied command (passed as the script's
# arguments) on one node with one GPU, using the lerobot conda environment.
#SBATCH --nodes=1                 # total number of nodes
#SBATCH --ntasks-per-node=1       # one task per node
#SBATCH --gres=gpu:1              # GPUs reserved per node
#SBATCH --cpus-per-task=8         # cores per task
#SBATCH --time=2-00:00:00
#SBATCH --output=/home/rcadene/slurm/%j.out
#SBATCH --error=/home/rcadene/slurm/%j.err
#SBATCH --qos=low
#SBATCH --mail-user=re.cadene@gmail.com
#SBATCH --mail-type=ALL

# Keep the command as an array so arguments containing spaces survive intact
# (the previous CMD=$@ flattened them into one word-split string).
CMD=("$@")
printf 'command: %s\n' "${CMD[*]}"

# NOTE(review): 'apptainer exec ... "$SHELL"' starts an interactive shell, so
# everything below only runs on the HOST after that shell exits. If the intent
# was to run the job inside the container, the command should be passed to
# 'apptainer exec' directly (e.g. apptainer exec --nv IMG bash -c '...').
# Confirm intended behavior before relying on this script.
apptainer exec --nv \
    ~/apptainer/nvidia_cuda:12.2.2-devel-ubuntu22.04.sif "$SHELL"

source ~/.bashrc
#conda activate fowm
conda activate lerobot

export DATA_DIR="data"

# Quoted array expansion preserves each argument as a separate word.
srun "${CMD[@]}"

View File

@@ -1,17 +0,0 @@
#!/bin/bash
# SLURM batch script: run a user-supplied command (passed as the script's
# arguments) on one node with one GPU on the hopper-prod partition.
#SBATCH --nodes=1                 # total number of nodes
#SBATCH --ntasks-per-node=1       # one task per node
#SBATCH --qos=normal
#SBATCH --partition=hopper-prod
#SBATCH --gres=gpu:1              # GPUs reserved per node
#SBATCH --cpus-per-task=12        # cores per task
#SBATCH --mem-per-cpu=11G
#SBATCH --time=12:00:00
#SBATCH --output=/admin/home/remi_cadene/slurm/%j.out
#SBATCH --error=/admin/home/remi_cadene/slurm/%j.err
#SBATCH --mail-user=remi_cadene@huggingface.co
#SBATCH --mail-type=ALL

# Keep the command as an array so arguments containing spaces survive intact
# (the previous CMD=$@ flattened them into one word-split string).
CMD=("$@")
printf 'command: %s\n' "${CMD[*]}"

# Quoted array expansion preserves each argument as a separate word.
srun "${CMD[@]}"