#!/bin/bash
#SBATCH --job-name=phi
#SBATCH --nodes=1                  # Request 1 compute node per job instance
#SBATCH --cpus-per-task=4
#SBATCH --gres=gpu:a100:2          # Request 2 A100 GPUs
#SBATCH --mem=64GB                 # Request 64GB of RAM per job instance
#SBATCH --time=07:30:00            # Request 7h30m per job instance
#SBATCH --output=/scratch/spp9399/output_logs/rh_outputs/phi_%A.out  # Output log; %A is replaced by the SLURM job ID
#SBATCH --mail-user=[email protected]    # Email address
#SBATCH --mail-type=BEGIN,END      # Send an email when the job starts and when it finishes
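# Usage sketch (the filename below is an assumption, not fixed by the header):
#   sbatch phi_retrieval_heads.sbatch
# Monitor the queue with `squeue -u $USER` and check the log at the --output path above.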
module purge                       # Unload all currently loaded modules from the environment

export WANDB_ENTITY=ETNLP_Project
export WANDB_PROJECT=retrieval-head-detection
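# Note: Weights & Biases also needs authentication on the compute node; this
# assumes a prior `wandb login` (which stores credentials in ~/.netrc) or a
# WANDB_API_KEY exported elsewhere.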
MODEL_PATH="/scratch/spp9399/LLMS/Phi-3.5-mini-instruct"
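# Optional sanity check (not in the original script): fail fast if the model
# directory is missing, rather than consuming GPU allocation time.
if [ ! -d "$MODEL_PATH" ]; then
    echo "Error: model path $MODEL_PATH not found" >&2
    exit 1
fi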
# Reduce CUDA memory fragmentation across the long-context runs.
export PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True

/scratch/spp9399/env/retrieval_heads/run_env.sh python3 retrieval_head_detection.py \
    --model_path "$MODEL_PATH" -s 0 -e 50000 \
    --haystack_dir /scratch/spp9399/ETNLP/original/Retrieval_Head/haystack_for_detect/en \
    --needle_lg en --exp_name phi_35_mini_inst_en

/scratch/spp9399/env/retrieval_heads/run_env.sh python3 retrieval_head_detection.py \
    --model_path "$MODEL_PATH" -s 0 -e 50000 \
    --haystack_dir /scratch/spp9399/ETNLP/original/Retrieval_Head/haystack_for_detect/zh \
    --needle_lg zh --exp_name phi_35_mini_inst_zh

/scratch/spp9399/env/retrieval_heads/run_env.sh python3 retrieval_head_detection.py \
    --model_path "$MODEL_PATH" -s 0 -e 50000 \
    --haystack_dir /scratch/spp9399/ETNLP/original/Retrieval_Head/haystack_for_detect/de \
    --needle_lg de --exp_name phi_35_mini_inst_de
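# The three runs share one allocation and execute sequentially; a failure in
# one does not stop the others. Add `set -euo pipefail` near the top of the
# script if abort-on-first-failure is preferred (left off here to match the
# original behavior).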