Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
4 changes: 2 additions & 2 deletions jobs/dpo/dpo_cppo_multi_gpu.sh
Original file line number Diff line number Diff line change
Expand Up @@ -7,7 +7,7 @@
#SBATCH --mem=64G
#SBATCH --time=24:00:00
#SBATCH --output=out/%x.%j.out # Include job name + job ID
#SBATCH --error=out/%x.%j.err # Include job name + job ID
#SBATCH --error=out/%x.%j.err # Include job name + job ID
#SBATCH --mail-type=ALL
#SBATCH --account=aip-rrabba
#SBATCH --mail-user=shahrad_m@icloud.com # Update with your email
Expand All @@ -34,4 +34,4 @@ accelerate launch --config_file benchmarks/dpo/accelerate_configs/deepspeed_zero
--output_dir "$SCRATCH/projects/Qwen2-0.5B-DPO-${dataset_name}" \
--no_remove_unused_columns \
--wandb_project $dataset_name \
--wandb_run_name "Qwen2-0.5B-DPO-${dataset_name}-multi-gpu"
--wandb_run_name "Qwen2-0.5B-DPO-${dataset_name}-multi-gpu"
4 changes: 2 additions & 2 deletions jobs/dpo/dpo_domain_shift_multi_gpu.sh
Original file line number Diff line number Diff line change
Expand Up @@ -7,7 +7,7 @@
#SBATCH --mem=64G
#SBATCH --time=24:00:00
#SBATCH --output=out/%x.%j.out # Include job name + job ID
#SBATCH --error=out/%x.%j.err # Include job name + job ID
#SBATCH --error=out/%x.%j.err # Include job name + job ID
#SBATCH --mail-type=ALL
#SBATCH --account=aip-rrabba
#SBATCH --mail-user=shahrad_m@icloud.com # Update with your email
Expand All @@ -20,7 +20,7 @@ accelerate launch --config_file benchmarks/dpo/accelerate_configs/deepspeed_zero
benchmarks/dpo/dpo_continual.py \
--dataset_name $dataset_name \
--model_name_or_path Qwen/Qwen2-0.5B-Instruct \
--reward_model_path LifelongAlignment/Qwen2.5-0.5B-Instruct_CPPO_REWARD \
--reward_model_path LifelongAlignment/Qwen2.5-0.5B-Instruct_${dataset_name}_REWARD \
--learning_rate 5.0e-6 \
--num_train_epochs 4 \
--per_device_train_batch_size 8 \
Expand Down
4 changes: 2 additions & 2 deletions jobs/dpo/dpo_lipschitz_multi_gpu.sh
Original file line number Diff line number Diff line change
Expand Up @@ -7,7 +7,7 @@
#SBATCH --mem=64G
#SBATCH --time=24:00:00
#SBATCH --output=out/%x.%j.out # Include job name + job ID
#SBATCH --error=out/%x.%j.err # Include job name + job ID
#SBATCH --error=out/%x.%j.err # Include job name + job ID
#SBATCH --mail-type=ALL
#SBATCH --account=aip-rrabba
#SBATCH --mail-user=shahrad_m@icloud.com # Update with your email
Expand All @@ -20,7 +20,7 @@ accelerate launch --config_file benchmarks/dpo/accelerate_configs/deepspeed_zero
benchmarks/dpo/dpo_continual.py \
--dataset_name $dataset_name \
--model_name_or_path Qwen/Qwen2-0.5B-Instruct \
--reward_model_path LifelongAlignment/Qwen2.5-0.5B-Instruct_CPPO_REWARD \
--reward_model_path LifelongAlignment/Qwen2.5-0.5B-Instruct_${dataset_name}_REWARD \
--learning_rate 5.0e-6 \
--num_train_epochs 4 \
--per_device_train_batch_size 8 \
Expand Down
4 changes: 2 additions & 2 deletions jobs/dpo/dpo_long_piecewise_multi_gpu.sh
Original file line number Diff line number Diff line change
Expand Up @@ -7,7 +7,7 @@
#SBATCH --mem=64G
#SBATCH --time=24:00:00
#SBATCH --output=out/%x.%j.out # Include job name + job ID
#SBATCH --error=out/%x.%j.err # Include job name + job ID
#SBATCH --error=out/%x.%j.err # Include job name + job ID
#SBATCH --mail-type=ALL
#SBATCH --account=aip-rrabba
#SBATCH --mail-user=shahrad_m@icloud.com # Update with your email
Expand All @@ -20,7 +20,7 @@ accelerate launch --config_file benchmarks/dpo/accelerate_configs/deepspeed_zero
benchmarks/dpo/dpo_continual.py \
--dataset_name $dataset_name \
--model_name_or_path Qwen/Qwen2-0.5B-Instruct \
--reward_model_path LifelongAlignment/Qwen2.5-0.5B-Instruct_CPPO_REWARD \
--reward_model_path LifelongAlignment/Qwen2.5-0.5B-Instruct_${dataset_name}_REWARD \
--learning_rate 5.0e-6 \
--num_train_epochs 4 \
--per_device_train_batch_size 8 \
Expand Down
4 changes: 2 additions & 2 deletions jobs/dpo/dpo_piecewise_multi_gpu.sh
Original file line number Diff line number Diff line change
Expand Up @@ -7,7 +7,7 @@
#SBATCH --mem=64G
#SBATCH --time=24:00:00
#SBATCH --output=out/%x.%j.out # Include job name + job ID
#SBATCH --error=out/%x.%j.err # Include job name + job ID
#SBATCH --error=out/%x.%j.err # Include job name + job ID
#SBATCH --mail-type=ALL
#SBATCH --account=aip-rrabba
#SBATCH --mail-user=shahrad_m@icloud.com # Update with your email
Expand All @@ -20,7 +20,7 @@ accelerate launch --config_file benchmarks/dpo/accelerate_configs/deepspeed_zero
benchmarks/dpo/dpo_continual.py \
--dataset_name $dataset_name \
--model_name_or_path Qwen/Qwen2-0.5B-Instruct \
--reward_model_path LifelongAlignment/Qwen2.5-0.5B-Instruct_CPPO_REWARD \
--reward_model_path LifelongAlignment/Qwen2.5-0.5B-Instruct_${dataset_name}_REWARD \
--learning_rate 5.0e-6 \
--num_train_epochs 4 \
--per_device_train_batch_size 8 \
Expand Down
4 changes: 2 additions & 2 deletions jobs/dpo/dpo_short_piecewise_multi_gpu.sh
Original file line number Diff line number Diff line change
Expand Up @@ -7,7 +7,7 @@
#SBATCH --mem=64G
#SBATCH --time=24:00:00
#SBATCH --output=out/%x.%j.out # Include job name + job ID
#SBATCH --error=out/%x.%j.err # Include job name + job ID
#SBATCH --error=out/%x.%j.err # Include job name + job ID
#SBATCH --mail-type=ALL
#SBATCH --account=aip-rrabba
#SBATCH --mail-user=shahrad_m@icloud.com # Update with your email
Expand All @@ -20,7 +20,7 @@ accelerate launch --config_file benchmarks/dpo/accelerate_configs/deepspeed_zero
benchmarks/dpo/dpo_continual.py \
--dataset_name $dataset_name \
--model_name_or_path Qwen/Qwen2-0.5B-Instruct \
--reward_model_path LifelongAlignment/Qwen2.5-0.5B-Instruct_CPPO_REWARD \
--reward_model_path LifelongAlignment/Qwen2.5-0.5B-Instruct_${dataset_name}_REWARD \
--learning_rate 5.0e-6 \
--num_train_epochs 4 \
--per_device_train_batch_size 8 \
Expand Down
37 changes: 37 additions & 0 deletions jobs/dpo_ewc/dpo_ewc_cppo_multi_gpu.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,37 @@
#!/bin/bash
# SLURM job: continual DPO training with EWC on the CPPO-RL dataset
# (single node, 4x H100, launched via accelerate + DeepSpeed ZeRO-2).
#SBATCH --job-name=aif-gen-dpo-ewc-cppo
#SBATCH --nodes=1                        # Single node (previous comment wrongly said 2 nodes)
#SBATCH --gpus-per-node=h100:4           # Request 4 H100 GPUs per node
#SBATCH --ntasks-per-node=4              # One task per GPU
#SBATCH --cpus-per-task=6
#SBATCH --mem=64G
#SBATCH --time=24:00:00
#SBATCH --output=out/%x.%j.out           # Include job name + job ID
#SBATCH --error=out/%x.%j.err            # Include job name + job ID
#SBATCH --mail-type=ALL
#SBATCH --account=aip-rrabba
#SBATCH --mail-user=shahrad_m@icloud.com # Update with your email

# Load environment (expects $SCRATCH and any credentials used by W&B).
source .env

dataset_name='CPPO-RL'

# Use $dataset_name consistently (was hardcoded 'CPPO-RL' for --dataset_name).
# NOTE(review): reward model repo name is 'CPPO_REWARD', not "${dataset_name}_REWARD"
# as in the sibling scripts — presumably intentional for this dataset; confirm.
accelerate launch --config_file benchmarks/dpo/accelerate_configs/deepspeed_zero2.yaml \
  benchmarks/dpo_ewc/dpo_EWC_continual.py \
  --dataset_name "$dataset_name" \
  --model_name_or_path Qwen/Qwen2-0.5B-Instruct \
  --reward_model_path LifelongAlignment/Qwen2.5-0.5B-Instruct_CPPO_REWARD \
  --learning_rate 5.0e-6 \
  --num_train_epochs 4 \
  --per_device_train_batch_size 8 \
  --gradient_checkpointing \
  --logging_steps 20 \
  --eval_strategy steps \
  --response_length 256 \
  --eval_steps 500 \
  --save_steps 500 \
  --bf16 \
  --output_dir "$SCRATCH/projects/Qwen2-0.5B-DPO-EWC-${dataset_name}" \
  --no_remove_unused_columns \
  --wandb_project "$dataset_name" \
  --wandb_run_name "Qwen2-0.5B-DPO-EWC-${dataset_name}-multi-gpu"
37 changes: 37 additions & 0 deletions jobs/dpo_ewc/dpo_ewc_domain_shift_multi_gpu.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,37 @@
#!/bin/bash
# SLURM job: continual DPO training with EWC on the domain-preference-shift
# dataset (single node, 4x H100, launched via accelerate + DeepSpeed ZeRO-2).
#SBATCH --job-name=aif-gen-dpo-ewc-domain_shift
#SBATCH --nodes=1                        # Single node (previous comment wrongly said 2 nodes)
#SBATCH --gpus-per-node=h100:4           # Request 4 H100 GPUs per node
#SBATCH --ntasks-per-node=4              # One task per GPU
#SBATCH --cpus-per-task=6
#SBATCH --mem=64G
#SBATCH --time=24:00:00
#SBATCH --output=out/%x.%j.out           # Include job name + job ID
#SBATCH --error=out/%x.%j.err            # Include job name + job ID
#SBATCH --mail-type=ALL
#SBATCH --account=aip-rrabba
#SBATCH --mail-user=shahrad_m@icloud.com # Update with your email

# Load environment (expects $SCRATCH and any credentials used by W&B).
source .env

dataset_name='aifgen-domain-preference-shift'

accelerate launch --config_file benchmarks/dpo/accelerate_configs/deepspeed_zero2.yaml \
  benchmarks/dpo_ewc/dpo_EWC_continual.py \
  --dataset_name "$dataset_name" \
  --model_name_or_path Qwen/Qwen2-0.5B-Instruct \
  --reward_model_path "LifelongAlignment/Qwen2.5-0.5B-Instruct_${dataset_name}_REWARD" \
  --learning_rate 5.0e-6 \
  --num_train_epochs 4 \
  --per_device_train_batch_size 8 \
  --gradient_checkpointing \
  --logging_steps 20 \
  --eval_strategy steps \
  --response_length 256 \
  --eval_steps 500 \
  --save_steps 500 \
  --bf16 \
  --output_dir "$SCRATCH/projects/Qwen2-0.5B-DPO-EWC-${dataset_name}" \
  --no_remove_unused_columns \
  --wandb_project "$dataset_name" \
  --wandb_run_name "Qwen2-0.5B-DPO-EWC-${dataset_name}-multi-gpu"
37 changes: 37 additions & 0 deletions jobs/dpo_ewc/dpo_ewc_lipschitz_multi_gpu.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,37 @@
#!/bin/bash
# SLURM job: continual DPO training with EWC on the aifgen-lipschitz dataset
# (single node, 4x H100, launched via accelerate + DeepSpeed ZeRO-2).
#SBATCH --job-name=aif-gen-dpo-ewc-lipschitz
#SBATCH --nodes=1                        # Single node (previous comment wrongly said 2 nodes)
#SBATCH --gpus-per-node=h100:4           # Request 4 H100 GPUs per node
#SBATCH --ntasks-per-node=4              # One task per GPU
#SBATCH --cpus-per-task=6
#SBATCH --mem=64G
#SBATCH --time=24:00:00
#SBATCH --output=out/%x.%j.out           # Include job name + job ID
#SBATCH --error=out/%x.%j.err            # Include job name + job ID
#SBATCH --mail-type=ALL
#SBATCH --account=aip-rrabba
#SBATCH --mail-user=shahrad_m@icloud.com # Update with your email

# Load environment (expects $SCRATCH and any credentials used by W&B).
source .env

dataset_name='aifgen-lipschitz'

accelerate launch --config_file benchmarks/dpo/accelerate_configs/deepspeed_zero2.yaml \
  benchmarks/dpo_ewc/dpo_EWC_continual.py \
  --dataset_name "$dataset_name" \
  --model_name_or_path Qwen/Qwen2-0.5B-Instruct \
  --reward_model_path "LifelongAlignment/Qwen2.5-0.5B-Instruct_${dataset_name}_REWARD" \
  --learning_rate 5.0e-6 \
  --num_train_epochs 4 \
  --per_device_train_batch_size 8 \
  --gradient_checkpointing \
  --logging_steps 20 \
  --eval_strategy steps \
  --response_length 256 \
  --eval_steps 500 \
  --save_steps 500 \
  --bf16 \
  --output_dir "$SCRATCH/projects/Qwen2-0.5B-DPO-EWC-${dataset_name}" \
  --no_remove_unused_columns \
  --wandb_project "$dataset_name" \
  --wandb_run_name "Qwen2-0.5B-DPO-EWC-${dataset_name}-multi-gpu"
37 changes: 37 additions & 0 deletions jobs/dpo_ewc/dpo_ewc_long_piecewise_multi_gpu.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,37 @@
#!/bin/bash
# SLURM job: continual DPO training with EWC on the aifgen-long-piecewise
# dataset (single node, 4x H100, launched via accelerate + DeepSpeed ZeRO-2).
#SBATCH --job-name=aif-gen-dpo-ewc-long_piecewise
#SBATCH --nodes=1                        # Single node (previous comment wrongly said 2 nodes)
#SBATCH --gpus-per-node=h100:4           # Request 4 H100 GPUs per node
#SBATCH --ntasks-per-node=4              # One task per GPU
#SBATCH --cpus-per-task=6
#SBATCH --mem=64G
#SBATCH --time=24:00:00
#SBATCH --output=out/%x.%j.out           # Include job name + job ID
#SBATCH --error=out/%x.%j.err            # Include job name + job ID
#SBATCH --mail-type=ALL
#SBATCH --account=aip-rrabba
#SBATCH --mail-user=shahrad_m@icloud.com # Update with your email

# Load environment (expects $SCRATCH and any credentials used by W&B).
source .env

dataset_name='aifgen-long-piecewise'

accelerate launch --config_file benchmarks/dpo/accelerate_configs/deepspeed_zero2.yaml \
  benchmarks/dpo_ewc/dpo_EWC_continual.py \
  --dataset_name "$dataset_name" \
  --model_name_or_path Qwen/Qwen2-0.5B-Instruct \
  --reward_model_path "LifelongAlignment/Qwen2.5-0.5B-Instruct_${dataset_name}_REWARD" \
  --learning_rate 5.0e-6 \
  --num_train_epochs 4 \
  --per_device_train_batch_size 8 \
  --gradient_checkpointing \
  --logging_steps 20 \
  --eval_strategy steps \
  --response_length 256 \
  --eval_steps 500 \
  --save_steps 500 \
  --bf16 \
  --output_dir "$SCRATCH/projects/Qwen2-0.5B-DPO-EWC-${dataset_name}" \
  --no_remove_unused_columns \
  --wandb_project "$dataset_name" \
  --wandb_run_name "Qwen2-0.5B-DPO-EWC-${dataset_name}-multi-gpu"
37 changes: 37 additions & 0 deletions jobs/dpo_ewc/dpo_ewc_piecewise_multi_gpu.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,37 @@
#!/bin/bash
# SLURM job: continual DPO training with EWC on the piecewise-preference-shift
# dataset (single node, 4x H100, launched via accelerate + DeepSpeed ZeRO-2).
#SBATCH --job-name=aif-gen-dpo-ewc-piecewise-preference-shift
#SBATCH --nodes=1                        # Single node (previous comment wrongly said 2 nodes)
#SBATCH --gpus-per-node=h100:4           # Request 4 H100 GPUs per node
#SBATCH --ntasks-per-node=4              # One task per GPU
#SBATCH --cpus-per-task=6
#SBATCH --mem=64G
#SBATCH --time=24:00:00
#SBATCH --output=out/%x.%j.out           # Include job name + job ID
#SBATCH --error=out/%x.%j.err            # Include job name + job ID
#SBATCH --mail-type=ALL
#SBATCH --account=aip-rrabba
#SBATCH --mail-user=shahrad_m@icloud.com # Update with your email

# Load environment (expects $SCRATCH and any credentials used by W&B).
source .env

dataset_name='aifgen-piecewise-preference-shift'

accelerate launch --config_file benchmarks/dpo/accelerate_configs/deepspeed_zero2.yaml \
  benchmarks/dpo_ewc/dpo_EWC_continual.py \
  --dataset_name "$dataset_name" \
  --model_name_or_path Qwen/Qwen2-0.5B-Instruct \
  --reward_model_path "LifelongAlignment/Qwen2.5-0.5B-Instruct_${dataset_name}_REWARD" \
  --learning_rate 5.0e-6 \
  --num_train_epochs 4 \
  --per_device_train_batch_size 8 \
  --gradient_checkpointing \
  --logging_steps 20 \
  --eval_strategy steps \
  --response_length 256 \
  --eval_steps 500 \
  --save_steps 500 \
  --bf16 \
  --output_dir "$SCRATCH/projects/Qwen2-0.5B-DPO-EWC-${dataset_name}" \
  --no_remove_unused_columns \
  --wandb_project "$dataset_name" \
  --wandb_run_name "Qwen2-0.5B-DPO-EWC-${dataset_name}-multi-gpu"
37 changes: 37 additions & 0 deletions jobs/dpo_ewc/dpo_ewc_short_piecewise_multi_gpu.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,37 @@
#!/bin/bash
# SLURM job: continual DPO training with EWC on the aifgen-short-piecewise
# dataset (single node, 4x H100, launched via accelerate + DeepSpeed).
#
# Fixes vs. original: job name, --output_dir and --wandb_run_name were missing
# the "EWC" marker, so this run would collide with / overwrite the plain-DPO
# run's checkpoints and W&B run for the same dataset.
#SBATCH --job-name=aif-gen-dpo-ewc-short-piecewise
#SBATCH --nodes=1                        # Single node (previous comment wrongly said 2 nodes)
#SBATCH --gpus-per-node=h100:4           # Request 4 H100 GPUs per node
#SBATCH --ntasks-per-node=4              # One task per GPU
#SBATCH --cpus-per-task=6
#SBATCH --mem=64G
#SBATCH --time=24:00:00
#SBATCH --output=out/%x.%j.out           # Include job name + job ID
#SBATCH --error=out/%x.%j.err            # Include job name + job ID
#SBATCH --mail-type=ALL
#SBATCH --account=aip-rrabba
#SBATCH --mail-user=shahrad_m@icloud.com # Update with your email

# Load environment (expects $SCRATCH and any credentials used by W&B).
source .env

dataset_name='aifgen-short-piecewise'

# NOTE(review): this script uses deepspeed_zero3.yaml while every sibling EWC
# script uses deepspeed_zero2.yaml — confirm whether ZeRO-3 is intentional here.
accelerate launch --config_file benchmarks/dpo/accelerate_configs/deepspeed_zero3.yaml \
  benchmarks/dpo_ewc/dpo_EWC_continual.py \
  --dataset_name "$dataset_name" \
  --model_name_or_path Qwen/Qwen2-0.5B-Instruct \
  --reward_model_path "LifelongAlignment/Qwen2.5-0.5B-Instruct_${dataset_name}_REWARD" \
  --learning_rate 5.0e-6 \
  --num_train_epochs 4 \
  --per_device_train_batch_size 8 \
  --gradient_checkpointing \
  --logging_steps 20 \
  --eval_strategy steps \
  --response_length 256 \
  --eval_steps 500 \
  --save_steps 500 \
  --bf16 \
  --output_dir "$SCRATCH/projects/Qwen2-0.5B-DPO-EWC-${dataset_name}" \
  --no_remove_unused_columns \
  --wandb_project "$dataset_name" \
  --wandb_run_name "Qwen2-0.5B-DPO-EWC-${dataset_name}-multi-gpu"
Loading