Mirror of https://github.com/leigest519/ScreenCoder.git (synced 2026-02-13 10:12:46 +00:00)
Add post-training folder
@@ -0,0 +1,21 @@
output_dir: /path/to/output/runs/Qwen2.5-VL-3B-Idefics-V3-RSN-ai2d-500steps
model_name_or_path: /path/to/models/Qwen2.5-VL-3B-Instruct
dataset_name: Idefics-ai2d
data_file_paths: /path/to/data/ai2d.jsonl
image_folders: /path/to/images
max_prompt_length: 1024
per_device_train_batch_size: 1
gradient_accumulation_steps: 2
logging_steps: 1
bf16: true
report_to: wandb
gradient_checkpointing: false
deepspeed: /path/to/config/zero3.json
attn_implementation: flash_attention_2
max_pixels: 401408
max_steps: 500
run_name: Qwen2.5-VL-3B-Idefics-V3-RSN-ai2d-500steps-multinode
save_steps: 100
save_total_limit: 3
save_only_model: true
num_generations: 8
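
# Editor's note: this file appears to be the ${RUN_NAME}_args.yaml read by build_train_args()
# in multinode_training_demo.sh below, which expands each "key: value" pair into a CLI flag for src/train.py.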
post-training/VLM-R1/run_scripts/multinode_training_demo.sh  (new file, +145 lines)
@@ -0,0 +1,145 @@
#!/bin/bash

RUN_NAME=multinode_training # assume there is a ${RUN_NAME}_args.yaml file in the current directory

declare -A node2ip_map
node2ip_map=(
    ["node1"]="192.168.1.101"
    ["node2"]="192.168.1.102"
    ["node3"]="192.168.1.103"
    ["node4"]="192.168.1.104"
)

# Default nodes if no arguments provided
DEFAULT_NODES=("node1" "node2")

# Local codebase path in file system
LOCAL_CODEBASE_PATH="/path/to/your/codebase"

# Use provided nodes or default nodes
if [ "$#" -ge 1 ]; then
    NODES=("$@")
else
    NODES=("${DEFAULT_NODES[@]}")
    echo "Using default nodes: ${NODES[*]}"
fi
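
# Usage sketch (editor's addition): pass node names defined in node2ip_map on the command line,
# e.g. `bash multinode_training_demo.sh node1 node2 node3`; with no arguments, DEFAULT_NODES is used.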

# Add this debug line
echo "All nodes in order: ${NODES[@]}"

TOTAL_NODES=${#NODES[@]}
MASTER_NODE=${NODES[0]}
MASTER_PORT=12345

# Get project root directory (using the directory where this script is located)
PROJECT_ROOT="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
echo "Project root directory: $PROJECT_ROOT"

# Get master node IP address
echo "MASTER_NODE: $MASTER_NODE"
MASTER_IP="${node2ip_map[$MASTER_NODE]}"
echo "Master node IP: $MASTER_IP"

# Create log directory for each node
LOG_DIR="/path/to/your/log/dir"
mkdir -p $LOG_DIR

# Generate docker-compose.yml
echo "Generating docker-compose.yml..."
cat > docker-compose.yml << EOL
version: '3.8'

services:
  trainer:
    image: your/training-image:tag
    deploy:
      resources:
        reservations:
          devices:
            - driver: nvidia
              count: all
              capabilities: [gpu]
    shm_size: '8gb'
    volumes:
      - /path/to/data:/data
      - $LOCAL_CODEBASE_PATH/src:/workspace/src
    environment:
      - MASTER_ADDR=\${MASTER_ADDR:-$MASTER_IP}
      - MASTER_PORT=\${MASTER_PORT:-12345}
      - NODE_RANK=\${NODE_RANK:-0}
      - WORLD_SIZE=\${WORLD_SIZE:-4}
      - DEBUG_MODE=true
      - LOG_PATH=${LOG_DIR}/debug_log.txt
      - WANDB_API_KEY=your_wandb_api_key # Optional: for logging with Weights & Biases
      - WANDB_PROJECT=your_project_name
      - WANDB_RUN_NAME=${RUN_NAME}-$(date +%Y-%m-%d-%H-%M-%S)
      - PYTHONPATH=/workspace/src
    network_mode: "host"
    command: /bin/bash
    working_dir: /workspace
EOL

# Function to build training arguments from yaml
build_train_args() {
    args=""
    while IFS=": " read -r key value; do
        # skip blank lines and comment lines
        [[ -z "$key" || "$key" =~ ^[[:space:]]*# ]] && continue
        # trim surrounding whitespace and quotes from the value
        value=$(echo "$value" | sed -e 's/^[[:space:]]*//' -e 's/[[:space:]]*$//' -e 's/^"//' -e 's/"$//')
        if [[ "$value" == "true" ]]; then
            args="$args --$key"        # boolean true -> bare flag
        elif [[ "$value" == "false" ]]; then
            continue                   # boolean false -> flag is omitted
        else
            args="$args --$key $value"
        fi
    done < ${RUN_NAME}_args.yaml
    echo "$args"
}

# Get training arguments
TRAIN_ARGS=$(build_train_args)
echo "TRAIN_ARGS: $TRAIN_ARGS"
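
# Expansion sketch (editor's addition, assuming the YAML at the top of this commit is the
# ${RUN_NAME}_args.yaml): TRAIN_ARGS would expand to something like
#   --output_dir /path/to/output/runs/Qwen2.5-VL-3B-Idefics-V3-RSN-ai2d-500steps \
#   --model_name_or_path /path/to/models/Qwen2.5-VL-3B-Instruct --dataset_name Idefics-ai2d ... \
#   --bf16 --max_steps 500 --num_generations 8
# with "bf16: true" reduced to a bare --bf16 flag and "gradient_checkpointing: false" dropped.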

# Launch containers on each node
NODE_RANK=0
for host in "${NODES[@]}"; do
    LOG_FILE="$LOG_DIR/${host}_rank${NODE_RANK}.log"
    if [ "$host" = "$MASTER_NODE" ]; then
        echo "Launching on master $host with rank $NODE_RANK, logging to $LOG_FILE"
        ssh $host "cd $PROJECT_ROOT && \
            MASTER_ADDR=$MASTER_IP \
            NODE_RANK=$NODE_RANK \
            WORLD_SIZE=$TOTAL_NODES \
            sudo -E docker-compose -f docker-compose.yml run --rm trainer \
            torchrun --nproc_per_node=8 \
                --nnodes=$TOTAL_NODES \
                --node_rank=$NODE_RANK \
                --master_addr=$MASTER_IP \
                --master_port=$MASTER_PORT \
                src/train.py \
                $TRAIN_ARGS" > "$LOG_FILE" 2>&1 &
    else
        echo "Launching on $host with rank $NODE_RANK, logging to $LOG_FILE"
        ssh $host "cd $PROJECT_ROOT && \
            MASTER_ADDR=$MASTER_IP \
            NODE_RANK=$NODE_RANK \
            WORLD_SIZE=$TOTAL_NODES \
            sudo -E docker-compose -f docker-compose.yml run --rm trainer \
            torchrun --nproc_per_node=8 \
                --nnodes=$TOTAL_NODES \
                --node_rank=$NODE_RANK \
                --master_addr=$MASTER_IP \
                --master_port=$MASTER_PORT \
                src/train.py \
                $TRAIN_ARGS" > "$LOG_FILE" 2>&1 &
    fi

    NODE_RANK=$((NODE_RANK + 1))
done

echo "Jobs launched. To monitor the logs, you can:"
echo "1. Use 'tail -f $LOG_DIR/*.log' to watch all logs"
echo "2. Use 'tail -f $LOG_DIR/<node_name>_rank<N>.log' to watch a specific node"

# Wait for all background processes to complete
wait
post-training/VLM-R1/run_scripts/run_grpo_gui.sh  (new file, +58 lines)
@@ -0,0 +1,58 @@
PROJECT_ROOT="$( cd "$( dirname "${BASH_SOURCE[0]}" )/.." && pwd )"
export REPO_HOME="${PROJECT_ROOT}" # TODO: change this to your own
echo "REPO_HOME: $REPO_HOME"
# on remote
data_paths="${REPO_HOME}/src/open-r1-multimodal/data_jsonl/gui_multi-image.jsonl"
image_folders="/data9/shz/project/vlm-r1/VLM-R1/images/gui_multi-image"
model_path="/data9/shz/ckpt/Qwen2.5-VL-3B-Instruct"
is_reward_customized_from_vlm_module=False
reward_methods="all_match"
echo "data_paths: $data_paths"
echo "image_folders: $image_folders"

export EXP_NAME="GUI-multi-image" # TODO: change this to your own experiment name
TASK_TYPE="gui"
cd ${REPO_HOME}/src/open-r1-multimodal

export DEBUG_MODE="true" # Enable debug output if you want to see the model's rollouts during RL
# create the run directory and log file
mkdir -p ${REPO_HOME}/runs/${EXP_NAME}/log
export LOG_PATH="${REPO_HOME}/runs/${EXP_NAME}/log/debug_log.$(date +%Y-%m-%d-%H-%M-%S).txt"
MAX_STEPS=1200 # TODO: change this to your own max steps

# export WANDB_DISABLED=true
# CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6
torchrun --nproc_per_node="8" \
    --nnodes="1" \
    --node_rank="0" \
    --master_addr="127.0.0.1" \
    --master_port="12349" \
    src/open_r1/grpo_jsonl.py \
    --use_vllm False \
    --output_dir ${REPO_HOME}/checkpoints/rl/${EXP_NAME} \
    --resume_from_checkpoint True \
    --model_name_or_path $model_path \
    --data_file_paths $data_paths \
    --image_folders $image_folders \
    --is_reward_customized_from_vlm_module $is_reward_customized_from_vlm_module \
    --reward_method $reward_methods \
    --task_type $TASK_TYPE \
    --per_device_train_batch_size 2 \
    --gradient_accumulation_steps 2 \
    --gradient_checkpointing true \
    --logging_steps 1 \
    --num_train_epochs 2 \
    --max_steps $MAX_STEPS \
    --bf16 \
    --attn_implementation flash_attention_2 \
    --run_name ${EXP_NAME} \
    --save_steps 400 \
    --num_generations 8 \
    --max_completion_length 2048 \
    --reward_funcs accuracy format \
    --beta 0.04 \
    --report_to wandb \
    --dataset-name not_used \
    --deepspeed ${REPO_HOME}/src/open-r1-multimodal/local_scripts/zero3.json \

echo "Training completed for ${EXP_NAME}"
post-training/VLM-R1/run_scripts/run_grpo_rec.sh  (new file, +57 lines)
@@ -0,0 +1,57 @@
PROJECT_ROOT="$( cd "$( dirname "${BASH_SOURCE[0]}" )/.." && pwd )"
export REPO_HOME="${PROJECT_ROOT}"
echo "REPO_HOME: $REPO_HOME"
# Change the data_paths and image_folders to your own data
data_paths="/training/shz/dataset/vlm-r1/rec_jsonsl_train/refcoco_train.jsonl:/training/shz/dataset/vlm-r1/rec_jsonsl_train/refcocop_train.jsonl:/training/shz/dataset/vlm-r1/rec_jsonsl_train/refcocog_train.jsonl"
image_folders="/training/shz/dataset/coco:/training/shz/dataset/coco:/training/shz/dataset/coco"
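# Editor's note: data_paths and image_folders are colon-separated lists; presumably the i-th jsonl is
# paired with the i-th image folder (here all three RefCOCO-style datasets share the same COCO image root).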
model_path="/training/models/Qwen2.5-VL-3B-Instruct"
is_reward_customized_from_vlm_module=True
echo "data_paths: $data_paths"
echo "image_folders: $image_folders"

export EXP_NAME="Qwen2.5-VL-3B-Instruct-rec" # TODO: change this to your own experiment name
TASK_TYPE="rec"
cd ${REPO_HOME}/src/open-r1-multimodal

export DEBUG_MODE="true" # Enable debug output if you want to see the model's rollouts during RL
# create the run directory and log file
mkdir -p ${REPO_HOME}/runs/${EXP_NAME}/log
export LOG_PATH="${REPO_HOME}/runs/${EXP_NAME}/log/debug_log.$(date +%Y-%m-%d-%H-%M-%S).txt"
# MAX_STEPS=1200 # TODO: change this to your own max steps

# export WANDB_DISABLED=true
# CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6
torchrun --nproc_per_node="8" \
    --nnodes="1" \
    --node_rank="0" \
    --master_addr="127.0.0.1" \
    --master_port="12349" \
    src/open_r1/grpo_jsonl.py \
    --use_vllm False \
    --output_dir ${REPO_HOME}/checkpoints/rl/${EXP_NAME} \
    --resume_from_checkpoint True \
    --model_name_or_path $model_path \
    --data_file_paths $data_paths \
    --image_folders $image_folders \
    --is_reward_customized_from_vlm_module $is_reward_customized_from_vlm_module \
    --task_type $TASK_TYPE \
    --per_device_train_batch_size 8 \
    --gradient_accumulation_steps 2 \
    --gradient_checkpointing true \
    --logging_steps 1 \
    --num_train_epochs 2 \
    --bf16 \
    --attn_implementation flash_attention_2 \
    --run_name ${EXP_NAME} \
    --data_seed 42 \
    --save_steps 100 \
    --num_generations 8 \
    --max_completion_length 2048 \
    --reward_funcs accuracy format \
    --beta 0.04 \
    --report_to wandb \
    --dataset-name this_is_not_used \
    --deepspeed ${REPO_HOME}/src/open-r1-multimodal/local_scripts/zero3.json \

echo "Training completed for ${EXP_NAME}"
post-training/VLM-R1/run_scripts/run_grpo_rec_internvl.sh  (new file, +58 lines)
@@ -0,0 +1,58 @@
PROJECT_ROOT="$( cd "$( dirname "${BASH_SOURCE[0]}" )/.." && pwd )"
export REPO_HOME="${PROJECT_ROOT}"
echo "REPO_HOME: $REPO_HOME"
# on remote
data_paths="/training/shz/dataset/vlm-r1/rec_jsonsl_train/refcoco_train.jsonl:/training/shz/dataset/vlm-r1/rec_jsonsl_train/refcocop_train.jsonl:/training/shz/dataset/vlm-r1/rec_jsonsl_train/refcocog_train.jsonl"
image_folders="/training/shz/dataset/coco:/training/shz/dataset/coco:/training/shz/dataset/coco"
model_path="OpenGVLab/InternVL2_5-4B-MPO"
is_reward_customized_from_vlm_module=True
echo "data_paths: $data_paths"
echo "image_folders: $image_folders"

export EXP_NAME="InternVL2_5-4B_MPO-rec" # TODO: change this to your own experiment name
TASK_TYPE="rec"
cd ${REPO_HOME}/src/open-r1-multimodal

export DEBUG_MODE="true" # Enable debug output if you want to see the model's rollouts during RL
# create the run directory and log file
mkdir -p ${REPO_HOME}/runs/${EXP_NAME}/log
export LOG_PATH="${REPO_HOME}/runs/${EXP_NAME}/log/debug_log.$(date +%Y-%m-%d-%H-%M-%S).txt"
# MAX_STEPS=1200 # TODO: change this to your own max steps

# export WANDB_DISABLED=true
# CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6
torchrun --nproc_per_node="8" \
    --nnodes="1" \
    --node_rank="0" \
    --master_addr="127.0.0.1" \
    --master_port="12349" \
    src/open_r1/grpo_jsonl.py \
    --use_vllm False \
    --output_dir ${REPO_HOME}/checkpoints/rl/${EXP_NAME} \
    --resume_from_checkpoint True \
    --model_name_or_path $model_path \
    --data_file_paths $data_paths \
    --image_folders $image_folders \
    --is_reward_customized_from_vlm_module $is_reward_customized_from_vlm_module \
    --task_type $TASK_TYPE \
    --max_anyres_num 6 \
    --per_device_train_batch_size 8 \
    --gradient_accumulation_steps 2 \
    --gradient_checkpointing true \
    --logging_steps 1 \
    --num_train_epochs 2 \
    --bf16 \
    --attn_implementation flash_attention_2 \
    --run_name ${EXP_NAME} \
    --data_seed 42 \
    --save_steps 100 \
    --num_generations 8 \
    --max_completion_length 2048 \
    --reward_funcs accuracy format \
    --beta 0.04 \
    --report_to wandb \
    --dataset-name this_is_not_used \
    --deepspeed ${REPO_HOME}/src/open-r1-multimodal/local_scripts/zero3.json \

echo "Training completed for ${EXP_NAME}"
post-training/VLM-R1/run_scripts/run_grpo_rec_lora.sh  (new file, +64 lines)
@@ -0,0 +1,64 @@
PROJECT_ROOT="$( cd "$( dirname "${BASH_SOURCE[0]}" )/.." && pwd )"
export REPO_HOME="${PROJECT_ROOT}"
echo "REPO_HOME: $REPO_HOME"
# on remote
data_paths="/training/shz/dataset/vlm-r1/rec_jsonsl_train/refcoco_train.jsonl:/training/shz/dataset/vlm-r1/rec_jsonsl_train/refcocop_train.jsonl:/training/shz/dataset/vlm-r1/rec_jsonsl_train/refcocog_train.jsonl"
image_folders="/training/shz/dataset/coco:/training/shz/dataset/coco:/training/shz/dataset/coco"
model_path="/training/models/Qwen2.5-VL-3B-Instruct"
is_reward_customized_from_vlm_module=True
echo "data_paths: $data_paths"
echo "image_folders: $image_folders"

export EXP_NAME="Qwen2.5-VL-3B-Instruct-rec-lora" # TODO: change this to your own experiment name
TASK_TYPE="rec"
cd ${REPO_HOME}/src/open-r1-multimodal

export DEBUG_MODE="true" # Enable debug output if you want to see the model's rollouts during RL
# create the run directory and log file
mkdir -p ${REPO_HOME}/runs/${EXP_NAME}/log
export LOG_PATH="${REPO_HOME}/runs/${EXP_NAME}/log/debug_log.$(date +%Y-%m-%d-%H-%M-%S).txt"
# MAX_STEPS=1200 # TODO: change this to your own max steps

# export WANDB_DISABLED=true
# CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6
torchrun --nproc_per_node="8" \
    --nnodes="1" \
    --node_rank="0" \
    --master_addr="127.0.0.1" \
    --master_port="12349" \
    src/open_r1/grpo_jsonl.py \
    --use_vllm False \
    --output_dir ${REPO_HOME}/checkpoints/rl/${EXP_NAME} \
    --resume_from_checkpoint True \
    --model_name_or_path $model_path \
    --data_file_paths $data_paths \
    --image_folders $image_folders \
    --is_reward_customized_from_vlm_module $is_reward_customized_from_vlm_module \
    --task_type $TASK_TYPE \
    --per_device_train_batch_size 8 \
    --gradient_accumulation_steps 2 \
    --gradient_checkpointing true \
    --logging_steps 1 \
    --num_train_epochs 2 \
    --bf16 \
    --attn_implementation flash_attention_2 \
    --run_name ${EXP_NAME} \
    --data_seed 42 \
    --save_steps 100 \
    --num_generations 8 \
    --max_completion_length 2048 \
    --reward_funcs accuracy format \
    --beta 0.04 \
    --report_to wandb \
    --dataset-name this_is_not_used \
    --deepspeed ${REPO_HOME}/src/open-r1-multimodal/local_scripts/zero2.json \
    --learning_rate 1e-5 \
    --use_peft true \
    --lora_r 64 \
    --lora_alpha 128 \
    --lora_dropout 0.05 \
    --lora_task_type CAUSAL_LM \
    --freeze_vision_modules true
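
# Editor's note: with --use_peft true only the LoRA adapter weights (not the full model) are trained;
# lora_alpha/lora_r = 128/64 gives the usual scaling factor of 2, and --freeze_vision_modules true keeps
# the vision encoder frozen. This variant also switches DeepSpeed from zero3.json to zero2.json,
# presumably because the much smaller set of trainable parameters no longer needs ZeRO-3 parameter sharding.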

echo "Training completed for ${EXP_NAME}"
post-training/VLM-R1/run_scripts/run_grpo_rec_more_params.sh  (new file, +60 lines)
@@ -0,0 +1,60 @@
PROJECT_ROOT="$( cd "$( dirname "${BASH_SOURCE[0]}" )/.." && pwd )"
export REPO_HOME="${PROJECT_ROOT}"
echo "REPO_HOME: $REPO_HOME"
# on remote
data_paths="/training/shz/dataset/vlm-r1/rec_jsonsl_train/refcoco_train.jsonl:/training/shz/dataset/vlm-r1/rec_jsonsl_train/refcocop_train.jsonl:/training/shz/dataset/vlm-r1/rec_jsonsl_train/refcocog_train.jsonl"
image_folders="/training/shz/dataset/coco:/training/shz/dataset/coco:/training/shz/dataset/coco"
model_path="/training/models/Qwen2.5-VL-3B-Instruct"
is_reward_customized_from_vlm_module=True
echo "data_paths: $data_paths"
echo "image_folders: $image_folders"

export EXP_NAME="Qwen2.5-VL-3B-Instruct-rec-more-params" # TODO: change this to your own experiment name
TASK_TYPE="rec"
cd ${REPO_HOME}/src/open-r1-multimodal

export DEBUG_MODE="true" # Enable debug output if you want to see the model's rollouts during RL
# create the run directory and log file
mkdir -p ${REPO_HOME}/runs/${EXP_NAME}/log
export LOG_PATH="${REPO_HOME}/runs/${EXP_NAME}/log/debug_log.$(date +%Y-%m-%d-%H-%M-%S).txt"
# MAX_STEPS=1200 # TODO: change this to your own max steps

# export WANDB_DISABLED=true
# CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6
torchrun --nproc_per_node="8" \
    --nnodes="1" \
    --node_rank="0" \
    --master_addr="127.0.0.1" \
    --master_port="12349" \
    src/open_r1/grpo_jsonl.py \
    --use_vllm False \
    --output_dir ${REPO_HOME}/checkpoints/rl/${EXP_NAME} \
    --resume_from_checkpoint True \
    --model_name_or_path $model_path \
    --data_file_paths $data_paths \
    --image_folders $image_folders \
    --is_reward_customized_from_vlm_module $is_reward_customized_from_vlm_module \
    --task_type $TASK_TYPE \
    --per_device_train_batch_size 8 \
    --gradient_accumulation_steps 2 \
    --gradient_checkpointing true \
    --logging_steps 1 \
    --num_train_epochs 2 \
    --bf16 \
    --attn_implementation flash_attention_2 \
    --run_name ${EXP_NAME} \
    --data_seed 42 \
    --save_steps 100 \
    --num_generations 8 \
    --max_completion_length 2048 \
    --reward_funcs accuracy format \
    --beta 0.04 \
    --epsilon_high 0.28 \
    --report_to wandb \
    --dataset-name this_is_not_used \
    --deepspeed ${REPO_HOME}/src/open-r1-multimodal/local_scripts/zero3.json \

# Compared with the general GRPO training script (run_grpo_rec.sh), the only additional parameter here is
# --epsilon_high 0.28, which raises the upper bound of the GRPO importance-ratio clipping range.

echo "Training completed for ${EXP_NAME}"