From 1a7cf421b92e5d4725a2ccbabc40d515e27c60b1 Mon Sep 17 00:00:00 2001 From: Taishi-N324 Date: Mon, 14 Oct 2024 22:08:54 +0900 Subject: [PATCH 1/8] Add v3-moe-recipes-sakura --- .../v3-moe-recipes-sakura/README.md | 46 +++++++ .../example/checkpoint_init.py | 31 +++++ .../v3-moe-recipes-sakura/example/config.json | 31 +++++ .../v3-moe-recipes-sakura/example/sbatch.sh | 125 ++++++++++++++++++ .../v3-moe-recipes-sakura/example/train.sh | 113 ++++++++++++++++ .../v3-moe-recipes-sakura/install.sh | 117 ++++++++++++++++ .../v3-moe-recipes-sakura/requirements.txt | 34 +++++ .../scripts/environment.sh | 25 ++++ .../scripts/mpi_variables.sh | 56 ++++++++ 9 files changed, 578 insertions(+) create mode 100644 pretrain/installers/v3-moe-recipes-sakura/README.md create mode 100644 pretrain/installers/v3-moe-recipes-sakura/example/checkpoint_init.py create mode 100644 pretrain/installers/v3-moe-recipes-sakura/example/config.json create mode 100644 pretrain/installers/v3-moe-recipes-sakura/example/sbatch.sh create mode 100644 pretrain/installers/v3-moe-recipes-sakura/example/train.sh create mode 100644 pretrain/installers/v3-moe-recipes-sakura/install.sh create mode 100644 pretrain/installers/v3-moe-recipes-sakura/requirements.txt create mode 100644 pretrain/installers/v3-moe-recipes-sakura/scripts/environment.sh create mode 100644 pretrain/installers/v3-moe-recipes-sakura/scripts/mpi_variables.sh diff --git a/pretrain/installers/v3-moe-recipes-sakura/README.md b/pretrain/installers/v3-moe-recipes-sakura/README.md new file mode 100644 index 00000000..5a9850fe --- /dev/null +++ b/pretrain/installers/v3-moe-recipes-sakura/README.md @@ -0,0 +1,46 @@ +# moe-recipes installation script for Sakura/LLM-jp v3 models + +LLM-jp v3シリーズでのMoE実験を行うためのmoe-recipes環境をSakuraクラスタにインストールするためのスクリプトです。 +System Pythonやpyenvに依存しない閉じた環境を指定したディレクトリ上に構築します。 + +## Usage + +### Build + +インストール処理のためにSakuraクラスタのCPUノードを1個使用します。 +時間がかかるので気長に待って下さい。 + +```shell +git clone https://github.com/llm-jp/scripts +cd pretrain/installers/v3-moe-recipes-sakura + +# ~/myspace に環境をインストールします。 +sbatch install.sh ~/myspace +``` + +### Check + +インストール終了後、下記のようなディレクトリ構造が構築されています。 + +``` +~/myspace/ + example/ サンプルスクリプト + installer_envvar.log インストール開始後に記録した環境変数の一覧 + install.sh 使用したインストールスクリプト + python/ Python実行環境 + requirements.txt venvに事前インストールされたライブラリ一覧 + scripts/ 各種の環境設定用スクリプト + src/ 個別ダウンロードされたライブラリ + venv/ Python仮想環境 (python/ にリンク) +``` + +インストールした環境で正常に事前学習ジョブを起動できるかどうかを確認します。 + +```shell +cd ~/myspace + +# デフォルトでは1ノードを専有し、GPUを8枚全て使うジョブが起動します。 +sbatch example/sbatch.sh + +# W&Bにtrain lossが記録されるのを確認したらジョブを止めてください。 +``` diff --git a/pretrain/installers/v3-moe-recipes-sakura/example/checkpoint_init.py b/pretrain/installers/v3-moe-recipes-sakura/example/checkpoint_init.py new file mode 100644 index 00000000..108ede51 --- /dev/null +++ b/pretrain/installers/v3-moe-recipes-sakura/example/checkpoint_init.py @@ -0,0 +1,31 @@ +from transformers import AutoConfig, AutoModelForCausalLM +import os + +def save_model_and_config(model_name, save_directory): + # Check if the directory already exists + if os.path.exists(save_directory): + print(f"Directory {save_directory} already exists. 
Skipping model saving.") + return + + # Load config + config = AutoConfig.from_pretrained(model_name) + print(f"Config loaded from {model_name}") + + # Create model from config + model = AutoModelForCausalLM.from_config(config) + print("Model created from config") + + # Create save directory + os.makedirs(save_directory) + + # Save model and config + model.save_pretrained(save_directory) + config.save_pretrained(save_directory) + + print(f"Model and config have been saved to {save_directory}") + +if __name__ == "__main__": + model_name = "example/config.json" + save_directory = "Mixtral-llm-jp-v3-8x1.8B-checkpoint_init/" + + save_model_and_config(model_name, save_directory) \ No newline at end of file diff --git a/pretrain/installers/v3-moe-recipes-sakura/example/config.json b/pretrain/installers/v3-moe-recipes-sakura/example/config.json new file mode 100644 index 00000000..0899b3a6 --- /dev/null +++ b/pretrain/installers/v3-moe-recipes-sakura/example/config.json @@ -0,0 +1,31 @@ +{ + "_name_or_path": "", + "architectures": [ + "MixtralForCausalLM" + ], + "attention_dropout": 0.0, + "bos_token_id": 1, + "eos_token_id": 2, + "hidden_act": "silu", + "hidden_size": 2048, + "initializer_range": 0.02, + "intermediate_size": 7168, + "max_position_embeddings": 4096, + "model_type": "mixtral", + "num_attention_heads": 16, + "num_experts_per_tok": 2, + "num_hidden_layers": 24, + "num_key_value_heads": 16, + "num_local_experts": 8, + "output_router_logits": true, + "rms_norm_eps": 1e-05, + "rope_theta": 10000.0, + "router_aux_loss_coef": 0.01, + "router_jitter_noise": 0.0, + "sliding_window": null, + "tie_word_embeddings": false, + "torch_dtype": "bfloat16", + "transformers_version": "4.46.0.dev0", + "use_cache": false, + "vocab_size": 99574 + } \ No newline at end of file diff --git a/pretrain/installers/v3-moe-recipes-sakura/example/sbatch.sh b/pretrain/installers/v3-moe-recipes-sakura/example/sbatch.sh new file mode 100644 index 00000000..7c1f073a --- /dev/null +++ b/pretrain/installers/v3-moe-recipes-sakura/example/sbatch.sh @@ -0,0 +1,125 @@ +#!/bin/bash +# +# Example sbatch launcher script of pretraining tasks. +# +# This script only constructs cluster-related environment variables, and immediately +# calls mpirun with train.sh, which implements an actual invocation of the Megatron-LM +# trainer script. +# +# This script is installed together with other tools so that you can check if the +# installed environment works as expected by launching the job using this script. +# +# Usage: +# 1. cd {root directory that you installed training scripts} +# 2. 
sbatch example/sbatch.sh + +#SBATCH --job-name=pretrain-test +#SBATCH --partition=gpu-debug +#SBATCH --nodes=1 +#SBATCH --gres=gpu:8 +#SBATCH --ntasks-per-node=8 +#SBATCH --output=%x-%j.out +#SBATCH --error=%x-%j.err + +set -eu -o pipefail + +source scripts/environment.sh +source venv/bin/activate + +# CUTLASS +CUTLASS_HOME=src/cutlass/build +export LD_LIBRARY_PATH=${LD_LIBRARY_PATH}:${CUTLASS_HOME}/lib + +export MASTER_ADDR="$(scontrol show hostname $SLURM_JOB_NODELIST | head -n1)" +export MASTER_PORT=12800 + +echo "MASTER_ADDR=${MASTER_ADDR}" + +NUM_NODES=$SLURM_JOB_NUM_NODES +NUM_GPUS_PER_NODE=$(echo $SLURM_TASKS_PER_NODE | cut -d '(' -f 1) +NUM_GPUS=$((${NUM_NODES} * ${NUM_GPUS_PER_NODE})) + +echo NUM_NODES=$NUM_NODES +echo NUM_GPUS_PER_NODE=$NUM_GPUS_PER_NODE +echo NUM_GPUS=$NUM_GPUS + +MICRO_BATCH_SIZE=4 +GLOBAL_BATCH_SIZE=1024 +GRADIENTS_ACCUMULATION_STEPS=$((GLOBAL_BATCH_SIZE / MICRO_BATCH_SIZE / NUM_GPUS)) + +if [ $GRADIENTS_ACCUMULATION_STEPS -lt 1 ]; then + echo "Global batch size is too small for the number of GPUs" + exit 1 +fi + +WEIGHT_DECAY=0.1 +GRAD_CLIP=1 + +# deepspeed config +DEEPSPEED_CONFIG="deepspeed_config.json" + +BF16_ENABLED=true +DEEPSPEED_ZERO_STAGE=3 + +OVERLAP_COMMUNICATION=true +CONTINOUS_GRADIENTS=true + +DEEPSPEED_SUB_GROUP_SIZE=1e12 +DEEPSPEED_REDUCE_BUCKET_SIZE=1e9 +DEEPSPEED_STAGE3_PREFETCH_BUCKET_SIZE=5e8 +DEEPSPEED_STAGE3_PARAM_PERSISTENCE_THRESHOLD=1e6 + +DEEPSPEED_STAGE3_MAX_LIVE_PARAMETERS=1e9 +DEEPSPEED_STAGE3_MAX_REUSE_DISTANCE=1e9 + +WALL_CLOCK_BREAKDOWN=false + +DEEPSPEED_CONGIG_CONTENT=$( + cat <"src/moe-recipes/${DEEPSPEED_CONFIG}" + +# Initialization +python example/checkpoint_init.py + +mpirun \ + -np $NUM_GPUS \ + --npernode $NUM_GPUS_PER_NODE \ + -bind-to none \ + -map-by slot \ + -x MASTER_ADDR=$MASTER_ADDR \ + -x MASTER_PORT=$MASTER_PORT \ + -x NUM_NODES=$NUM_NODES \ + -x NUM_GPUS_PER_NODE=$NUM_GPUS_PER_NODE \ + -x GRADIENTS_ACCUMULATION_STEPS=$GRADIENTS_ACCUMULATION_STEPS \ + -x MICRO_BATCH_SIZE=$MICRO_BATCH_SIZE \ + -x GLOBAL_BATCH_SIZE=$GLOBAL_BATCH_SIZE \ + -x DEEPSPEED_CONFIG=$DEEPSPEED_CONFIG \ + -x DEEPSPEED_ZERO_STAGE=$DEEPSPEED_ZERO_STAGE \ + bash example/train.sh diff --git a/pretrain/installers/v3-moe-recipes-sakura/example/train.sh b/pretrain/installers/v3-moe-recipes-sakura/example/train.sh new file mode 100644 index 00000000..4a3f930e --- /dev/null +++ b/pretrain/installers/v3-moe-recipes-sakura/example/train.sh @@ -0,0 +1,113 @@ +#!/bin/bash +# Node-level moe-recipes launcher +# +# Environment variables that the script expects to be passed from mpirun: +# * MASTER_ADDR: Address of the master node +# * MASTER_PORT: Port number of the master node +# * NUM_NODES: Number of nodes assigned for this task +# * NUM_GPUS_PER_NODE: Number of GPUs in the node assined for this task + +set -eu -o pipefail + +source scripts/environment.sh +source scripts/mpi_variables.sh +source venv/bin/activate + +export LOGLEVEL=INFO +export NCCL_DEBUG=WARN +export NCCL_DEBUG_SUBSYS=WARN +export PYTHONFAULTHANDLER=1 +export CUDA_LAUNCH_BLOCKING=0 +export CUDNN_LOGDEST_DBG=stderr +export CUDNN_LOGERR_DBG=1 + +NUM_GPUS=$((${NUM_NODES} * ${NUM_GPUS_PER_NODE})) + +# training config +SEQ_LENGTH=4096 +SLIDING_WINDOW_SIZE=4096 +DATA_PARALLEL_SIZE=$NUM_GPUS + +if [ $GRADIENTS_ACCUMULATION_STEPS -lt 1 ]; then + echo "Global batch size is too small for the number of GPUs" + exit 1 +fi + +TRAIN_STEPS=5000 + +# optimizer config +LR=3e-4 +MIN_LR=3e-5 +LR_WARMUP_STEPS=1000 +LR_DECAY_STEPS=5000 +WEIGHT_DECAY=0.1 +GRAD_CLIP=1 + +ADAMW_BETA1=0.9 +ADAMW_BETA2=0.95 
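# AdamW hyperparameters (beta1/beta2 above, epsilon below); these values are
# forwarded to finetuning.py further down via --adam-beta1 / --adam-beta2 / --adam-eps.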
+ADAMW_EPS=1E-8 + +# model config +CURRENT_DIR=$(pwd) +TOKENIZER_MODEL=$CURRENT_DIR/src/llm-jp-tokenizer/models/ver3.0/llm-jp-tokenizer-100k.ver3.0b1.model + +if [ ! -f "$TOKENIZER_MODEL" ]; then + echo "Error: Tokenizer model not found at $TOKENIZER_MODEL" + echo "Current directory: $CURRENT_DIR" + echo "Please check the path and ensure the file exists." + exit 1 +fi + +CHECKPOINT_DIR=$CURRENT_DIR/Mixtral-llm-jp-v3-8x1.8B-checkpoint_init/ +CHECKPOINT_SAVE_DIR=checkpoints + +mkdir -p ${CHECKPOINT_SAVE_DIR} + +# data config +DATASET_DIR=/data/llm-jp-corpus/v3.0.0/training_resharded_tokenize_ver3.0 +DATA_PATH="" +DATA_PATH="${DATA_PATH} 2563804308 ${DATASET_DIR}/train/ja/wiki_0000.jsonl_text_document" + +# job name +JOB_NAME="test-$(whoami)" + +cd src/moe-recipes/ +python examples/finetuning.py \ + --seq-length ${SEQ_LENGTH} \ + --sliding-window-size ${SLIDING_WINDOW_SIZE} \ + --micro-batch-size ${MICRO_BATCH_SIZE} \ + --global-batch-size ${GLOBAL_BATCH_SIZE} \ + --train-iters ${TRAIN_STEPS} \ + --tokenizer-type Llama2Tokenizer \ + --tokenizer-model ${TOKENIZER_MODEL} \ + --data-path ${DATA_PATH} \ + --split 99999,1,0 \ + --lr ${LR} \ + --min-lr ${MIN_LR} \ + --lr-decay-style cosine \ + --lr-warmup-iters ${LR_WARMUP_STEPS} \ + --lr-decay-iters ${LR_DECAY_STEPS} \ + --weight-decay ${WEIGHT_DECAY} \ + --grad-clip-norm ${GRAD_CLIP} \ + --optimizer adam \ + --adam-beta1 $ADAMW_BETA1 \ + --adam-beta2 $ADAMW_BETA2 \ + --adam-eps $ADAMW_EPS \ + --save-interval 500 \ + --eval-interval 1000000000 \ + --eval-iters 1 \ + --bf16 \ + --mixed-precision \ + --base-model ${CHECKPOINT_DIR} \ + --save ${CHECKPOINT_SAVE_DIR} \ + --load ${CHECKPOINT_SAVE_DIR} \ + --use-zero \ + --zero-config ${DEEPSPEED_CONFIG} \ + --zero-stage ${DEEPSPEED_ZERO_STAGE} \ + --no-meta-device \ + --output-router-logits \ + --use-mpi \ + --continual-pretraining \ + --wandb-entity "llm-jp" \ + --wandb-project "sakura-test-moe" \ + --wandb-name "${JOB_NAME}" \ No newline at end of file diff --git a/pretrain/installers/v3-moe-recipes-sakura/install.sh b/pretrain/installers/v3-moe-recipes-sakura/install.sh new file mode 100644 index 00000000..6efd9ec7 --- /dev/null +++ b/pretrain/installers/v3-moe-recipes-sakura/install.sh @@ -0,0 +1,117 @@ +#!/bin/bash +# +# moe-recipes installation script for pretrain jobs on the Sakura cluster +# +# Usage: +# 1. Set the working directory to the directory this file is located. +# 2. Run `sbatch install.sh TARGET_DIR` with setting TARGET_DIR to the actual path. +# +# This script consumes 1 node on the `cpu` partition on the cluster. +# +# CAUTION: +# DO NOT change the content of this file and any other materials in the installer +# directory while the installation is being processed. + +#SBATCH --job-name=pretrain-install +#SBATCH --partition=cpu +#SBATCH --exclusive +#SBATCH --nodes=1 +#SBATCH --ntasks-per-node=1 +#SBATCH --mem=0 +#SBATCH --output=%x-%j.out +#SBATCH --error=%x-%j.err + +set -eux -o pipefail + +if [ $# -ne 1 ]; then + >&2 echo Usage: sbatch install.sh TARGET_DIR + exit 1 +fi + +INSTALLER_DIR=$(pwd) +TARGET_DIR=$1; shift + +>&2 echo INSTALLER_DIR=$INSTALLER_DIR +>&2 echo TARGET_DIR=$TARGET_DIR + +mkdir ${TARGET_DIR} +pushd ${TARGET_DIR} + +# copy basic scripts +# cp -a ${INSTALLER_DIR}/{install.sh,requirements.txt,scripts,example} . +cp -a ${INSTALLER_DIR}/{install.sh,requirements.txt,scripts,example} . 
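# scripts/environment.sh (copied in just above) pins the toolchain used by the
# build steps below (CUDA 12.1, cuDNN 8.9.4, Python 3.10.14, PyTorch 2.3.1,
# flash-attn 2.6.3) and loads the matching modules. An optional sanity check
# after sourcing it, not part of the original installer, could be:
#   module list 2>&1 | grep -E "cuda|cudnn|hpcx|nccl"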
+ +source scripts/environment.sh + +# record current environment variables +set > installer_envvar.log + +# src is used to store all resources for from-scratch builds +mkdir src +pushd src + +# install Python +git clone https://github.com/python/cpython -b v${PRETRAIN_PYTHON_VERSION} +pushd cpython +./configure --prefix="${TARGET_DIR}/python" --enable-optimizations +make -j 64 +make install +popd + +popd # src + +# prepare venv +python/bin/python3 -m venv venv +source venv/bin/activate +python -m pip install --no-cache-dir -U pip + +# install PyTorch +python -m pip install \ + --no-cache-dir \ + --find-links https://download.pytorch.org/whl/torch_stable.html \ + torch==${PRETRAIN_TORCH_VERSION}+cu${PRETRAIN_CUDA_VERSION_SHORT} \ + torchvision==${PRETRAIN_TORCHVISION_VERSION}+cu${PRETRAIN_CUDA_VERSION_SHORT} + +# install Transformers +git clone https://github.com/llm-jp/transformers -b ${PRETRAIN_TRANSFORMERS_TAG} +pushd transformers +python -m pip install -e . +popd + +# install other requirements +python -m pip install --no-cache-dir -U -r requirements.txt + +# install for flash attention +python -m pip install flash-attn=="${PRETRAIN_FLASH_ATTENTION_VERSION}" --no-build-isolation + +pushd src + +# download our moe-recipes and build helper library +git clone https://github.com/llm-jp/moe-recipes.git -b ${PRETRAIN_MOE_RECIPES_TAG} +pushd moe-recipes/megatron_lm/megatron/core/datasets/ +# NOTE(odashi): +# Original makefile in the above directory uses the system's (or pyenv's) python3-config. +# But we need to invoke python3-config installed on our target directory. +MEGATRON_HELPER_CPPFLAGS=( + -O3 -Wall -shared -std=c++11 -fPIC -fdiagnostics-color + $(python -m pybind11 --includes) +) +MEGATRON_HELPER_EXT=$(${TARGET_DIR}/python/bin/python3-config --extension-suffix) +g++ ${MEGATRON_HELPER_CPPFLAGS[@]} helpers.cpp -o helpers${MEGATRON_HELPER_EXT} +popd + +# download our tokeniser +# Tokenizer +git clone https://github.com/llm-jp/llm-jp-tokenizer -b ${PRETRAIN_TOKENIZER_TAG} + +# Clone and build CUTLASS +git clone https://github.com/NVIDIA/cutlass.git +pushd cutlass +export CUDACXX=${CUDA_HOME}/bin/nvcc +mkdir build && pushd build +cmake .. 
-DCUTLASS_NVCC_ARCHS=90 +make -j $(nproc) +popd # build +popd # cutlass +popd # src +popd # ${TARGET_DIR} diff --git a/pretrain/installers/v3-moe-recipes-sakura/requirements.txt b/pretrain/installers/v3-moe-recipes-sakura/requirements.txt new file mode 100644 index 00000000..2e7925b2 --- /dev/null +++ b/pretrain/installers/v3-moe-recipes-sakura/requirements.txt @@ -0,0 +1,34 @@ +# huggingface +datasets==3.0.1 +accelerate==1.0.1 +optimum==1.23.1 +peft==0.13.2 + +appdirs==1.4.4 +loralib==0.1.2 +scipy==1.14.1 +py7zr==0.22.0 +bitsandbytes==0.44.1 +fire==0.7.0 + +black==24.10.0 +flake8==7.1.1 + +# tokenizer +sentencepiece==0.2.0 + +# logging +wandb==0.18.3 + +# multi node +deepspeed==0.15.2 +mpi4py==4.0.1 + +# megatron-lm +nltk==3.9.1 +pybind11==2.13.6 + +# fa2 +ninja==1.11.1.1 +packaging==24.1 +wheel==0.44.0 \ No newline at end of file diff --git a/pretrain/installers/v3-moe-recipes-sakura/scripts/environment.sh b/pretrain/installers/v3-moe-recipes-sakura/scripts/environment.sh new file mode 100644 index 00000000..d1cd3cce --- /dev/null +++ b/pretrain/installers/v3-moe-recipes-sakura/scripts/environment.sh @@ -0,0 +1,25 @@ +#!/bin/bash +# List of environment variables and module loads for pretrain tasks + +export PRETRAIN_CUDA_VERSION_MAJOR=12 +export PRETRAIN_CUDA_VERSION_MINOR=1 +export PRETRAIN_CUDA_VERSION=${PRETRAIN_CUDA_VERSION_MAJOR}.${PRETRAIN_CUDA_VERSION_MINOR} +export PRETRAIN_CUDA_VERSION_SHORT=${PRETRAIN_CUDA_VERSION_MAJOR}${PRETRAIN_CUDA_VERSION_MINOR} +export PRETRAIN_CUDNN_VERSION=8.9.4 +export PRETRAIN_HPCX_VERSION=2.17.1 +export PRETRAIN_NCCL_VERSION=2.20.5 + +export PRETRAIN_PYTHON_VERSION=3.10.14 +export PRETRAIN_TORCH_VERSION=2.3.1 +export PRETRAIN_TORCHVISION_VERSION=0.18.1 +export PRETRAIN_FLASH_ATTENTION_VERSION=2.6.3 +export PRETRAIN_TRANSFORMERS_TAG=feature/layer-wise-load-balance-loss +export PRETRAIN_MOE_RECIPES_TAG=sakura +# Ensure the appropriate Huggingface tokenizer is included +# https://github.com/llm-jp/scripts/pull/12#discussion_r1708415209 +export PRETRAIN_TOKENIZER_TAG=v3.0b2 + +module load cuda/${PRETRAIN_CUDA_VERSION} +module load /data/cudnn-tmp-install/modulefiles/${PRETRAIN_CUDNN_VERSION} +module load hpcx/${PRETRAIN_HPCX_VERSION}-gcc-cuda${PRETRAIN_CUDA_VERSION_MAJOR}/hpcx +module load nccl/${PRETRAIN_NCCL_VERSION} diff --git a/pretrain/installers/v3-moe-recipes-sakura/scripts/mpi_variables.sh b/pretrain/installers/v3-moe-recipes-sakura/scripts/mpi_variables.sh new file mode 100644 index 00000000..68803993 --- /dev/null +++ b/pretrain/installers/v3-moe-recipes-sakura/scripts/mpi_variables.sh @@ -0,0 +1,56 @@ +#!/bin/bash + +# See manual's "4-3 OpenMPI / UCX / NCCL 環境変数パラメータ" section for detail + +if [[ $HOSTNAME =~ ^a[0-9]{3}$ ]]; then + # Settings for GPU cluster A + + # MPI settings + export OMPI_MCA_btl_tcp_if_include="10.1.0.0/16,10.2.0.0/16,10.3.0.0/16,10.4.0.0/16" + + # UCX settings + export UCX_NET_DEVICES="mlx5_0:1,mlx5_1:1,mlx5_4:1,mlx5_5:1" + export UCX_MAX_EAGER_RAILS=4 + export UCX_MAX_RNDV_RAILS=4 + export UCX_IB_GPU_DIRECT_RDMA=1 + + # NCCL settings + export NCCL_IB_ADDR_RANGE="10.1.0.0/16,10.2.0.0/16,10.3.0.0/16,10.4.0.0/16" + export NCCL_IB_GID_INDEX=3 # Set gid = 3 to use RoCE v2 (not necessary) + export NCCL_IB_HCA="mlx5_0:1,mlx5_1:1,mlx5_4:1,mlx5_5:1" + export NCCL_IB_PCI_RELAXED_ORDERING=1 + export NCCL_IB_TC=106 +elif [[ $HOSTNAME =~ ^b[0-9]{3}$ ]]; then + # Settings for GPU cluster B + + # MPI settings + export OMPI_MCA_btl_tcp_if_include="10.5.0.0/16,10.6.0.0/16,10.7.0.0/16,10.8.0.0/16" + + # UCX settings + export 
UCX_NET_DEVICES="mlx5_0:1,mlx5_1:1,mlx5_4:1,mlx5_5:1" + export UCX_MAX_EAGER_RAILS=4 + export UCX_MAX_RNDV_RAILS=4 + export UCX_IB_GPU_DIRECT_RDMA=1 + + # NCCL settings + export NCCL_IB_ADDR_RANGE="10.5.0.0/16,10.6.0.0/16,10.7.0.0/16,10.8.0.0/16" + export NCCL_IB_GID_INDEX=3 # Set gid = 3 to use RoCE v2 (not necessary) + export NCCL_IB_HCA="mlx5_0:1,mlx5_1:1,mlx5_4:1,mlx5_5:1" + export NCCL_IB_PCI_RELAXED_ORDERING=1 + export NCCL_IB_TC=106 +elif [[ $HOSTNAME =~ ^c[0-9]{3}$ ]]; then + # Settings for CPU cluster + + # MPI settings + export OMPI_MCA_btl_tcp_if_include="10.1.0.0/16" + + # UCX settings + export UCX_NET_DEVICES="mlx5_0:1,mlx5_1:1" + export UCX_MAX_EAGER_RAILS=2 + export UCX_MAX_RNDV_RAILS=2 +else + # If executed on not supported environment (e.g. login nodes), + # exit with error + echo "$0: line $LINENO: hostname ($HOSTNAME) is not supported" 1>&2 + exit 1 +fi From 193f5c4144d521c7ae9d825178db08f7503a8ef2 Mon Sep 17 00:00:00 2001 From: Taishi-N324 Date: Mon, 14 Oct 2024 23:50:44 +0900 Subject: [PATCH 2/8] Fixing seed and adding explanations for environmental variables --- .../example/checkpoint_init.py | 22 +++++++++++++++++-- .../v3-moe-recipes-sakura/example/sbatch.sh | 7 ++++++ .../v3-moe-recipes-sakura/example/train.sh | 15 ++++++++----- .../v3-moe-recipes-sakura/install.sh | 3 +-- .../scripts/environment.sh | 1 + 5 files changed, 38 insertions(+), 10 deletions(-) diff --git a/pretrain/installers/v3-moe-recipes-sakura/example/checkpoint_init.py b/pretrain/installers/v3-moe-recipes-sakura/example/checkpoint_init.py index 108ede51..2c1ff7b8 100644 --- a/pretrain/installers/v3-moe-recipes-sakura/example/checkpoint_init.py +++ b/pretrain/installers/v3-moe-recipes-sakura/example/checkpoint_init.py @@ -1,7 +1,24 @@ -from transformers import AutoConfig, AutoModelForCausalLM import os +import random + +import numpy as np +import torch +from transformers import AutoConfig, AutoModelForCausalLM + + +def set_seed(seed: int) -> None: + random.seed(seed) + np.random.seed(seed) + torch.cuda.manual_seed(seed) + torch.manual_seed(seed) + torch.backends.cudnn.deterministic = True + torch.backends.cudnn.benchmark = False + def save_model_and_config(model_name, save_directory): + + set_seed(1234) + # Check if the directory already exists if os.path.exists(save_directory): print(f"Directory {save_directory} already exists. 
Skipping model saving.") @@ -24,8 +41,9 @@ def save_model_and_config(model_name, save_directory): print(f"Model and config have been saved to {save_directory}") + if __name__ == "__main__": model_name = "example/config.json" save_directory = "Mixtral-llm-jp-v3-8x1.8B-checkpoint_init/" - save_model_and_config(model_name, save_directory) \ No newline at end of file + save_model_and_config(model_name, save_directory) diff --git a/pretrain/installers/v3-moe-recipes-sakura/example/sbatch.sh b/pretrain/installers/v3-moe-recipes-sakura/example/sbatch.sh index 7c1f073a..3b609ade 100644 --- a/pretrain/installers/v3-moe-recipes-sakura/example/sbatch.sh +++ b/pretrain/installers/v3-moe-recipes-sakura/example/sbatch.sh @@ -108,11 +108,17 @@ echo "$DEEPSPEED_CONGIG_CONTENT" >"src/moe-recipes/${DEEPSPEED_CONFIG}" # Initialization python example/checkpoint_init.py +# Path to tokenizer and initialization checkpoint +CURRENT_DIR=$(pwd) + mpirun \ -np $NUM_GPUS \ --npernode $NUM_GPUS_PER_NODE \ -bind-to none \ -map-by slot \ + -x TORCH_NCCL_ASYNC_ERROR_HANDLING=1 \ + -x LD_LIBRARY_PATH \ + -x PATH \ -x MASTER_ADDR=$MASTER_ADDR \ -x MASTER_PORT=$MASTER_PORT \ -x NUM_NODES=$NUM_NODES \ @@ -122,4 +128,5 @@ mpirun \ -x GLOBAL_BATCH_SIZE=$GLOBAL_BATCH_SIZE \ -x DEEPSPEED_CONFIG=$DEEPSPEED_CONFIG \ -x DEEPSPEED_ZERO_STAGE=$DEEPSPEED_ZERO_STAGE \ + -x CURRENT_DIR=$CURRENT_DIR \ bash example/train.sh diff --git a/pretrain/installers/v3-moe-recipes-sakura/example/train.sh b/pretrain/installers/v3-moe-recipes-sakura/example/train.sh index 4a3f930e..40d32db5 100644 --- a/pretrain/installers/v3-moe-recipes-sakura/example/train.sh +++ b/pretrain/installers/v3-moe-recipes-sakura/example/train.sh @@ -2,10 +2,19 @@ # Node-level moe-recipes launcher # # Environment variables that the script expects to be passed from mpirun: +# * TORCH_NCCL_ASYNC_ERROR_HANDLING: Enable/disable async error handling for NCCL (1 for enable, 0 for disable) +# * LD_LIBRARY_PATH: Library path for dynamic linking +# * PATH: System path for executable files # * MASTER_ADDR: Address of the master node # * MASTER_PORT: Port number of the master node # * NUM_NODES: Number of nodes assigned for this task # * NUM_GPUS_PER_NODE: Number of GPUs in the node assined for this task +# * GRADIENTS_ACCUMULATION_STEPS: Number of gradient accumulation steps +# * MICRO_BATCH_SIZE: Micro batch size for training +# * GLOBAL_BATCH_SIZE: Global batch size for training +# * DEEPSPEED_CONFIG: Path to DeepSpeed configuration file +# * DEEPSPEED_ZERO_STAGE: DeepSpeed ZeRO stage +# * CURRENT_DIR: Current working directory set -eu -o pipefail @@ -28,11 +37,6 @@ SEQ_LENGTH=4096 SLIDING_WINDOW_SIZE=4096 DATA_PARALLEL_SIZE=$NUM_GPUS -if [ $GRADIENTS_ACCUMULATION_STEPS -lt 1 ]; then - echo "Global batch size is too small for the number of GPUs" - exit 1 -fi - TRAIN_STEPS=5000 # optimizer config @@ -48,7 +52,6 @@ ADAMW_BETA2=0.95 ADAMW_EPS=1E-8 # model config -CURRENT_DIR=$(pwd) TOKENIZER_MODEL=$CURRENT_DIR/src/llm-jp-tokenizer/models/ver3.0/llm-jp-tokenizer-100k.ver3.0b1.model if [ ! -f "$TOKENIZER_MODEL" ]; then diff --git a/pretrain/installers/v3-moe-recipes-sakura/install.sh b/pretrain/installers/v3-moe-recipes-sakura/install.sh index 6efd9ec7..48a86857 100644 --- a/pretrain/installers/v3-moe-recipes-sakura/install.sh +++ b/pretrain/installers/v3-moe-recipes-sakura/install.sh @@ -38,7 +38,6 @@ mkdir ${TARGET_DIR} pushd ${TARGET_DIR} # copy basic scripts -# cp -a ${INSTALLER_DIR}/{install.sh,requirements.txt,scripts,example} . 
cp -a ${INSTALLER_DIR}/{install.sh,requirements.txt,scripts,example} . source scripts/environment.sh @@ -105,7 +104,7 @@ popd git clone https://github.com/llm-jp/llm-jp-tokenizer -b ${PRETRAIN_TOKENIZER_TAG} # Clone and build CUTLASS -git clone https://github.com/NVIDIA/cutlass.git +git clone https://github.com/NVIDIA/cutlass.git -b ${PRETRAIN_CUTLASS_TAG} pushd cutlass export CUDACXX=${CUDA_HOME}/bin/nvcc mkdir build && pushd build diff --git a/pretrain/installers/v3-moe-recipes-sakura/scripts/environment.sh b/pretrain/installers/v3-moe-recipes-sakura/scripts/environment.sh index d1cd3cce..b7ad618f 100644 --- a/pretrain/installers/v3-moe-recipes-sakura/scripts/environment.sh +++ b/pretrain/installers/v3-moe-recipes-sakura/scripts/environment.sh @@ -15,6 +15,7 @@ export PRETRAIN_TORCHVISION_VERSION=0.18.1 export PRETRAIN_FLASH_ATTENTION_VERSION=2.6.3 export PRETRAIN_TRANSFORMERS_TAG=feature/layer-wise-load-balance-loss export PRETRAIN_MOE_RECIPES_TAG=sakura +export PRETRAIN_CUTLASS_TAG=v3.5.1 # Ensure the appropriate Huggingface tokenizer is included # https://github.com/llm-jp/scripts/pull/12#discussion_r1708415209 export PRETRAIN_TOKENIZER_TAG=v3.0b2 From 3ae1fdcb31702d3a731b27b98079e44c5bb80926 Mon Sep 17 00:00:00 2001 From: Taishi-N324 Date: Mon, 14 Oct 2024 23:57:15 +0900 Subject: [PATCH 3/8] Change path --- pretrain/installers/v3-moe-recipes-sakura/install.sh | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/pretrain/installers/v3-moe-recipes-sakura/install.sh b/pretrain/installers/v3-moe-recipes-sakura/install.sh index 48a86857..ba429d96 100644 --- a/pretrain/installers/v3-moe-recipes-sakura/install.sh +++ b/pretrain/installers/v3-moe-recipes-sakura/install.sh @@ -72,10 +72,12 @@ python -m pip install \ torchvision==${PRETRAIN_TORCHVISION_VERSION}+cu${PRETRAIN_CUDA_VERSION_SHORT} # install Transformers +pushd src git clone https://github.com/llm-jp/transformers -b ${PRETRAIN_TRANSFORMERS_TAG} pushd transformers python -m pip install -e . 
-popd +popd # transformers +popd # src # install other requirements python -m pip install --no-cache-dir -U -r requirements.txt From 0db7c132c9025b6828af9b2c31d1e6fbf2e8dcdf Mon Sep 17 00:00:00 2001 From: Taishi-N324 Date: Tue, 15 Oct 2024 11:38:28 +0900 Subject: [PATCH 4/8] Fixes around paths --- .../example/checkpoint_init.py | 2 +- .../v3-moe-recipes-sakura/example/sbatch.sh | 17 ++++++++++++----- .../v3-moe-recipes-sakura/example/train.sh | 18 +++++------------- 3 files changed, 18 insertions(+), 19 deletions(-) diff --git a/pretrain/installers/v3-moe-recipes-sakura/example/checkpoint_init.py b/pretrain/installers/v3-moe-recipes-sakura/example/checkpoint_init.py index 2c1ff7b8..d9d535ea 100644 --- a/pretrain/installers/v3-moe-recipes-sakura/example/checkpoint_init.py +++ b/pretrain/installers/v3-moe-recipes-sakura/example/checkpoint_init.py @@ -44,6 +44,6 @@ def save_model_and_config(model_name, save_directory): if __name__ == "__main__": model_name = "example/config.json" - save_directory = "Mixtral-llm-jp-v3-8x1.8B-checkpoint_init/" + save_directory = "Mixtral-llm-jp-v3-8x1.8B-initial-checkpoint" save_model_and_config(model_name, save_directory) diff --git a/pretrain/installers/v3-moe-recipes-sakura/example/sbatch.sh b/pretrain/installers/v3-moe-recipes-sakura/example/sbatch.sh index 3b609ade..beac4c0b 100644 --- a/pretrain/installers/v3-moe-recipes-sakura/example/sbatch.sh +++ b/pretrain/installers/v3-moe-recipes-sakura/example/sbatch.sh @@ -56,7 +56,7 @@ WEIGHT_DECAY=0.1 GRAD_CLIP=1 # deepspeed config -DEEPSPEED_CONFIG="deepspeed_config.json" +DEEPSPEED_CONFIG="src/moe-recipes/deepspeed_config.json" BF16_ENABLED=true DEEPSPEED_ZERO_STAGE=3 @@ -103,13 +103,20 @@ EOF ) # write deepspeed config file -echo "$DEEPSPEED_CONGIG_CONTENT" >"src/moe-recipes/${DEEPSPEED_CONFIG}" +echo "$DEEPSPEED_CONGIG_CONTENT" >$DEEPSPEED_CONFIG # Initialization python example/checkpoint_init.py -# Path to tokenizer and initialization checkpoint -CURRENT_DIR=$(pwd) +PYTHONPATH=${PYTHONPATH:-} + +if [ -z "$PYTHONPATH" ]; then + export PYTHONPATH="./src/moe-recipes:./src/moe-recipes/src" +else + export PYTHONPATH="./src/moe-recipes:./src/moe-recipes/src:${PYTHONPATH}" +fi + +echo "PYTHONPATH is now: $PYTHONPATH" mpirun \ -np $NUM_GPUS \ @@ -128,5 +135,5 @@ mpirun \ -x GLOBAL_BATCH_SIZE=$GLOBAL_BATCH_SIZE \ -x DEEPSPEED_CONFIG=$DEEPSPEED_CONFIG \ -x DEEPSPEED_ZERO_STAGE=$DEEPSPEED_ZERO_STAGE \ - -x CURRENT_DIR=$CURRENT_DIR \ + -x PYTHONPATH=$PYTHONPATH \ bash example/train.sh diff --git a/pretrain/installers/v3-moe-recipes-sakura/example/train.sh b/pretrain/installers/v3-moe-recipes-sakura/example/train.sh index 40d32db5..45444af0 100644 --- a/pretrain/installers/v3-moe-recipes-sakura/example/train.sh +++ b/pretrain/installers/v3-moe-recipes-sakura/example/train.sh @@ -14,7 +14,7 @@ # * GLOBAL_BATCH_SIZE: Global batch size for training # * DEEPSPEED_CONFIG: Path to DeepSpeed configuration file # * DEEPSPEED_ZERO_STAGE: DeepSpeed ZeRO stage -# * CURRENT_DIR: Current working directory +# * PYTHONPATH: Python module search path set -eu -o pipefail @@ -52,16 +52,9 @@ ADAMW_BETA2=0.95 ADAMW_EPS=1E-8 # model config -TOKENIZER_MODEL=$CURRENT_DIR/src/llm-jp-tokenizer/models/ver3.0/llm-jp-tokenizer-100k.ver3.0b1.model +TOKENIZER_MODEL=src/llm-jp-tokenizer/models/ver3.0/llm-jp-tokenizer-100k.ver3.0b1.model -if [ ! -f "$TOKENIZER_MODEL" ]; then - echo "Error: Tokenizer model not found at $TOKENIZER_MODEL" - echo "Current directory: $CURRENT_DIR" - echo "Please check the path and ensure the file exists." 
- exit 1 -fi - -CHECKPOINT_DIR=$CURRENT_DIR/Mixtral-llm-jp-v3-8x1.8B-checkpoint_init/ +CHECKPOINT_DIR=Mixtral-llm-jp-v3-8x1.8B-initial-checkpoint CHECKPOINT_SAVE_DIR=checkpoints mkdir -p ${CHECKPOINT_SAVE_DIR} @@ -74,8 +67,7 @@ DATA_PATH="${DATA_PATH} 2563804308 ${DATASET_DIR}/train/ja/wiki_0000.jsonl_text_ # job name JOB_NAME="test-$(whoami)" -cd src/moe-recipes/ -python examples/finetuning.py \ +python src/moe-recipes/examples/finetuning.py \ --seq-length ${SEQ_LENGTH} \ --sliding-window-size ${SLIDING_WINDOW_SIZE} \ --micro-batch-size ${MICRO_BATCH_SIZE} \ @@ -96,7 +88,7 @@ python examples/finetuning.py \ --adam-beta1 $ADAMW_BETA1 \ --adam-beta2 $ADAMW_BETA2 \ --adam-eps $ADAMW_EPS \ - --save-interval 500 \ + --save-interval 10 \ --eval-interval 1000000000 \ --eval-iters 1 \ --bf16 \ From c281febeb7734a611afe36541410e999dadb06d5 Mon Sep 17 00:00:00 2001 From: Taishi-N324 Date: Thu, 17 Oct 2024 07:30:27 +0900 Subject: [PATCH 5/8] Add v3-8x1.8b-exp1-sakura scripts --- .../v3-8x1.8b-exp1-sakura/checkpoint_init.py | 49 ++++ .../scripts/v3-8x1.8b-exp1-sakura/config.json | 31 +++ .../scripts/v3-8x1.8b-exp1-sakura/sbatch.sh | 125 +++++++++ .../scripts/v3-8x1.8b-exp1-sakura/train.sh | 252 ++++++++++++++++++ 4 files changed, 457 insertions(+) create mode 100644 pretrain/scripts/v3-8x1.8b-exp1-sakura/checkpoint_init.py create mode 100644 pretrain/scripts/v3-8x1.8b-exp1-sakura/config.json create mode 100644 pretrain/scripts/v3-8x1.8b-exp1-sakura/sbatch.sh create mode 100644 pretrain/scripts/v3-8x1.8b-exp1-sakura/train.sh diff --git a/pretrain/scripts/v3-8x1.8b-exp1-sakura/checkpoint_init.py b/pretrain/scripts/v3-8x1.8b-exp1-sakura/checkpoint_init.py new file mode 100644 index 00000000..7c60f303 --- /dev/null +++ b/pretrain/scripts/v3-8x1.8b-exp1-sakura/checkpoint_init.py @@ -0,0 +1,49 @@ +import os +import random + +import numpy as np +import torch +from transformers import AutoConfig, AutoModelForCausalLM + + +def set_seed(seed: int) -> None: + random.seed(seed) + np.random.seed(seed) + torch.cuda.manual_seed(seed) + torch.manual_seed(seed) + torch.backends.cudnn.deterministic = True + torch.backends.cudnn.benchmark = False + + +def save_model_and_config(model_name, save_directory): + + set_seed(1234) + + # Check if the directory already exists + if os.path.exists(save_directory): + print(f"Directory {save_directory} already exists. 
Skipping model saving.") + return + + # Load config + config = AutoConfig.from_pretrained(model_name) + print(f"Config loaded from {model_name}") + + # Create model from config + model = AutoModelForCausalLM.from_config(config) + print("Model created from config") + + # Create save directory + os.makedirs(save_directory) + + # Save model and config + model.save_pretrained(save_directory) + config.save_pretrained(save_directory) + + print(f"Model and config have been saved to {save_directory}") + + +if __name__ == "__main__": + model_name = "pretrain/scripts/v3-8x1.8b-exp1-sakura/config.json" + save_directory = "Mixtral-llm-jp-v3-8x1.8B-initial-checkpoint" + + save_model_and_config(model_name, save_directory) diff --git a/pretrain/scripts/v3-8x1.8b-exp1-sakura/config.json b/pretrain/scripts/v3-8x1.8b-exp1-sakura/config.json new file mode 100644 index 00000000..0899b3a6 --- /dev/null +++ b/pretrain/scripts/v3-8x1.8b-exp1-sakura/config.json @@ -0,0 +1,31 @@ +{ + "_name_or_path": "", + "architectures": [ + "MixtralForCausalLM" + ], + "attention_dropout": 0.0, + "bos_token_id": 1, + "eos_token_id": 2, + "hidden_act": "silu", + "hidden_size": 2048, + "initializer_range": 0.02, + "intermediate_size": 7168, + "max_position_embeddings": 4096, + "model_type": "mixtral", + "num_attention_heads": 16, + "num_experts_per_tok": 2, + "num_hidden_layers": 24, + "num_key_value_heads": 16, + "num_local_experts": 8, + "output_router_logits": true, + "rms_norm_eps": 1e-05, + "rope_theta": 10000.0, + "router_aux_loss_coef": 0.01, + "router_jitter_noise": 0.0, + "sliding_window": null, + "tie_word_embeddings": false, + "torch_dtype": "bfloat16", + "transformers_version": "4.46.0.dev0", + "use_cache": false, + "vocab_size": 99574 + } \ No newline at end of file diff --git a/pretrain/scripts/v3-8x1.8b-exp1-sakura/sbatch.sh b/pretrain/scripts/v3-8x1.8b-exp1-sakura/sbatch.sh new file mode 100644 index 00000000..27b47c06 --- /dev/null +++ b/pretrain/scripts/v3-8x1.8b-exp1-sakura/sbatch.sh @@ -0,0 +1,125 @@ +#!/bin/bash +#SBATCH --job-name=0059_v3-8x1.8b-exp1 +#SBATCH --partition=gpu-small +#SBATCH --nodes=8 +#SBATCH --gpus-per-node=8 +#SBATCH --ntasks-per-node=8 +#SBATCH --output=outputs/%x-%j.out +#SBATCH --error=outputs/%x-%j.err + +set -eu -o pipefail + +source scripts/environment.sh +source venv/bin/activate + +# CUTLASS +CUTLASS_HOME=src/cutlass/build +export LD_LIBRARY_PATH=${LD_LIBRARY_PATH}:${CUTLASS_HOME}/lib + +export MASTER_ADDR=$(scontrol show hostname $SLURM_JOB_NODELIST | head -n1) +export MASTER_PORT=$((10000 + ($SLURM_JOBID % 50000))) + +echo "MASTER_ADDR=${MASTER_ADDR}" + +NUM_NODES=$SLURM_JOB_NUM_NODES +NUM_GPUS_PER_NODE=$(echo $SLURM_TASKS_PER_NODE | cut -d '(' -f 1) +NUM_GPUS=$((${NUM_NODES} * ${NUM_GPUS_PER_NODE})) + +echo NUM_NODES=$NUM_NODES +echo NUM_GPUS_PER_NODE=$NUM_GPUS_PER_NODE +echo NUM_GPUS=$NUM_GPUS + +MICRO_BATCH_SIZE=8 +GLOBAL_BATCH_SIZE=1024 +GRADIENTS_ACCUMULATION_STEPS=$((GLOBAL_BATCH_SIZE / MICRO_BATCH_SIZE / NUM_GPUS)) + +if [ $GRADIENTS_ACCUMULATION_STEPS -lt 1 ]; then + echo "Global batch size is too small for the number of GPUs" + exit 1 +fi + +WEIGHT_DECAY=0.1 +GRAD_CLIP=1 + +# deepspeed config +DEEPSPEED_CONFIG="src/moe-recipes/deepspeed_config.json" + +BF16_ENABLED=true +DEEPSPEED_ZERO_STAGE=3 + +OVERLAP_COMMUNICATION=true +CONTINOUS_GRADIENTS=true + +DEEPSPEED_SUB_GROUP_SIZE=1e12 +DEEPSPEED_REDUCE_BUCKET_SIZE=1e9 +DEEPSPEED_STAGE3_PREFETCH_BUCKET_SIZE=5e8 +DEEPSPEED_STAGE3_PARAM_PERSISTENCE_THRESHOLD=1e6 + +DEEPSPEED_STAGE3_MAX_LIVE_PARAMETERS=1e9 
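# ZeRO Stage-3 parameter management: MAX_LIVE_PARAMETERS above caps how many
# parameters stay materialized on each GPU, and MAX_REUSE_DISTANCE below keeps
# parameters that will be reused within that window from being released early.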
+DEEPSPEED_STAGE3_MAX_REUSE_DISTANCE=1e9 + +WALL_CLOCK_BREAKDOWN=false + +DEEPSPEED_CONGIG_CONTENT=$( + cat <$DEEPSPEED_CONFIG + +# Initialization +python example/checkpoint_init.py + +PYTHONPATH=${PYTHONPATH:-} + +if [ -z "$PYTHONPATH" ]; then + export PYTHONPATH="./src/moe-recipes:./src/moe-recipes/src" +else + export PYTHONPATH="./src/moe-recipes:./src/moe-recipes/src:${PYTHONPATH}" +fi + +echo "PYTHONPATH is now: $PYTHONPATH" + +mpirun \ + -np $NUM_GPUS \ + --npernode $NUM_GPUS_PER_NODE \ + -bind-to none \ + -map-by slot \ + -x TORCH_NCCL_ASYNC_ERROR_HANDLING=1 \ + -x LD_LIBRARY_PATH \ + -x PATH \ + -x MASTER_ADDR=$MASTER_ADDR \ + -x MASTER_PORT=$MASTER_PORT \ + -x NUM_NODES=$NUM_NODES \ + -x NUM_GPUS_PER_NODE=$NUM_GPUS_PER_NODE \ + -x GRADIENTS_ACCUMULATION_STEPS=$GRADIENTS_ACCUMULATION_STEPS \ + -x MICRO_BATCH_SIZE=$MICRO_BATCH_SIZE \ + -x GLOBAL_BATCH_SIZE=$GLOBAL_BATCH_SIZE \ + -x DEEPSPEED_CONFIG=$DEEPSPEED_CONFIG \ + -x DEEPSPEED_ZERO_STAGE=$DEEPSPEED_ZERO_STAGE \ + -x PYTHONPATH=$PYTHONPATH \ + bash scripts/v3-8x1.8b-exp1-sakura/train.sh diff --git a/pretrain/scripts/v3-8x1.8b-exp1-sakura/train.sh b/pretrain/scripts/v3-8x1.8b-exp1-sakura/train.sh new file mode 100644 index 00000000..eb30a807 --- /dev/null +++ b/pretrain/scripts/v3-8x1.8b-exp1-sakura/train.sh @@ -0,0 +1,252 @@ +#!/bin/bash + +# For details about the model, see: +# https://github.com/llm-jp/model-cards/pull/25 + +set -eu -o pipefail + +source scripts/environment.sh +source scripts/mpi_variables.sh +source venv/bin/activate + +# open file limit +ulimit -n 65536 1048576 + +export LOGLEVEL=INFO +export NCCL_DEBUG=WARN +export NCCL_DEBUG_SUBSYS=WARN +export PYTHONFAULTHANDLER=1 +export CUDA_LAUNCH_BLOCKING=0 +export CUDNN_LOGDEST_DBG=stderr +export CUDNN_LOGERR_DBG=1 + +NUM_GPUS=$((${NUM_NODES} * ${NUM_GPUS_PER_NODE})) + +# training config +SEQ_LENGTH=4096 +SLIDING_WINDOW_SIZE=4096 +DATA_PARALLEL_SIZE=$NUM_GPUS + +TRAIN_STEPS=494120 + +# optimizer config +LR=3e-4 +MIN_LR=3e-5 +LR_WARMUP_STEPS=2000 +LR_DECAY_STEPS=494120 +WEIGHT_DECAY=0.1 +GRAD_CLIP=1 + +ADAMW_BETA1=0.9 +ADAMW_BETA2=0.95 +ADAMW_EPS=1E-8 + +# model config +TOKENIZER_MODEL=src/llm-jp-tokenizer/models/ver3.0/llm-jp-tokenizer-100k.ver3.0b1.model + +CHECKPOINT_DIR=Mixtral-llm-jp-v3-8x1.8B-initial-checkpoint +CHECKPOINT_SAVE_DIR=checkpoints + +mkdir -p ${CHECKPOINT_SAVE_DIR} + +# data config +DATASET_DIR=/home/shared/corpus/llm-jp-corpus/v3.0.0/training_resharded_tokenize_ver3.0 +DATASET_V3_1_DIR=/home/shared/corpus/llm-jp-corpus/v3.1.0/tokenize/v3.0b1 +TRAIN_DATA_CACHE_PATH=/home/shared/experiments/0059_v3-8x1.8b-exp1/train_data_cache_path + +TRAIN_DATA_PATH="" + +# code stack +TRAIN_DATA_PATH="${TRAIN_DATA_PATH} 14486363187 ${DATASET_DIR}/train/code/stack_0000.jsonl_text_document" +TRAIN_DATA_PATH="${TRAIN_DATA_PATH} 12799385151 ${DATASET_DIR}/train/code/stack_0001.jsonl_text_document" +TRAIN_DATA_PATH="${TRAIN_DATA_PATH} 17282923545 ${DATASET_DIR}/train/code/stack_0002.jsonl_text_document" +TRAIN_DATA_PATH="${TRAIN_DATA_PATH} 8861329235 ${DATASET_DIR}/train/code/stack_0003.jsonl_text_document" +TRAIN_DATA_PATH="${TRAIN_DATA_PATH} 6713413649 ${DATASET_DIR}/train/code/stack_0004.jsonl_text_document" +TRAIN_DATA_PATH="${TRAIN_DATA_PATH} 8976432285 ${DATASET_DIR}/train/code/stack_0005.jsonl_text_document" +TRAIN_DATA_PATH="${TRAIN_DATA_PATH} 17961273649 ${DATASET_DIR}/train/code/stack_0006.jsonl_text_document" +TRAIN_DATA_PATH="${TRAIN_DATA_PATH} 12016948303 ${DATASET_DIR}/train/code/stack_0007.jsonl_text_document" +TRAIN_DATA_PATH="${TRAIN_DATA_PATH} 
14953094719 ${DATASET_DIR}/train/code/stack_0008.jsonl_text_document" + +# ja cc 1 +TRAIN_DATA_PATH="${TRAIN_DATA_PATH} 23783124862 ${DATASET_DIR}/train/ja/cc-1_0000.jsonl_text_document" +TRAIN_DATA_PATH="${TRAIN_DATA_PATH} 36378129564 ${DATASET_DIR}/train/ja/cc-1_0001.jsonl_text_document" +TRAIN_DATA_PATH="${TRAIN_DATA_PATH} 35477545812 ${DATASET_DIR}/train/ja/cc-1_0002.jsonl_text_document" +TRAIN_DATA_PATH="${TRAIN_DATA_PATH} 35917231868 ${DATASET_DIR}/train/ja/cc-1_0003.jsonl_text_document" +TRAIN_DATA_PATH="${TRAIN_DATA_PATH} 46203062776 ${DATASET_DIR}/train/ja/cc-1_0004.jsonl_text_document" +TRAIN_DATA_PATH="${TRAIN_DATA_PATH} 40396278536 ${DATASET_DIR}/train/ja/cc-1_0005.jsonl_text_document" +TRAIN_DATA_PATH="${TRAIN_DATA_PATH} 33444216206 ${DATASET_DIR}/train/ja/cc-1_0006.jsonl_text_document" +TRAIN_DATA_PATH="${TRAIN_DATA_PATH} 32375495374 ${DATASET_DIR}/train/ja/cc-1_0007.jsonl_text_document" +TRAIN_DATA_PATH="${TRAIN_DATA_PATH} 36068919622 ${DATASET_DIR}/train/ja/cc-1_0008.jsonl_text_document" +TRAIN_DATA_PATH="${TRAIN_DATA_PATH} 26274952324 ${DATASET_DIR}/train/ja/cc-1_0009.jsonl_text_document" +TRAIN_DATA_PATH="${TRAIN_DATA_PATH} 24024422756 ${DATASET_DIR}/train/ja/cc-1_0010.jsonl_text_document" +TRAIN_DATA_PATH="${TRAIN_DATA_PATH} 34590145510 ${DATASET_DIR}/train/ja/cc-1_0011.jsonl_text_document" +TRAIN_DATA_PATH="${TRAIN_DATA_PATH} 29567301906 ${DATASET_DIR}/train/ja/cc-1_0012.jsonl_text_document" +TRAIN_DATA_PATH="${TRAIN_DATA_PATH} 26690562242 ${DATASET_DIR}/train/ja/cc-1_0013.jsonl_text_document" + +# ja cc 2 +TRAIN_DATA_PATH="${TRAIN_DATA_PATH} 35813749376 ${DATASET_DIR}/train/ja/cc-2_0000.jsonl_text_document" +TRAIN_DATA_PATH="${TRAIN_DATA_PATH} 40034668924 ${DATASET_DIR}/train/ja/cc-2_0001.jsonl_text_document" +TRAIN_DATA_PATH="${TRAIN_DATA_PATH} 31191828858 ${DATASET_DIR}/train/ja/cc-2_0002.jsonl_text_document" +TRAIN_DATA_PATH="${TRAIN_DATA_PATH} 25086109508 ${DATASET_DIR}/train/ja/cc-2_0003.jsonl_text_document" +TRAIN_DATA_PATH="${TRAIN_DATA_PATH} 18979589830 ${DATASET_DIR}/train/ja/cc-2_0004.jsonl_text_document" + +# ja cc 3 +TRAIN_DATA_PATH="${TRAIN_DATA_PATH} 40987803038 ${DATASET_DIR}/train/ja/cc-3_0000.jsonl_text_document" +TRAIN_DATA_PATH="${TRAIN_DATA_PATH} 41333549162 ${DATASET_DIR}/train/ja/cc-3_0001.jsonl_text_document" +TRAIN_DATA_PATH="${TRAIN_DATA_PATH} 29810274406 ${DATASET_DIR}/train/ja/cc-3_0002.jsonl_text_document" +TRAIN_DATA_PATH="${TRAIN_DATA_PATH} 22787733940 ${DATASET_DIR}/train/ja/cc-3_0003.jsonl_text_document" +TRAIN_DATA_PATH="${TRAIN_DATA_PATH} 15544493906 ${DATASET_DIR}/train/ja/cc-3_0004.jsonl_text_document" + +# ja kaken +TRAIN_DATA_PATH="${TRAIN_DATA_PATH} 1826105478 ${DATASET_DIR}/train/ja/kaken_0000.jsonl_text_document" + +# ja warp html +TRAIN_DATA_PATH="${TRAIN_DATA_PATH} 1329440698 ${DATASET_DIR}/train/ja/warp-html-01-06_0000.jsonl_text_document" +TRAIN_DATA_PATH="${TRAIN_DATA_PATH} 1397268214 ${DATASET_DIR}/train/ja/warp-html-07-12_0000.jsonl_text_document" + +# ja warp pdf +TRAIN_DATA_PATH="${TRAIN_DATA_PATH} 30149711608 ${DATASET_V3_1_DIR}/train2/ja/warp-pdf-e00_0000.jsonl_text_document" +TRAIN_DATA_PATH="${TRAIN_DATA_PATH} 30023232706 ${DATASET_V3_1_DIR}/train2/ja/warp-pdf-e00_0001.jsonl_text_document" + +# ja warp pdf 0.2 +TRAIN_DATA_PATH="${TRAIN_DATA_PATH} 15396388677 ${DATASET_V3_1_DIR}/train2/ja/warp-pdf-e02_0000.jsonl_text_document" +TRAIN_DATA_PATH="${TRAIN_DATA_PATH} 13225220331 ${DATASET_V3_1_DIR}/train2/ja/warp-pdf-e02_0001.jsonl_text_document" +TRAIN_DATA_PATH="${TRAIN_DATA_PATH} 12433511477 
${DATASET_V3_1_DIR}/train2/ja/warp-pdf-e02_0002.jsonl_text_document" +TRAIN_DATA_PATH="${TRAIN_DATA_PATH} 14722870558 ${DATASET_V3_1_DIR}/train2/ja/warp-pdf-e02_0003.jsonl_text_document" +TRAIN_DATA_PATH="${TRAIN_DATA_PATH} 14818300138 ${DATASET_V3_1_DIR}/train2/ja/warp-pdf-e02_0004.jsonl_text_document" +TRAIN_DATA_PATH="${TRAIN_DATA_PATH} 14827819309 ${DATASET_V3_1_DIR}/train2/ja/warp-pdf-e02_0005.jsonl_text_document" +TRAIN_DATA_PATH="${TRAIN_DATA_PATH} 13394854115 ${DATASET_V3_1_DIR}/train2/ja/warp-pdf-e02_0006.jsonl_text_document" +TRAIN_DATA_PATH="${TRAIN_DATA_PATH} 14369730518 ${DATASET_V3_1_DIR}/train2/ja/warp-pdf-e02_0007.jsonl_text_document" +TRAIN_DATA_PATH="${TRAIN_DATA_PATH} 14027593174 ${DATASET_V3_1_DIR}/train2/ja/warp-pdf-e02_0008.jsonl_text_document" +TRAIN_DATA_PATH="${TRAIN_DATA_PATH} 14719994730 ${DATASET_V3_1_DIR}/train2/ja/warp-pdf-e02_0009.jsonl_text_document" +TRAIN_DATA_PATH="${TRAIN_DATA_PATH} 9865165774 ${DATASET_V3_1_DIR}/train2/ja/warp-pdf-e02_0010.jsonl_text_document" +TRAIN_DATA_PATH="${TRAIN_DATA_PATH} 14525215128 ${DATASET_V3_1_DIR}/train2/ja/warp-pdf-e02_0011.jsonl_text_document" +TRAIN_DATA_PATH="${TRAIN_DATA_PATH} 10835111330 ${DATASET_V3_1_DIR}/train2/ja/warp-pdf-e02_0012.jsonl_text_document" + +# ja wiki +TRAIN_DATA_PATH="${TRAIN_DATA_PATH} 2563804308 ${DATASET_DIR}/train/ja/wiki_0000.jsonl_text_document" + +# en dolma books +TRAIN_DATA_PATH="${TRAIN_DATA_PATH} 5494262694 ${DATASET_DIR}/train/en/dolma-books_0000.jsonl_text_document" + +# en dolma c4 +TRAIN_DATA_PATH="${TRAIN_DATA_PATH} 17052861266 ${DATASET_DIR}/train/en/dolma-c4_0000.jsonl_text_document" +TRAIN_DATA_PATH="${TRAIN_DATA_PATH} 17051260422 ${DATASET_DIR}/train/en/dolma-c4_0001.jsonl_text_document" +TRAIN_DATA_PATH="${TRAIN_DATA_PATH} 17056648148 ${DATASET_DIR}/train/en/dolma-c4_0002.jsonl_text_document" +TRAIN_DATA_PATH="${TRAIN_DATA_PATH} 17057773049 ${DATASET_DIR}/train/en/dolma-c4_0003.jsonl_text_document" +TRAIN_DATA_PATH="${TRAIN_DATA_PATH} 17047888113 ${DATASET_DIR}/train/en/dolma-c4_0004.jsonl_text_document" +TRAIN_DATA_PATH="${TRAIN_DATA_PATH} 17046511755 ${DATASET_DIR}/train/en/dolma-c4_0005.jsonl_text_document" +TRAIN_DATA_PATH="${TRAIN_DATA_PATH} 17058086815 ${DATASET_DIR}/train/en/dolma-c4_0006.jsonl_text_document" +TRAIN_DATA_PATH="${TRAIN_DATA_PATH} 17049490900 ${DATASET_DIR}/train/en/dolma-c4_0007.jsonl_text_document" +TRAIN_DATA_PATH="${TRAIN_DATA_PATH} 17051009552 ${DATASET_DIR}/train/en/dolma-c4_0008.jsonl_text_document" +TRAIN_DATA_PATH="${TRAIN_DATA_PATH} 14932405246 ${DATASET_DIR}/train/en/dolma-c4_0009.jsonl_text_document" +TRAIN_DATA_PATH="${TRAIN_DATA_PATH} 13142696712 ${DATASET_DIR}/train/en/dolma-c4_0010.jsonl_text_document" + +# en dolma cc +TRAIN_DATA_PATH="${TRAIN_DATA_PATH} 15473522696 ${DATASET_DIR}/train/en/dolma-cc-head_0000.jsonl_text_document" +TRAIN_DATA_PATH="${TRAIN_DATA_PATH} 15767913273 ${DATASET_DIR}/train/en/dolma-cc-head_0001.jsonl_text_document" +TRAIN_DATA_PATH="${TRAIN_DATA_PATH} 16664785078 ${DATASET_DIR}/train/en/dolma-cc-head_0002.jsonl_text_document" +TRAIN_DATA_PATH="${TRAIN_DATA_PATH} 16860035920 ${DATASET_DIR}/train/en/dolma-cc-head_0003.jsonl_text_document" +TRAIN_DATA_PATH="${TRAIN_DATA_PATH} 17197613512 ${DATASET_DIR}/train/en/dolma-cc-head_0004.jsonl_text_document" +TRAIN_DATA_PATH="${TRAIN_DATA_PATH} 16363353173 ${DATASET_DIR}/train/en/dolma-cc-head_0005.jsonl_text_document" +TRAIN_DATA_PATH="${TRAIN_DATA_PATH} 15303692924 ${DATASET_DIR}/train/en/dolma-cc-head_0006.jsonl_text_document" +TRAIN_DATA_PATH="${TRAIN_DATA_PATH} 
15766283829 ${DATASET_DIR}/train/en/dolma-cc-head_0007.jsonl_text_document" +TRAIN_DATA_PATH="${TRAIN_DATA_PATH} 13483997219 ${DATASET_DIR}/train/en/dolma-cc-head_0008.jsonl_text_document" +TRAIN_DATA_PATH="${TRAIN_DATA_PATH} 12561851173 ${DATASET_DIR}/train/en/dolma-cc-head_0009.jsonl_text_document" +TRAIN_DATA_PATH="${TRAIN_DATA_PATH} 14206017429 ${DATASET_DIR}/train/en/dolma-cc-head_0010.jsonl_text_document" +TRAIN_DATA_PATH="${TRAIN_DATA_PATH} 18455249471 ${DATASET_DIR}/train/en/dolma-cc-head_0011.jsonl_text_document" +TRAIN_DATA_PATH="${TRAIN_DATA_PATH} 18359243399 ${DATASET_DIR}/train/en/dolma-cc-head_0012.jsonl_text_document" +TRAIN_DATA_PATH="${TRAIN_DATA_PATH} 16268609444 ${DATASET_DIR}/train/en/dolma-cc-head_0013.jsonl_text_document" +TRAIN_DATA_PATH="${TRAIN_DATA_PATH} 15209913539 ${DATASET_DIR}/train/en/dolma-cc-head_0014.jsonl_text_document" +TRAIN_DATA_PATH="${TRAIN_DATA_PATH} 15601099503 ${DATASET_DIR}/train/en/dolma-cc-head_0015.jsonl_text_document" +TRAIN_DATA_PATH="${TRAIN_DATA_PATH} 16354139164 ${DATASET_DIR}/train/en/dolma-cc-head_0016.jsonl_text_document" +TRAIN_DATA_PATH="${TRAIN_DATA_PATH} 19563123039 ${DATASET_DIR}/train/en/dolma-cc-head_0017.jsonl_text_document" +TRAIN_DATA_PATH="${TRAIN_DATA_PATH} 17794386584 ${DATASET_DIR}/train/en/dolma-cc-head_0018.jsonl_text_document" +TRAIN_DATA_PATH="${TRAIN_DATA_PATH} 17974377563 ${DATASET_DIR}/train/en/dolma-cc-head_0019.jsonl_text_document" +TRAIN_DATA_PATH="${TRAIN_DATA_PATH} 19152181306 ${DATASET_DIR}/train/en/dolma-cc-head_0020.jsonl_text_document" +TRAIN_DATA_PATH="${TRAIN_DATA_PATH} 16841018460 ${DATASET_DIR}/train/en/dolma-cc-head_0021.jsonl_text_document" +TRAIN_DATA_PATH="${TRAIN_DATA_PATH} 15622566364 ${DATASET_DIR}/train/en/dolma-cc-head_0022.jsonl_text_document" +TRAIN_DATA_PATH="${TRAIN_DATA_PATH} 14998264524 ${DATASET_DIR}/train/en/dolma-cc-head_0023.jsonl_text_document" +TRAIN_DATA_PATH="${TRAIN_DATA_PATH} 19994706100 ${DATASET_DIR}/train/en/dolma-cc-head_0024.jsonl_text_document" +TRAIN_DATA_PATH="${TRAIN_DATA_PATH} 19266785326 ${DATASET_DIR}/train/en/dolma-cc-head_0025.jsonl_text_document" +TRAIN_DATA_PATH="${TRAIN_DATA_PATH} 17797970694 ${DATASET_DIR}/train/en/dolma-cc-head_0026.jsonl_text_document" +TRAIN_DATA_PATH="${TRAIN_DATA_PATH} 18662607705 ${DATASET_DIR}/train/en/dolma-cc-head_0027.jsonl_text_document" +TRAIN_DATA_PATH="${TRAIN_DATA_PATH} 18428148263 ${DATASET_DIR}/train/en/dolma-cc-head_0028.jsonl_text_document" +TRAIN_DATA_PATH="${TRAIN_DATA_PATH} 19152709797 ${DATASET_DIR}/train/en/dolma-cc-head_0029.jsonl_text_document" +TRAIN_DATA_PATH="${TRAIN_DATA_PATH} 19567672702 ${DATASET_DIR}/train/en/dolma-cc-head_0030.jsonl_text_document" +TRAIN_DATA_PATH="${TRAIN_DATA_PATH} 15453203385 ${DATASET_DIR}/train/en/dolma-cc-head_0031.jsonl_text_document" +TRAIN_DATA_PATH="${TRAIN_DATA_PATH} 16946844380 ${DATASET_DIR}/train/en/dolma-cc-head_0032.jsonl_text_document" +TRAIN_DATA_PATH="${TRAIN_DATA_PATH} 16719501611 ${DATASET_DIR}/train/en/dolma-cc-head_0033.jsonl_text_document" +TRAIN_DATA_PATH="${TRAIN_DATA_PATH} 16348054343 ${DATASET_DIR}/train/en/dolma-cc-head_0034.jsonl_text_document" +TRAIN_DATA_PATH="${TRAIN_DATA_PATH} 18292316049 ${DATASET_DIR}/train/en/dolma-cc-head_0035.jsonl_text_document" + +# en dolma science paper +TRAIN_DATA_PATH="${TRAIN_DATA_PATH} 8089227423 ${DATASET_DIR}/train/en/dolma-pes2o_0000.jsonl_text_document" +TRAIN_DATA_PATH="${TRAIN_DATA_PATH} 20185217235 ${DATASET_DIR}/train/en/dolma-pes2o_0001.jsonl_text_document" +TRAIN_DATA_PATH="${TRAIN_DATA_PATH} 18622836173 
${DATASET_DIR}/train/en/dolma-pes2o_0002.jsonl_text_document" +TRAIN_DATA_PATH="${TRAIN_DATA_PATH} 15956491971 ${DATASET_DIR}/train/en/dolma-pes2o_0003.jsonl_text_document" + +# en dolma reddit +TRAIN_DATA_PATH="${TRAIN_DATA_PATH} 17412289508 ${DATASET_DIR}/train/en/dolma-reddit_0000.jsonl_text_document" +TRAIN_DATA_PATH="${TRAIN_DATA_PATH} 17315996345 ${DATASET_DIR}/train/en/dolma-reddit_0001.jsonl_text_document" +TRAIN_DATA_PATH="${TRAIN_DATA_PATH} 17095921975 ${DATASET_DIR}/train/en/dolma-reddit_0002.jsonl_text_document" +TRAIN_DATA_PATH="${TRAIN_DATA_PATH} 15808400388 ${DATASET_DIR}/train/en/dolma-reddit_0003.jsonl_text_document" +TRAIN_DATA_PATH="${TRAIN_DATA_PATH} 15425532535 ${DATASET_DIR}/train/en/dolma-reddit_0004.jsonl_text_document" + +# en dolma wiki +TRAIN_DATA_PATH="${TRAIN_DATA_PATH} 3896965449 ${DATASET_DIR}/train/en/dolma-wiki_0000.jsonl_text_document" + +# en wiki +TRAIN_DATA_PATH="${TRAIN_DATA_PATH} 4744259830 ${DATASET_DIR}/train/en/wiki_0000.jsonl_text_document" + +# zh wiki +TRAIN_DATA_PATH="${TRAIN_DATA_PATH} 840277331 ${DATASET_DIR}/train/zh/wiki_0000.jsonl_text_document" + +# ko wiki +TRAIN_DATA_PATH="${TRAIN_DATA_PATH} 316296219 ${DATASET_DIR}/train/ko/wiki_0000.jsonl_text_document" + +# job name +WANDB_ENTITY="llm-jp" +WANDB_PROJECT="v3-8x1.8b" +WANDB_JOB="train-exp1" + +python src/moe-recipes/examples/finetuning.py \ + --seq-length ${SEQ_LENGTH} \ + --sliding-window-size ${SLIDING_WINDOW_SIZE} \ + --micro-batch-size ${MICRO_BATCH_SIZE} \ + --global-batch-size ${GLOBAL_BATCH_SIZE} \ + --train-iters ${TRAIN_STEPS} \ + --tokenizer-type Llama2Tokenizer \ + --tokenizer-model ${TOKENIZER_MODEL} \ + --data-path ${TRAIN_DATA_PATH} \ + --data-cache-path ${TRAIN_DATA_CACHE_PATH} \ + --split 99999,1,0 \ + --lr ${LR} \ + --min-lr ${MIN_LR} \ + --lr-decay-style cosine \ + --lr-warmup-iters ${LR_WARMUP_STEPS} \ + --lr-decay-iters ${LR_DECAY_STEPS} \ + --weight-decay ${WEIGHT_DECAY} \ + --grad-clip-norm ${GRAD_CLIP} \ + --optimizer adam \ + --adam-beta1 $ADAMW_BETA1 \ + --adam-beta2 $ADAMW_BETA2 \ + --adam-eps $ADAMW_EPS \ + --save-interval 500 \ + --eval-interval 1000000000 \ + --eval-iters 1 \ + --bf16 \ + --mixed-precision \ + --base-model ${CHECKPOINT_DIR} \ + --save ${CHECKPOINT_SAVE_DIR} \ + --load ${CHECKPOINT_SAVE_DIR} \ + --use-zero \ + --zero-config ${DEEPSPEED_CONFIG} \ + --zero-stage ${DEEPSPEED_ZERO_STAGE} \ + --no-meta-device \ + --output-router-logits \ + --use-mpi \ + --continual-pretraining \ + --wandb-entity "${WANDB_ENTITY}" \ + --wandb-project "${WANDB_PROJECT}" \ + --wandb-name "${WANDB_JOB}" \ No newline at end of file From 73248b9fb82b56e0d51ab19ede318f0479f85df1 Mon Sep 17 00:00:00 2001 From: Taishi-N324 Date: Tue, 22 Oct 2024 19:50:51 +0900 Subject: [PATCH 6/8] WIP --- .../v3-8x1.8b-exp2-sakura/checkpoint_init.py | 302 ++++++++++++++++++ .../scripts/v3-8x1.8b-exp2-sakura/config.json | 31 ++ 2 files changed, 333 insertions(+) create mode 100644 pretrain/scripts/v3-8x1.8b-exp2-sakura/checkpoint_init.py create mode 100644 pretrain/scripts/v3-8x1.8b-exp2-sakura/config.json diff --git a/pretrain/scripts/v3-8x1.8b-exp2-sakura/checkpoint_init.py b/pretrain/scripts/v3-8x1.8b-exp2-sakura/checkpoint_init.py new file mode 100644 index 00000000..a71c8f6b --- /dev/null +++ b/pretrain/scripts/v3-8x1.8b-exp2-sakura/checkpoint_init.py @@ -0,0 +1,302 @@ +import argparse +import logging +import random +import re + +import numpy as np +import torch +from tqdm import tqdm +from transformers import AutoConfig, AutoModelForCausalLM + +logging.basicConfig( + 
level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s" +) +logger = logging.getLogger(__name__) + + +def initialize_ffn_weights(size, init_method, current_mean=0, current_std=0.02): + logger.info("Initializing FFN weights:") + logger.info(f" Size: {size}") + logger.info(f" Initialization method: {init_method}") + logger.info(f" Current mean: {current_mean}") + logger.info(f" Current std: {current_std}") + + if init_method == "zero_mean_002std": + logger.info(" Using zero mean and 0.02 standard deviation") + return torch.normal(mean=0, std=0.02, size=size) + elif init_method == "zero_mean_current_std": + logger.info(f" Using zero mean and current standard deviation ({current_std})") + return torch.normal(mean=0, std=current_std, size=size) + elif init_method == "current_mean_002std": + logger.info( + f" Using current mean ({current_mean}) and 0.02 standard deviation" + ) + return torch.normal(mean=current_mean, std=0.02, size=size) + elif init_method == "current_mean_current_std": + logger.info( + f" Using current mean ({current_mean}) and current standard deviation ({current_std})" + ) + return torch.normal(mean=current_mean, std=current_std, size=size) + else: + logger.error(f"Unknown initialization method: {init_method}") + raise ValueError(f"Unknown initialization method: {init_method}") + + +def partially_initialize( + tensor, + init_indices, + is_down_proj, + layer_idx, + expert_idx, + init_method, + share_init_indices, + ffn_init_ratio, +): + if is_down_proj: + init_part = tensor[:, init_indices] + else: + init_part = tensor[init_indices, :] + + current_mean = init_part.mean().item() + current_std = init_part.std().item() + if is_down_proj: + init_tensor = initialize_ffn_weights( + (tensor.size(0), len(init_indices)), + init_method, + current_mean=current_mean, + current_std=current_std, + ).to(dtype=torch.bfloat16) + tensor[:, init_indices] = init_tensor + else: + init_tensor = initialize_ffn_weights( + (len(init_indices), tensor.size(1)), + init_method, + current_mean=current_mean, + current_std=current_std, + ).to(dtype=torch.bfloat16) + tensor[init_indices, :] = init_tensor + + logger.info( + f"Layer {layer_idx}, Expert {expert_idx}, {'Down_proj' if is_down_proj else 'Gate_proj/Up_proj'}: " + f"Original size: {tensor.size()}, " + f"Initialization method: {init_method}, Share init indices: {share_init_indices}, " + f"Init ratio: {ffn_init_ratio}, Init size: {len(init_indices)}, " + f"Init part mean: {current_mean:.4f}, Init part std: {current_std:.4f}" + ) + logger.info(f"Init indices: {init_indices[:10]}... 
(showing first 10 elements)") + + return tensor + + +def set_seed(seed: int) -> None: + random.seed(seed) + np.random.seed(seed) + torch.cuda.manual_seed(seed) + torch.manual_seed(seed) + torch.backends.cudnn.deterministic = True + torch.backends.cudnn.benchmark = False + + +def initialize_gate_weights(size): + return torch.normal(mean=0, std=0.02, size=size) + + +def replace_model_parameters( + source_model_path, + target_config_path, + output_path, + num_experts, + num_layers, + seed, + init_method, + share_init_indices, + ffn_init_ratio, +): + set_seed(seed) + logger.info("Starting model parameter replacement process") + logger.info("Configuration:") + logger.info(f" Source model: {source_model_path}") + logger.info(f" Target config: {target_config_path}") + logger.info(f" Output path: {output_path}") + logger.info(f" Number of experts: {num_experts}") + logger.info(f" Number of layers: {num_layers}") + logger.info(f" Seed: {seed}") + logger.info(f" FFN initialization method: {init_method}") + logger.info(f" Share initialization indices: {share_init_indices}") + logger.info(f" FFN initialization ratio: {ffn_init_ratio}") + + logger.info("Loading source model") + source_model = AutoModelForCausalLM.from_pretrained( + source_model_path, torch_dtype=torch.bfloat16 + ) + logger.info("Loading target config") + target_config = AutoConfig.from_pretrained(target_config_path) + logger.info("Creating target model from config") + target_model = AutoModelForCausalLM.from_config( + target_config, torch_dtype=torch.bfloat16 + ) + source_intermediate_size = source_model.config.intermediate_size + target_intermediate_size = target_config.intermediate_size + logger.info(f"Source intermediate size: {source_intermediate_size}") + logger.info(f"Target intermediate size: {target_intermediate_size}") + + exclude_pattern = r"model\.layers\.\d+\.mlp\.(gate_proj|up_proj|down_proj)\.weight" + exclude_layers = set() + for name in target_model.state_dict().keys(): + if re.match(exclude_pattern, name): + exclude_layers.add(name) + + base_src = "model.layers.{}.block_sparse_moe.experts.{}" + base_tgt = "model.layers.{}.mlp" + replace_mapping = { + f"{base_src}.w1.weight": f"{base_tgt}.gate_proj.weight", + f"{base_src}.w2.weight": f"{base_tgt}.down_proj.weight", + f"{base_src}.w3.weight": f"{base_tgt}.up_proj.weight", + } + + source_state_dict = source_model.state_dict() + target_state_dict = target_model.state_dict() + + for name, param in tqdm(target_state_dict.items(), desc="Replacing parameters"): + if name not in exclude_layers and name in source_state_dict: + target_state_dict[name] = source_state_dict[name] + logger.info(f"Parameter {name} replaced") + + for layer_idx in tqdm(range(num_layers), desc="Initializing gate weights"): + gate_weight_name = f"model.layers.{layer_idx}.block_sparse_moe.gate.weight" + if gate_weight_name in target_state_dict: + target_state_dict[gate_weight_name] = initialize_gate_weights( + target_state_dict[gate_weight_name].size() + ) + logger.info( + f"Gate weight {gate_weight_name} initialized with normal distribution (std=0.02)" + ) + init_size = int(target_intermediate_size * ffn_init_ratio) + for layer_idx in tqdm(range(num_layers), desc="Replacing FFN layers"): + + if share_init_indices: + shared_init_indices = torch.randperm(target_intermediate_size)[:init_size] + logger.info( + f"Layer {layer_idx}, Generated shared init indices: {shared_init_indices[:10]}... 
(showing first 10 elements)" + ) + for expert_idx in range(num_experts): + if not share_init_indices: + init_indices = torch.randperm(target_intermediate_size)[:init_size] + logger.info( + f"Layer {layer_idx}, Expert {expert_idx}, Generated init indices: {init_indices[:10]}... (showing first 10 elements)" + ) + else: + init_indices = shared_init_indices + + for target_pattern, source_pattern in replace_mapping.items(): + target_name = target_pattern.format(layer_idx, expert_idx) + source_name = source_pattern.format(layer_idx) + if ( + target_name in target_state_dict + and source_name in source_state_dict + ): + source_tensor = source_state_dict[source_name] + + # Determine if it's down_proj (w2) or not + is_down_proj = "down_proj" in source_name + logger.info( + f"Layer {layer_idx}, Expert {expert_idx}, Original tensor shape: {source_tensor.shape}" + ) + if ( + source_tensor.size(1 if is_down_proj else 0) + > target_intermediate_size + ): + # Resize the tensor if necessary + if is_down_proj: + source_tensor = source_tensor[:, :target_intermediate_size] + else: + source_tensor = source_tensor[:target_intermediate_size, :] + + initialized_tensor = partially_initialize( + source_tensor, + init_indices, + is_down_proj, + layer_idx, + expert_idx, + init_method, + share_init_indices, + ffn_init_ratio, + ) + logger.info( + f"Layer {layer_idx}, Expert {expert_idx}, Initialized tensor shape: {initialized_tensor.shape}" + ) + target_state_dict[target_name] = initialized_tensor + + logger.info(f"FFN layer {target_name} replaced with {source_name}") + + target_model.load_state_dict(target_state_dict) + target_model.save_pretrained(output_path, torch_dtype=torch.bfloat16) + logger.info(f"Modified model saved to {output_path}") + + +def main(): + parser = argparse.ArgumentParser(description="Replace model parameters") + parser.add_argument( + "--ffn_init_method", + type=str, + choices=[ + "zero_mean_002std", + "zero_mean_current_std", + "current_mean_002std", + "current_mean_current_std", + ], + required=True, + help="Method for initializing FFN weights", + ) + parser.add_argument( + "--share_init_indices", + action="store_true", + help="Share initialization indices across experts within each layer", + ) + parser.add_argument( + "--ffn_init_ratio", + type=float, + default=0.5, + help="Ratio of initialized weights (0.0 to 1.0)", + ) + parser.add_argument( + "--source_model_path", type=str, required=True, help="Path to the source model" + ) + parser.add_argument( + "--target_config_path", + type=str, + required=True, + help="Path to the target model config", + ) + parser.add_argument( + "--output_path", type=str, required=True, help="Path to save the modified model" + ) + parser.add_argument( + "--num_experts", + type=int, + required=True, + help="Number of experts in the MoE model", + ) + parser.add_argument( + "--num_layers", type=int, required=True, help="Number of layers in the model" + ) + parser.add_argument( + "--seed", type=int, default=42, help="Random seed for reproducibility" + ) + args = parser.parse_args() + + replace_model_parameters( + args.source_model_path, + args.target_config_path, + args.output_path, + args.num_experts, + args.num_layers, + args.seed, + args.ffn_init_method, + args.share_init_indices, + args.ffn_init_ratio, + ) + + +if __name__ == "__main__": + main() diff --git a/pretrain/scripts/v3-8x1.8b-exp2-sakura/config.json b/pretrain/scripts/v3-8x1.8b-exp2-sakura/config.json new file mode 100644 index 00000000..22bebfec --- /dev/null +++ 
b/pretrain/scripts/v3-8x1.8b-exp2-sakura/config.json @@ -0,0 +1,31 @@ +{ + "_name_or_path": "", + "architectures": [ + "MixtralForCausalLM" + ], + "attention_dropout": 0.0, + "bos_token_id": 1, + "eos_token_id": 2, + "hidden_act": "silu", + "hidden_size": 2048, + "initializer_range": 0.02, + "intermediate_size": 7168, + "max_position_embeddings": 4096, + "model_type": "mixtral", + "num_attention_heads": 16, + "num_experts_per_tok": 2, + "num_hidden_layers": 24, + "num_key_value_heads": 16, + "num_local_experts": 8, + "output_router_logits": true, + "rms_norm_eps": 1e-05, + "rope_theta": 10000.0, + "router_aux_loss_coef": 0.01, + "router_jitter_noise": 0.0, + "sliding_window": null, + "tie_word_embeddings": false, + "torch_dtype": "bfloat16", + "transformers_version": "4.46.0.dev0", + "use_cache": false, + "vocab_size": 99584 + } \ No newline at end of file From 1cf589896b5a8fc3bc944159b30bf2be3b868d77 Mon Sep 17 00:00:00 2001 From: Taishi-N324 Date: Tue, 22 Oct 2024 19:53:42 +0900 Subject: [PATCH 7/8] fix name --- pretrain/installers/v3-moe-recipes-sakura/README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pretrain/installers/v3-moe-recipes-sakura/README.md b/pretrain/installers/v3-moe-recipes-sakura/README.md index 5a9850fe..bd3a1637 100644 --- a/pretrain/installers/v3-moe-recipes-sakura/README.md +++ b/pretrain/installers/v3-moe-recipes-sakura/README.md @@ -1,6 +1,6 @@ -# moe-recipes installation script for Sakura/LLM-jp v3 models +# moe-recipes installation script for Sakura/LLM-jp-3 models -LLM-jp v3シリーズでのMoE実験を行うためのmoe-recipes環境をSakuraクラスタにインストールするためのスクリプトです。 +LLM-jp-3でのMoE実験を行うためのmoe-recipes環境をSakuraクラスタにインストールするためのスクリプトです。 System Pythonやpyenvに依存しない閉じた環境を指定したディレクトリ上に構築します。 ## Usage From e59f4870f807e87e158242f7909520e90d37abe8 Mon Sep 17 00:00:00 2001 From: Taishi-N324 Date: Thu, 31 Oct 2024 22:36:37 +0900 Subject: [PATCH 8/8] Add v3-8x1.8b-exp2 --- .../scripts/v3-8x1.8b-exp2-sakura/sbatch.sh | 122 +++++++++ .../scripts/v3-8x1.8b-exp2-sakura/train.sh | 252 ++++++++++++++++++ 2 files changed, 374 insertions(+) create mode 100644 pretrain/scripts/v3-8x1.8b-exp2-sakura/sbatch.sh create mode 100644 pretrain/scripts/v3-8x1.8b-exp2-sakura/train.sh diff --git a/pretrain/scripts/v3-8x1.8b-exp2-sakura/sbatch.sh b/pretrain/scripts/v3-8x1.8b-exp2-sakura/sbatch.sh new file mode 100644 index 00000000..8e655c3c --- /dev/null +++ b/pretrain/scripts/v3-8x1.8b-exp2-sakura/sbatch.sh @@ -0,0 +1,122 @@ +#!/bin/bash +#SBATCH --job-name=0061_v3-8x1.8b-exp2 +#SBATCH --partition=gpu-small +#SBATCH --nodes=8 +#SBATCH --gpus-per-node=8 +#SBATCH --ntasks-per-node=8 +#SBATCH --output=outputs/%x-%j.out +#SBATCH --error=outputs/%x-%j.err + +set -eu -o pipefail + +source scripts/environment.sh +source venv/bin/activate + +# CUTLASS +CUTLASS_HOME=src/cutlass/build +export LD_LIBRARY_PATH=${LD_LIBRARY_PATH}:${CUTLASS_HOME}/lib + +export MASTER_ADDR=$(scontrol show hostname $SLURM_JOB_NODELIST | head -n1) +export MASTER_PORT=$((10000 + ($SLURM_JOBID % 50000))) + +echo "MASTER_ADDR=${MASTER_ADDR}" + +NUM_NODES=$SLURM_JOB_NUM_NODES +NUM_GPUS_PER_NODE=$(echo $SLURM_TASKS_PER_NODE | cut -d '(' -f 1) +NUM_GPUS=$((${NUM_NODES} * ${NUM_GPUS_PER_NODE})) + +echo NUM_NODES=$NUM_NODES +echo NUM_GPUS_PER_NODE=$NUM_GPUS_PER_NODE +echo NUM_GPUS=$NUM_GPUS + +MICRO_BATCH_SIZE=4 +GLOBAL_BATCH_SIZE=1024 +GRADIENTS_ACCUMULATION_STEPS=$((GLOBAL_BATCH_SIZE / MICRO_BATCH_SIZE / NUM_GPUS)) + +if [ $GRADIENTS_ACCUMULATION_STEPS -lt 1 ]; then + echo "Global batch size is too small for the number of GPUs" + exit 1 
+fi + +WEIGHT_DECAY=0.1 +GRAD_CLIP=1 + +# deepspeed config +DEEPSPEED_CONFIG="src/moe-recipes/deepspeed_config.json" + +BF16_ENABLED=true +DEEPSPEED_ZERO_STAGE=3 + +OVERLAP_COMMUNICATION=true +CONTINOUS_GRADIENTS=true + +DEEPSPEED_SUB_GROUP_SIZE=1e12 +DEEPSPEED_REDUCE_BUCKET_SIZE=1e9 +DEEPSPEED_STAGE3_PREFETCH_BUCKET_SIZE=5e8 +DEEPSPEED_STAGE3_PARAM_PERSISTENCE_THRESHOLD=1e6 + +DEEPSPEED_STAGE3_MAX_LIVE_PARAMETERS=1e9 +DEEPSPEED_STAGE3_MAX_REUSE_DISTANCE=1e9 + +WALL_CLOCK_BREAKDOWN=false + +DEEPSPEED_CONGIG_CONTENT=$( + cat <$DEEPSPEED_CONFIG + +PYTHONPATH=${PYTHONPATH:-} + +if [ -z "$PYTHONPATH" ]; then + export PYTHONPATH="./src/moe-recipes:./src/moe-recipes/src" +else + export PYTHONPATH="./src/moe-recipes:./src/moe-recipes/src:${PYTHONPATH}" +fi + +echo "PYTHONPATH is now: $PYTHONPATH" + +mpirun \ + -np $NUM_GPUS \ + --npernode $NUM_GPUS_PER_NODE \ + -bind-to none \ + -map-by slot \ + -x TORCH_NCCL_ASYNC_ERROR_HANDLING=1 \ + -x LD_LIBRARY_PATH \ + -x PATH \ + -x MASTER_ADDR=$MASTER_ADDR \ + -x MASTER_PORT=$MASTER_PORT \ + -x NUM_NODES=$NUM_NODES \ + -x NUM_GPUS_PER_NODE=$NUM_GPUS_PER_NODE \ + -x GRADIENTS_ACCUMULATION_STEPS=$GRADIENTS_ACCUMULATION_STEPS \ + -x MICRO_BATCH_SIZE=$MICRO_BATCH_SIZE \ + -x GLOBAL_BATCH_SIZE=$GLOBAL_BATCH_SIZE \ + -x DEEPSPEED_CONFIG=$DEEPSPEED_CONFIG \ + -x DEEPSPEED_ZERO_STAGE=$DEEPSPEED_ZERO_STAGE \ + -x PYTHONPATH=$PYTHONPATH \ + bash scripts/v3-8x1.8b-exp2-sakura/train.sh diff --git a/pretrain/scripts/v3-8x1.8b-exp2-sakura/train.sh b/pretrain/scripts/v3-8x1.8b-exp2-sakura/train.sh new file mode 100644 index 00000000..0f89c6a3 --- /dev/null +++ b/pretrain/scripts/v3-8x1.8b-exp2-sakura/train.sh @@ -0,0 +1,252 @@ +#!/bin/bash + +# For details about the model, see: +# https://github.com/llm-jp/model-cards/pull/25 + +set -eu -o pipefail + +source scripts/environment.sh +source scripts/mpi_variables.sh +source venv/bin/activate + +# open file limit +ulimit -n 65536 1048576 + +export LOGLEVEL=INFO +export NCCL_DEBUG=WARN +export NCCL_DEBUG_SUBSYS=WARN +export PYTHONFAULTHANDLER=1 +export CUDA_LAUNCH_BLOCKING=0 +export CUDNN_LOGDEST_DBG=stderr +export CUDNN_LOGERR_DBG=1 + +NUM_GPUS=$((${NUM_NODES} * ${NUM_GPUS_PER_NODE})) + +# training config +SEQ_LENGTH=4096 +SLIDING_WINDOW_SIZE=4096 +DATA_PARALLEL_SIZE=$NUM_GPUS + +TRAIN_STEPS=494120 + +# optimizer config +LR=3e-4 +MIN_LR=3e-5 +LR_WARMUP_STEPS=2000 +LR_DECAY_STEPS=494120 +WEIGHT_DECAY=0.1 +GRAD_CLIP=1 + +ADAMW_BETA1=0.9 +ADAMW_BETA2=0.95 +ADAMW_EPS=1E-8 + +# model config +TOKENIZER_MODEL=src/llm-jp-tokenizer/models/ver3.0/llm-jp-tokenizer-100k.ver3.0b1.model + +CHECKPOINT_DIR=Mixtral-llm-jp-v3-8x1.8B-initial-checkpoint_lam-las-ind +CHECKPOINT_SAVE_DIR=checkpoints-Mixtral-llm-jp-v3-8x1.8B-initial-checkpoint_lam-las-ind + +mkdir -p ${CHECKPOINT_SAVE_DIR} + +# data config +DATASET_DIR=/home/shared/corpus/llm-jp-corpus/v3.0.0/training_resharded_tokenize_ver3.0 +DATASET_V3_1_DIR=/home/shared/corpus/llm-jp-corpus/v3.1.0/tokenize/v3.0b1 +TRAIN_DATA_CACHE_PATH=/home/shared/experiments/0061_v3-8x1.8b-exp2/train_data_cache_path + +TRAIN_DATA_PATH="" + +# code stack +TRAIN_DATA_PATH="${TRAIN_DATA_PATH} 14486363187 ${DATASET_DIR}/train/code/stack_0000.jsonl_text_document" +TRAIN_DATA_PATH="${TRAIN_DATA_PATH} 12799385151 ${DATASET_DIR}/train/code/stack_0001.jsonl_text_document" +TRAIN_DATA_PATH="${TRAIN_DATA_PATH} 17282923545 ${DATASET_DIR}/train/code/stack_0002.jsonl_text_document" +TRAIN_DATA_PATH="${TRAIN_DATA_PATH} 8861329235 ${DATASET_DIR}/train/code/stack_0003.jsonl_text_document" 
+TRAIN_DATA_PATH="${TRAIN_DATA_PATH} 6713413649 ${DATASET_DIR}/train/code/stack_0004.jsonl_text_document" +TRAIN_DATA_PATH="${TRAIN_DATA_PATH} 8976432285 ${DATASET_DIR}/train/code/stack_0005.jsonl_text_document" +TRAIN_DATA_PATH="${TRAIN_DATA_PATH} 17961273649 ${DATASET_DIR}/train/code/stack_0006.jsonl_text_document" +TRAIN_DATA_PATH="${TRAIN_DATA_PATH} 12016948303 ${DATASET_DIR}/train/code/stack_0007.jsonl_text_document" +TRAIN_DATA_PATH="${TRAIN_DATA_PATH} 14953094719 ${DATASET_DIR}/train/code/stack_0008.jsonl_text_document" + +# ja cc 1 +TRAIN_DATA_PATH="${TRAIN_DATA_PATH} 23783124862 ${DATASET_DIR}/train/ja/cc-1_0000.jsonl_text_document" +TRAIN_DATA_PATH="${TRAIN_DATA_PATH} 36378129564 ${DATASET_DIR}/train/ja/cc-1_0001.jsonl_text_document" +TRAIN_DATA_PATH="${TRAIN_DATA_PATH} 35477545812 ${DATASET_DIR}/train/ja/cc-1_0002.jsonl_text_document" +TRAIN_DATA_PATH="${TRAIN_DATA_PATH} 35917231868 ${DATASET_DIR}/train/ja/cc-1_0003.jsonl_text_document" +TRAIN_DATA_PATH="${TRAIN_DATA_PATH} 46203062776 ${DATASET_DIR}/train/ja/cc-1_0004.jsonl_text_document" +TRAIN_DATA_PATH="${TRAIN_DATA_PATH} 40396278536 ${DATASET_DIR}/train/ja/cc-1_0005.jsonl_text_document" +TRAIN_DATA_PATH="${TRAIN_DATA_PATH} 33444216206 ${DATASET_DIR}/train/ja/cc-1_0006.jsonl_text_document" +TRAIN_DATA_PATH="${TRAIN_DATA_PATH} 32375495374 ${DATASET_DIR}/train/ja/cc-1_0007.jsonl_text_document" +TRAIN_DATA_PATH="${TRAIN_DATA_PATH} 36068919622 ${DATASET_DIR}/train/ja/cc-1_0008.jsonl_text_document" +TRAIN_DATA_PATH="${TRAIN_DATA_PATH} 26274952324 ${DATASET_DIR}/train/ja/cc-1_0009.jsonl_text_document" +TRAIN_DATA_PATH="${TRAIN_DATA_PATH} 24024422756 ${DATASET_DIR}/train/ja/cc-1_0010.jsonl_text_document" +TRAIN_DATA_PATH="${TRAIN_DATA_PATH} 34590145510 ${DATASET_DIR}/train/ja/cc-1_0011.jsonl_text_document" +TRAIN_DATA_PATH="${TRAIN_DATA_PATH} 29567301906 ${DATASET_DIR}/train/ja/cc-1_0012.jsonl_text_document" +TRAIN_DATA_PATH="${TRAIN_DATA_PATH} 26690562242 ${DATASET_DIR}/train/ja/cc-1_0013.jsonl_text_document" + +# ja cc 2 +TRAIN_DATA_PATH="${TRAIN_DATA_PATH} 35813749376 ${DATASET_DIR}/train/ja/cc-2_0000.jsonl_text_document" +TRAIN_DATA_PATH="${TRAIN_DATA_PATH} 40034668924 ${DATASET_DIR}/train/ja/cc-2_0001.jsonl_text_document" +TRAIN_DATA_PATH="${TRAIN_DATA_PATH} 31191828858 ${DATASET_DIR}/train/ja/cc-2_0002.jsonl_text_document" +TRAIN_DATA_PATH="${TRAIN_DATA_PATH} 25086109508 ${DATASET_DIR}/train/ja/cc-2_0003.jsonl_text_document" +TRAIN_DATA_PATH="${TRAIN_DATA_PATH} 18979589830 ${DATASET_DIR}/train/ja/cc-2_0004.jsonl_text_document" + +# ja cc 3 +TRAIN_DATA_PATH="${TRAIN_DATA_PATH} 40987803038 ${DATASET_DIR}/train/ja/cc-3_0000.jsonl_text_document" +TRAIN_DATA_PATH="${TRAIN_DATA_PATH} 41333549162 ${DATASET_DIR}/train/ja/cc-3_0001.jsonl_text_document" +TRAIN_DATA_PATH="${TRAIN_DATA_PATH} 29810274406 ${DATASET_DIR}/train/ja/cc-3_0002.jsonl_text_document" +TRAIN_DATA_PATH="${TRAIN_DATA_PATH} 22787733940 ${DATASET_DIR}/train/ja/cc-3_0003.jsonl_text_document" +TRAIN_DATA_PATH="${TRAIN_DATA_PATH} 15544493906 ${DATASET_DIR}/train/ja/cc-3_0004.jsonl_text_document" + +# ja kaken +TRAIN_DATA_PATH="${TRAIN_DATA_PATH} 1826105478 ${DATASET_DIR}/train/ja/kaken_0000.jsonl_text_document" + +# ja warp html +TRAIN_DATA_PATH="${TRAIN_DATA_PATH} 1329440698 ${DATASET_DIR}/train/ja/warp-html-01-06_0000.jsonl_text_document" +TRAIN_DATA_PATH="${TRAIN_DATA_PATH} 1397268214 ${DATASET_DIR}/train/ja/warp-html-07-12_0000.jsonl_text_document" + +# ja warp pdf +TRAIN_DATA_PATH="${TRAIN_DATA_PATH} 30149711608 
${DATASET_V3_1_DIR}/train2/ja/warp-pdf-e00_0000.jsonl_text_document" +TRAIN_DATA_PATH="${TRAIN_DATA_PATH} 30023232706 ${DATASET_V3_1_DIR}/train2/ja/warp-pdf-e00_0001.jsonl_text_document" + +# ja warp pdf 0.2 +TRAIN_DATA_PATH="${TRAIN_DATA_PATH} 15396388677 ${DATASET_V3_1_DIR}/train2/ja/warp-pdf-e02_0000.jsonl_text_document" +TRAIN_DATA_PATH="${TRAIN_DATA_PATH} 13225220331 ${DATASET_V3_1_DIR}/train2/ja/warp-pdf-e02_0001.jsonl_text_document" +TRAIN_DATA_PATH="${TRAIN_DATA_PATH} 12433511477 ${DATASET_V3_1_DIR}/train2/ja/warp-pdf-e02_0002.jsonl_text_document" +TRAIN_DATA_PATH="${TRAIN_DATA_PATH} 14722870558 ${DATASET_V3_1_DIR}/train2/ja/warp-pdf-e02_0003.jsonl_text_document" +TRAIN_DATA_PATH="${TRAIN_DATA_PATH} 14818300138 ${DATASET_V3_1_DIR}/train2/ja/warp-pdf-e02_0004.jsonl_text_document" +TRAIN_DATA_PATH="${TRAIN_DATA_PATH} 14827819309 ${DATASET_V3_1_DIR}/train2/ja/warp-pdf-e02_0005.jsonl_text_document" +TRAIN_DATA_PATH="${TRAIN_DATA_PATH} 13394854115 ${DATASET_V3_1_DIR}/train2/ja/warp-pdf-e02_0006.jsonl_text_document" +TRAIN_DATA_PATH="${TRAIN_DATA_PATH} 14369730518 ${DATASET_V3_1_DIR}/train2/ja/warp-pdf-e02_0007.jsonl_text_document" +TRAIN_DATA_PATH="${TRAIN_DATA_PATH} 14027593174 ${DATASET_V3_1_DIR}/train2/ja/warp-pdf-e02_0008.jsonl_text_document" +TRAIN_DATA_PATH="${TRAIN_DATA_PATH} 14719994730 ${DATASET_V3_1_DIR}/train2/ja/warp-pdf-e02_0009.jsonl_text_document" +TRAIN_DATA_PATH="${TRAIN_DATA_PATH} 9865165774 ${DATASET_V3_1_DIR}/train2/ja/warp-pdf-e02_0010.jsonl_text_document" +TRAIN_DATA_PATH="${TRAIN_DATA_PATH} 14525215128 ${DATASET_V3_1_DIR}/train2/ja/warp-pdf-e02_0011.jsonl_text_document" +TRAIN_DATA_PATH="${TRAIN_DATA_PATH} 10835111330 ${DATASET_V3_1_DIR}/train2/ja/warp-pdf-e02_0012.jsonl_text_document" + +# ja wiki +TRAIN_DATA_PATH="${TRAIN_DATA_PATH} 2563804308 ${DATASET_DIR}/train/ja/wiki_0000.jsonl_text_document" + +# en dolma books +TRAIN_DATA_PATH="${TRAIN_DATA_PATH} 5494262694 ${DATASET_DIR}/train/en/dolma-books_0000.jsonl_text_document" + +# en dolma c4 +TRAIN_DATA_PATH="${TRAIN_DATA_PATH} 17052861266 ${DATASET_DIR}/train/en/dolma-c4_0000.jsonl_text_document" +TRAIN_DATA_PATH="${TRAIN_DATA_PATH} 17051260422 ${DATASET_DIR}/train/en/dolma-c4_0001.jsonl_text_document" +TRAIN_DATA_PATH="${TRAIN_DATA_PATH} 17056648148 ${DATASET_DIR}/train/en/dolma-c4_0002.jsonl_text_document" +TRAIN_DATA_PATH="${TRAIN_DATA_PATH} 17057773049 ${DATASET_DIR}/train/en/dolma-c4_0003.jsonl_text_document" +TRAIN_DATA_PATH="${TRAIN_DATA_PATH} 17047888113 ${DATASET_DIR}/train/en/dolma-c4_0004.jsonl_text_document" +TRAIN_DATA_PATH="${TRAIN_DATA_PATH} 17046511755 ${DATASET_DIR}/train/en/dolma-c4_0005.jsonl_text_document" +TRAIN_DATA_PATH="${TRAIN_DATA_PATH} 17058086815 ${DATASET_DIR}/train/en/dolma-c4_0006.jsonl_text_document" +TRAIN_DATA_PATH="${TRAIN_DATA_PATH} 17049490900 ${DATASET_DIR}/train/en/dolma-c4_0007.jsonl_text_document" +TRAIN_DATA_PATH="${TRAIN_DATA_PATH} 17051009552 ${DATASET_DIR}/train/en/dolma-c4_0008.jsonl_text_document" +TRAIN_DATA_PATH="${TRAIN_DATA_PATH} 14932405246 ${DATASET_DIR}/train/en/dolma-c4_0009.jsonl_text_document" +TRAIN_DATA_PATH="${TRAIN_DATA_PATH} 13142696712 ${DATASET_DIR}/train/en/dolma-c4_0010.jsonl_text_document" + +# en dolma cc +TRAIN_DATA_PATH="${TRAIN_DATA_PATH} 15473522696 ${DATASET_DIR}/train/en/dolma-cc-head_0000.jsonl_text_document" +TRAIN_DATA_PATH="${TRAIN_DATA_PATH} 15767913273 ${DATASET_DIR}/train/en/dolma-cc-head_0001.jsonl_text_document" +TRAIN_DATA_PATH="${TRAIN_DATA_PATH} 16664785078 ${DATASET_DIR}/train/en/dolma-cc-head_0002.jsonl_text_document" 
+TRAIN_DATA_PATH="${TRAIN_DATA_PATH} 16860035920 ${DATASET_DIR}/train/en/dolma-cc-head_0003.jsonl_text_document" +TRAIN_DATA_PATH="${TRAIN_DATA_PATH} 17197613512 ${DATASET_DIR}/train/en/dolma-cc-head_0004.jsonl_text_document" +TRAIN_DATA_PATH="${TRAIN_DATA_PATH} 16363353173 ${DATASET_DIR}/train/en/dolma-cc-head_0005.jsonl_text_document" +TRAIN_DATA_PATH="${TRAIN_DATA_PATH} 15303692924 ${DATASET_DIR}/train/en/dolma-cc-head_0006.jsonl_text_document" +TRAIN_DATA_PATH="${TRAIN_DATA_PATH} 15766283829 ${DATASET_DIR}/train/en/dolma-cc-head_0007.jsonl_text_document" +TRAIN_DATA_PATH="${TRAIN_DATA_PATH} 13483997219 ${DATASET_DIR}/train/en/dolma-cc-head_0008.jsonl_text_document" +TRAIN_DATA_PATH="${TRAIN_DATA_PATH} 12561851173 ${DATASET_DIR}/train/en/dolma-cc-head_0009.jsonl_text_document" +TRAIN_DATA_PATH="${TRAIN_DATA_PATH} 14206017429 ${DATASET_DIR}/train/en/dolma-cc-head_0010.jsonl_text_document" +TRAIN_DATA_PATH="${TRAIN_DATA_PATH} 18455249471 ${DATASET_DIR}/train/en/dolma-cc-head_0011.jsonl_text_document" +TRAIN_DATA_PATH="${TRAIN_DATA_PATH} 18359243399 ${DATASET_DIR}/train/en/dolma-cc-head_0012.jsonl_text_document" +TRAIN_DATA_PATH="${TRAIN_DATA_PATH} 16268609444 ${DATASET_DIR}/train/en/dolma-cc-head_0013.jsonl_text_document" +TRAIN_DATA_PATH="${TRAIN_DATA_PATH} 15209913539 ${DATASET_DIR}/train/en/dolma-cc-head_0014.jsonl_text_document" +TRAIN_DATA_PATH="${TRAIN_DATA_PATH} 15601099503 ${DATASET_DIR}/train/en/dolma-cc-head_0015.jsonl_text_document" +TRAIN_DATA_PATH="${TRAIN_DATA_PATH} 16354139164 ${DATASET_DIR}/train/en/dolma-cc-head_0016.jsonl_text_document" +TRAIN_DATA_PATH="${TRAIN_DATA_PATH} 19563123039 ${DATASET_DIR}/train/en/dolma-cc-head_0017.jsonl_text_document" +TRAIN_DATA_PATH="${TRAIN_DATA_PATH} 17794386584 ${DATASET_DIR}/train/en/dolma-cc-head_0018.jsonl_text_document" +TRAIN_DATA_PATH="${TRAIN_DATA_PATH} 17974377563 ${DATASET_DIR}/train/en/dolma-cc-head_0019.jsonl_text_document" +TRAIN_DATA_PATH="${TRAIN_DATA_PATH} 19152181306 ${DATASET_DIR}/train/en/dolma-cc-head_0020.jsonl_text_document" +TRAIN_DATA_PATH="${TRAIN_DATA_PATH} 16841018460 ${DATASET_DIR}/train/en/dolma-cc-head_0021.jsonl_text_document" +TRAIN_DATA_PATH="${TRAIN_DATA_PATH} 15622566364 ${DATASET_DIR}/train/en/dolma-cc-head_0022.jsonl_text_document" +TRAIN_DATA_PATH="${TRAIN_DATA_PATH} 14998264524 ${DATASET_DIR}/train/en/dolma-cc-head_0023.jsonl_text_document" +TRAIN_DATA_PATH="${TRAIN_DATA_PATH} 19994706100 ${DATASET_DIR}/train/en/dolma-cc-head_0024.jsonl_text_document" +TRAIN_DATA_PATH="${TRAIN_DATA_PATH} 19266785326 ${DATASET_DIR}/train/en/dolma-cc-head_0025.jsonl_text_document" +TRAIN_DATA_PATH="${TRAIN_DATA_PATH} 17797970694 ${DATASET_DIR}/train/en/dolma-cc-head_0026.jsonl_text_document" +TRAIN_DATA_PATH="${TRAIN_DATA_PATH} 18662607705 ${DATASET_DIR}/train/en/dolma-cc-head_0027.jsonl_text_document" +TRAIN_DATA_PATH="${TRAIN_DATA_PATH} 18428148263 ${DATASET_DIR}/train/en/dolma-cc-head_0028.jsonl_text_document" +TRAIN_DATA_PATH="${TRAIN_DATA_PATH} 19152709797 ${DATASET_DIR}/train/en/dolma-cc-head_0029.jsonl_text_document" +TRAIN_DATA_PATH="${TRAIN_DATA_PATH} 19567672702 ${DATASET_DIR}/train/en/dolma-cc-head_0030.jsonl_text_document" +TRAIN_DATA_PATH="${TRAIN_DATA_PATH} 15453203385 ${DATASET_DIR}/train/en/dolma-cc-head_0031.jsonl_text_document" +TRAIN_DATA_PATH="${TRAIN_DATA_PATH} 16946844380 ${DATASET_DIR}/train/en/dolma-cc-head_0032.jsonl_text_document" +TRAIN_DATA_PATH="${TRAIN_DATA_PATH} 16719501611 ${DATASET_DIR}/train/en/dolma-cc-head_0033.jsonl_text_document" +TRAIN_DATA_PATH="${TRAIN_DATA_PATH} 16348054343 
${DATASET_DIR}/train/en/dolma-cc-head_0034.jsonl_text_document" +TRAIN_DATA_PATH="${TRAIN_DATA_PATH} 18292316049 ${DATASET_DIR}/train/en/dolma-cc-head_0035.jsonl_text_document" + +# en dolma science paper +TRAIN_DATA_PATH="${TRAIN_DATA_PATH} 8089227423 ${DATASET_DIR}/train/en/dolma-pes2o_0000.jsonl_text_document" +TRAIN_DATA_PATH="${TRAIN_DATA_PATH} 20185217235 ${DATASET_DIR}/train/en/dolma-pes2o_0001.jsonl_text_document" +TRAIN_DATA_PATH="${TRAIN_DATA_PATH} 18622836173 ${DATASET_DIR}/train/en/dolma-pes2o_0002.jsonl_text_document" +TRAIN_DATA_PATH="${TRAIN_DATA_PATH} 15956491971 ${DATASET_DIR}/train/en/dolma-pes2o_0003.jsonl_text_document" + +# en dolma reddit +TRAIN_DATA_PATH="${TRAIN_DATA_PATH} 17412289508 ${DATASET_DIR}/train/en/dolma-reddit_0000.jsonl_text_document" +TRAIN_DATA_PATH="${TRAIN_DATA_PATH} 17315996345 ${DATASET_DIR}/train/en/dolma-reddit_0001.jsonl_text_document" +TRAIN_DATA_PATH="${TRAIN_DATA_PATH} 17095921975 ${DATASET_DIR}/train/en/dolma-reddit_0002.jsonl_text_document" +TRAIN_DATA_PATH="${TRAIN_DATA_PATH} 15808400388 ${DATASET_DIR}/train/en/dolma-reddit_0003.jsonl_text_document" +TRAIN_DATA_PATH="${TRAIN_DATA_PATH} 15425532535 ${DATASET_DIR}/train/en/dolma-reddit_0004.jsonl_text_document" + +# en dolma wiki +TRAIN_DATA_PATH="${TRAIN_DATA_PATH} 3896965449 ${DATASET_DIR}/train/en/dolma-wiki_0000.jsonl_text_document" + +# en wiki +TRAIN_DATA_PATH="${TRAIN_DATA_PATH} 4744259830 ${DATASET_DIR}/train/en/wiki_0000.jsonl_text_document" + +# zh wiki +TRAIN_DATA_PATH="${TRAIN_DATA_PATH} 840277331 ${DATASET_DIR}/train/zh/wiki_0000.jsonl_text_document" + +# ko wiki +TRAIN_DATA_PATH="${TRAIN_DATA_PATH} 316296219 ${DATASET_DIR}/train/ko/wiki_0000.jsonl_text_document" + +# job name +WANDB_ENTITY="llm-jp" +WANDB_PROJECT="v3-8x1.8b" +WANDB_JOB="train-exp2" + +python src/moe-recipes/examples/finetuning.py \ + --seq-length ${SEQ_LENGTH} \ + --sliding-window-size ${SLIDING_WINDOW_SIZE} \ + --micro-batch-size ${MICRO_BATCH_SIZE} \ + --global-batch-size ${GLOBAL_BATCH_SIZE} \ + --train-iters ${TRAIN_STEPS} \ + --tokenizer-type Llama2Tokenizer \ + --tokenizer-model ${TOKENIZER_MODEL} \ + --data-path ${TRAIN_DATA_PATH} \ + --data-cache-path ${TRAIN_DATA_CACHE_PATH} \ + --split 99999,1,0 \ + --lr ${LR} \ + --min-lr ${MIN_LR} \ + --lr-decay-style cosine \ + --lr-warmup-iters ${LR_WARMUP_STEPS} \ + --lr-decay-iters ${LR_DECAY_STEPS} \ + --weight-decay ${WEIGHT_DECAY} \ + --grad-clip-norm ${GRAD_CLIP} \ + --optimizer adam \ + --adam-beta1 $ADAMW_BETA1 \ + --adam-beta2 $ADAMW_BETA2 \ + --adam-eps $ADAMW_EPS \ + --save-interval 500 \ + --eval-interval 1000000000 \ + --eval-iters 1 \ + --bf16 \ + --mixed-precision \ + --base-model ${CHECKPOINT_DIR} \ + --save ${CHECKPOINT_SAVE_DIR} \ + --load ${CHECKPOINT_SAVE_DIR} \ + --use-zero \ + --zero-config ${DEEPSPEED_CONFIG} \ + --zero-stage ${DEEPSPEED_ZERO_STAGE} \ + --no-meta-device \ + --output-router-logits \ + --use-mpi \ + --continual-pretraining \ + --wandb-entity "${WANDB_ENTITY}" \ + --wandb-project "${WANDB_PROJECT}" \ + --wandb-name "${WANDB_JOB}" \ No newline at end of file
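
The exp2 job in `train.sh` loads its starting weights from `Mixtral-llm-jp-v3-8x1.8B-initial-checkpoint_lam-las-ind` (passed as `--base-model`), so the partial-initialization script added earlier in this patch has to be run before the job is submitted. The following is a minimal usage sketch under a few assumptions: the script file name `initialize_moe_experts.py`, the dense source checkpoint path, and the `scripts/` layout inside the installed directory are placeholders (the real names are defined elsewhere in the patch series), and `zero_mean_current_std` is only one of the four `--ffn_init_method` choices the script accepts.

```shell
# Minimal end-to-end sketch (placeholders marked below); run from the root of the
# installed moe-recipes environment, e.g. ~/myspace.
cd ~/myspace
source venv/bin/activate

# 1. Build the initial MoE checkpoint that train.sh expects as --base-model:
#    the script copies the dense model's gate_proj/up_proj/down_proj weights into
#    every expert (w1/w3/w2) and re-initializes an ffn_init_ratio fraction of the
#    rows/columns. "initialize_moe_experts.py" and the dense source checkpoint
#    path are placeholders.
python initialize_moe_experts.py \
    --source_model_path /path/to/dense-llm-jp-3-1.8b-checkpoint \
    --target_config_path scripts/v3-8x1.8b-exp2-sakura/config.json \
    --output_path Mixtral-llm-jp-v3-8x1.8B-initial-checkpoint_lam-las-ind \
    --num_experts 8 \
    --num_layers 24 \
    --ffn_init_method zero_mean_current_std \
    --share_init_indices \
    --ffn_init_ratio 0.5 \
    --seed 42

# 2. Submit the 8-node job; sbatch.sh derives GRADIENTS_ACCUMULATION_STEPS from
#    GLOBAL_BATCH_SIZE / MICRO_BATCH_SIZE / NUM_GPUS, writes the DeepSpeed ZeRO-3
#    config, and launches train.sh on every GPU rank via mpirun.
sbatch scripts/v3-8x1.8b-exp2-sakura/sbatch.sh
```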