name: ~Build wheel template

on:
  workflow_call:
    inputs:
      runs-on:
        description: "The runner to use for the build"
        required: true
        type: string
      python-version:
        description: "The Python version to use for the build"
        required: true
        type: string
      cuda-version:
        description: "The CUDA version to use for the build"
        required: true
        type: string
      torch-version:
        description: "The PyTorch version to use for the build"
        required: true
        type: string
      cxx11_abi:
        description: "The C++11 ABI to use for the build"
        required: true
        type: string
      upload-to-release:
        description: "Upload wheel to this release"
        required: false
        type: boolean
        default: false
      release-version:
        description: "The release tag to check out and upload the wheel to"
        required: false
        type: string

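# Every run step below uses this shell line: -x traces commands, -e/-u/-o pipefail abort on
# errors, unset variables, and failed pipe stages; {0} is GitHub's placeholder for the script path.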
defaults:
  run:
    shell: bash -x -e -u -o pipefail {0}

jobs:
  build-wheel:
    runs-on: ${{ inputs.runs-on }}
    name: Build wheel (${{ inputs.release-version }}-${{ inputs.python-version }}-${{ inputs.cuda-version }}-${{ inputs.torch-version }}-${{ inputs.cxx11_abi }})
    steps:
      - name: Checkout
        uses: actions/checkout@v4
        with:
          ref: ${{ inputs.release-version }}
          submodules: recursive

      - name: Set up Python
        uses: actions/setup-python@v5
        with:
          python-version: ${{ inputs.python-version }}

      - name: Set CUDA and PyTorch versions
        run: |
          echo "MATRIX_CUDA_VERSION=$(echo ${{ inputs.cuda-version }} | awk -F \. {'print $1 $2'})" >> $GITHUB_ENV
          echo "MATRIX_TORCH_VERSION=$(echo ${{ inputs.torch-version }} | awk -F \. {'print $1 "." $2'})" >> $GITHUB_ENV
          echo "WHEEL_CUDA_VERSION=$(echo ${{ inputs.cuda-version }} | awk -F \. {'print $1'})" >> $GITHUB_ENV
          echo "MATRIX_PYTHON_VERSION=$(echo ${{ inputs.python-version }} | awk -F \. {'print $1 $2'})" >> $GITHUB_ENV
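          # Example values derived from the awk commands above: cuda-version "12.4.1" gives
          # MATRIX_CUDA_VERSION=124 and WHEEL_CUDA_VERSION=12; torch-version "2.6.0" gives
          # MATRIX_TORCH_VERSION=2.6; python-version "3.11" gives MATRIX_PYTHON_VERSION=311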

      - name: Free up disk space
        if: ${{ runner.os == 'Linux' }}
        # https://github.com/easimon/maximize-build-space/blob/master/action.yml
        # https://github.com/easimon/maximize-build-space/tree/test-report
        run: |
          sudo rm -rf /usr/share/dotnet
          sudo rm -rf /opt/ghc
          sudo rm -rf /opt/hostedtoolcache/CodeQL

      - name: Set up swap space
        if: runner.os == 'Linux'
        # Assumed action: pierotofy/set-swap-space provides the swap-size-gb input used below;
        # the exact version pin is not verified.
        uses: pierotofy/set-swap-space@master
        with:
          swap-size-gb: 10

      - name: Install CUDA ${{ inputs.cuda-version }}
        if: ${{ inputs.cuda-version != 'cpu' }}
        # Assumed action: Jimver/cuda-toolkit matches the cuda/linux-local-args/method inputs below;
        # the exact version pin is not verified.
        uses: Jimver/cuda-toolkit@master
        id: cuda-toolkit
        with:
          cuda: ${{ inputs.cuda-version }}
          linux-local-args: '["--toolkit"]'
          # default method is "local", and we're hitting some error with caching for CUDA 11.8 and 12.1
          # method: ${{ (inputs.cuda-version == '11.8.0' || inputs.cuda-version == '12.1.0') && 'network' || 'local' }}
          method: "network"

      - name: Install additional CUDA libraries
        if: ${{ inputs.cuda-version != 'cpu' }}
        run: |
          CUDA_VERSION=$(echo ${{ inputs.cuda-version }} | awk -F \. {'print $1 "-" $2'})
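          # e.g. "12.4.1" -> "12-4", matching the version suffix of the CUDA apt packages below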
          sudo apt-get update
          sudo apt-get install -y libcusparse-$CUDA_VERSION libcusolver-$CUDA_VERSION
          sudo apt-get clean

      - name: Install PyTorch ${{ inputs.torch-version }}+cu${{ inputs.cuda-version }}
        run: |
          pip install --upgrade pip
          # With python 3.13 and torch 2.5.1, unless we update typing-extensions, we get error
          # AttributeError: attribute '__default__' of 'typing.ParamSpec' objects is not writable
          pip install typing-extensions==4.12.2
          # We want to figure out the CUDA version to download pytorch
          # e.g. we can have system CUDA version being 11.7 but if torch==1.12 then we need to download the wheel from cu116
          # see https://github.com/pytorch/pytorch/blob/main/RELEASE.md#release-compatibility-matrix
          # This code is ugly, maybe there's a better way to do this.
          export TORCH_CUDA_VERSION=$(python -c "from os import environ as env; \
            minv = {'2.4': 118, '2.5': 118, '2.6': 118, '2.7': 118, '2.8': 126}[env['MATRIX_TORCH_VERSION']]; \
            maxv = {'2.4': 124, '2.5': 124, '2.6': 126, '2.7': 128, '2.8': 129}[env['MATRIX_TORCH_VERSION']]; \
            print(minv if int(env['MATRIX_CUDA_VERSION']) < 120 else maxv)" \
          )
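          # e.g. MATRIX_TORCH_VERSION=2.6 with MATRIX_CUDA_VERSION=124 resolves to cu126 wheels,
          # while MATRIX_CUDA_VERSION=118 stays on cu118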
          if [[ ${{ inputs.torch-version }} == *"dev"* ]]; then
            # pip install --no-cache-dir --pre torch==${{ inputs.torch-version }} --index-url https://download.pytorch.org/whl/nightly/cu${TORCH_CUDA_VERSION}
            # Can't use --no-deps because we need cudnn etc.
            # Hard-coding this version of pytorch-triton for torch 2.6.0.dev20241001
            pip install jinja2
            pip install https://download.pytorch.org/whl/nightly/pytorch_triton-3.1.0%2Bcf34004b8a-cp${MATRIX_PYTHON_VERSION}-cp${MATRIX_PYTHON_VERSION}-linux_x86_64.whl
            pip install --no-cache-dir --pre https://download.pytorch.org/whl/nightly/cu${TORCH_CUDA_VERSION}/torch-${{ inputs.torch-version }}%2Bcu${TORCH_CUDA_VERSION}-cp${MATRIX_PYTHON_VERSION}-cp${MATRIX_PYTHON_VERSION}-linux_x86_64.whl
          else
            pip install --no-cache-dir torch==${{ inputs.torch-version }} --index-url https://download.pytorch.org/whl/cu${TORCH_CUDA_VERSION}
          fi
          nvcc --version
          python --version
          python -c "import torch; print('PyTorch:', torch.__version__)"
          python -c "import torch; print('CUDA:', torch.version.cuda)"
          python -c "from torch.utils import cpp_extension; print (cpp_extension.CUDA_HOME)"

      - name: Restore build cache
        uses: actions/cache/restore@v4
        with:
          path: build.tar
          key: build-${{ inputs.release-version }}-${{ inputs.python-version }}-${{ inputs.cuda-version }}-${{ inputs.torch-version }}-${{ inputs.cxx11_abi }}-${{ github.run_number }}-${{ github.run_attempt }}
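          # run_number/run_attempt make every saved key unique; the restore-keys prefix below
          # falls back to the most recently saved cache for this configuration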
          restore-keys: |
            build-${{ inputs.release-version }}-${{ inputs.python-version }}-${{ inputs.cuda-version }}-${{ inputs.torch-version }}-${{ inputs.cxx11_abi }}-

      - name: Unpack build cache
        run: |
          echo ::group::Adjust timestamps
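          # Presumably this resets every file on the runner to an epoch mtime, so that the cached
          # build outputs extracted from build.tar below appear newer than everything else and the
          # incremental build does not recompile them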
          sudo find / -exec touch -t 197001010000 {} + || true
          echo ::endgroup::

          if [ -f build.tar ]; then
            find . -mindepth 1 -maxdepth 1 ! -name 'build.tar' -exec rm -rf {} +
            tar -xpvf build.tar -C .
          else
            echo "No build.tar found, skipping"
          fi

          ls -al ./
          ls -al build/ || true
          ls -al csrc/ || true

      - name: Build wheel
        id: build_wheel
        run: |
          # We want setuptools >= 49.6.0 otherwise we can't compile the extension if system CUDA version is 11.7 and pytorch cuda version is 11.6
          # https://github.com/pytorch/pytorch/blob/664058fa83f1d8eede5d66418abff6e20bd76ca8/torch/utils/cpp_extension.py#L810
          # However this still fails so I'm using a newer version of setuptools
          pip install setuptools==75.8.0
          pip install ninja packaging wheel
          export PATH=/usr/local/nvidia/bin:/usr/local/nvidia/lib64:$PATH
          export LD_LIBRARY_PATH=/usr/local/nvidia/lib64:/usr/local/cuda/lib64:$LD_LIBRARY_PATH
          # Limit MAX_JOBS otherwise the github runner goes OOM
          # nvcc 11.8 can compile with 2 jobs, but nvcc 12.3 goes OOM

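          # TORCH_CUDA_ARCH_LIST below targets SM 7.0 through 9.0; the +PTX suffix also embeds PTX
          # so the kernels can be JIT-compiled on newer architectures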
          export MAX_JOBS=$([ "$MATRIX_CUDA_VERSION" == "129" ] && echo 1 || echo 2)
          export NVCC_THREADS=2
          export TORCH_CUDA_ARCH_LIST="7.0 7.2 7.5 8.0 8.6 8.7 9.0+PTX"

          # 5h timeout since GH allows max 6h and we want some buffer
          EXIT_CODE=0
          timeout 5h python setup.py bdist_wheel --dist-dir=dist || EXIT_CODE=$?

          if [ $EXIT_CODE -eq 0 ]; then
            tmpname=cu${WHEEL_CUDA_VERSION}torch${MATRIX_TORCH_VERSION}cxx11abi${{ inputs.cxx11_abi }}
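            # The sed below inserts the tag as a local version after the package version; illustrative
            # example (package name is hypothetical): mypkg-1.2.3-cp311-cp311-linux_x86_64.whl becomes
            # mypkg-1.2.3+cu12torch2.6cxx11abiFALSE-cp311-cp311-linux_x86_64.whl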
            wheel_name=$(ls dist/*whl | xargs -n 1 basename | sed "s/-/+$tmpname-/2")
            ls dist/*whl | xargs -I {} mv {} dist/${wheel_name}
            echo "wheel_name=${wheel_name}" >> $GITHUB_ENV
          fi

          # Store the exit code as a step output so later steps can check for a timeout
          echo "build_exit_code=$EXIT_CODE" | tee -a "$GITHUB_OUTPUT"

          # Propagate the exit code: a timed-out build still fails this step, but the
          # cache-saving steps below run anyway via their always() conditions
          exit $EXIT_CODE

      - name: Pack build directory after timeout
        if: always() && steps.build_wheel.outputs.build_exit_code == 124
        run: |
          ls -al ./
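          # --atime-preserve=replace restores each file's access time after tar reads it, so
          # creating the archive leaves file access times untouched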
          tar -cvf build.tar . --atime-preserve=replace

      - name: Save build cache after timeout
        if: always() && steps.build_wheel.outputs.build_exit_code == 124
        uses: actions/cache/save@v4
        with:
          key: build-${{ inputs.release-version }}-${{ inputs.python-version }}-${{ inputs.cuda-version }}-${{ inputs.torch-version }}-${{ inputs.cxx11_abi }}-${{ github.run_number }}-${{ github.run_attempt }}
          path: build.tar

      - name: Log Built Wheels
        run: |
          ls dist

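      # Look up the existing GitHub release for the tag; its upload_url feeds the upload step below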
      - name: Get Release with tag
        id: get_current_release
        uses: joutvhu/get-release@v1
        with:
          tag_name: ${{ inputs.release-version }}
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}

      - name: Upload Release Asset
        id: upload_release_asset
        if: inputs.upload-to-release
        uses: actions/upload-release-asset@v1
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
        with:
          upload_url: ${{ steps.get_current_release.outputs.upload_url }}
          asset_path: ./dist/${{env.wheel_name}}
          asset_name: ${{env.wheel_name}}
          asset_content_type: application/*