Skip to content
Draft
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
19 commits
Select commit Hold shift + click to select a range
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
@@ -1,3 +1,5 @@
arm_variant_type:
- sbsa
blas_impl:
- generic
c_compiler:
Expand Down Expand Up @@ -75,6 +77,7 @@ zip_keys:
- cxx_compiler_version
- c_stdlib_version
- cuda_compiler_version
- arm_variant_type
- - channel_targets
- is_rc
zlib:
Expand Down
Original file line number Diff line number Diff line change
@@ -1,3 +1,5 @@
arm_variant_type:
- sbsa
blas_impl:
- generic
c_compiler:
Expand Down Expand Up @@ -75,6 +77,7 @@ zip_keys:
- cxx_compiler_version
- c_stdlib_version
- cuda_compiler_version
- arm_variant_type
- - channel_targets
- is_rc
zlib:
Expand Down
Original file line number Diff line number Diff line change
@@ -1,3 +1,5 @@
arm_variant_type:
- sbsa
blas_impl:
- generic
c_compiler:
Expand Down Expand Up @@ -75,6 +77,7 @@ zip_keys:
- cxx_compiler_version
- c_stdlib_version
- cuda_compiler_version
- arm_variant_type
- - channel_targets
- is_rc
zlib:
Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,84 @@
arm_variant_type:
- tegra
blas_impl:
- generic
c_compiler:
- gcc
c_compiler_version:
- '14'
c_stdlib:
- sysroot
c_stdlib_version:
- '2.34'
channel_sources:
- conda-forge
channel_targets:
- conda-forge main
cuda_compiler:
- cuda-nvcc
cuda_compiler_version:
- '12.9'
cxx_compiler:
- gxx
cxx_compiler_version:
- '14'
docker_image:
- quay.io/condaforge/linux-anvil-x86_64:alma9
fmt:
- '12.1'
github_actions_labels:
- cirun-openstack-gpu-2xlarge
is_rc:
- 'False'
libabseil:
- '20260107'
libblas:
- 3.9.* *netlib
libcblas:
- 3.9.* *netlib
libcudnn_dev:
- '9'
liblapack:
- 3.9.* *netlib
libmagma_devel:
- '2.9'
libmagma_sparse:
- '2.9'
libprotobuf:
- 6.33.5
libtorch:
- '2.9'
mkl:
- '2025'
nccl:
- '2'
numpy:
- '2'
orc:
- 2.2.2
pin_run_as_build:
python:
min_pin: x.x
max_pin: x.x
pybind11_abi:
- '11'
python:
- 3.10.* *_cpython
- 3.11.* *_cpython
- 3.12.* *_cpython
- 3.13.* *_cp313
- 3.14.* *_cp314
pytorch:
- '2.9'
target_platform:
- linux-aarch64
zip_keys:
- - c_compiler_version
- cxx_compiler_version
- c_stdlib_version
- cuda_compiler_version
- arm_variant_type
- - channel_targets
- is_rc
zlib:
- '1'
21 changes: 0 additions & 21 deletions .ci_support/migrations/absl_grpc_proto.yaml

This file was deleted.

29 changes: 0 additions & 29 deletions .ci_support/migrations/absl_grpc_proto_25Q2.yaml

This file was deleted.

77 changes: 77 additions & 0 deletions .ci_support/migrations/arm_variant_type.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,77 @@
# time stamped to be the day before the CUDA 11.8 migrator
migrator_ts: 2145852000 # 2037-12-31
__migrator:
# this migration should not be unpaused!
# It's intended to be copied on an as-needed basis to feedstocks that want to support tegra
paused: true
operation: key_add
migration_number:
1
build_number:
1
override_cbc_keys:
- cuda_compiler_stub
check_solvable: false
primary_key: cuda_compiler_version
ordering:
arm_variant_type:
- None
- sbsa
- tegra
additional_zip_keys: # [linux and aarch64 and os.environ.get("CF_CUDA_ENABLED", "False") == "True"]
- arm_variant_type # [linux and aarch64 and os.environ.get("CF_CUDA_ENABLED", "False") == "True"]
wait_for_migrators:
- cuda129
- aarch64 and ppc64le addition
commit_message: |2
Build for NVIDIA Tegra devices and CUDA 12.9

This migration adds `arm_variant_type=tegra` to the build matrix to support NVIDIA Tegra
devices compatible with CUDA 12.9. This migrator is only applicable to the `linux-aarch64`
platform because Tegra is specific to that architecture. Non-Tegra ARM
devices are assumed to be SBSA-compliant (Server Base System Architecture). The
default value of `arm_variant_type` is `sbsa` or it is undefined for non-ARM platforms.
Tegra devices compatible with CUDA 13.0 are SBSA-compliant, and do not need a separate
build. Only Orin (sm_87) and later devices are supported because earlier Tegra devices are
not supported by CUDA 12.9.

In addition to this migrator, the `arm-variant` package must be added to the build
requirements of the recipe in order to constrain the CUDA compiler to the correct variant.

```yaml
# A fake selector may be needed for conda-build to pick up arm_variant_type as a variant
# [arm_variant_type]

requirements:
build:
- {{ compiler('cuda') }}
- arm-variant * {{ arm_variant_type }} # [linux and aarch64 and cuda_compiler_version != "None"]
```

For v1 recipes, the work-around looks as follows:
```yaml
context:
# ensure arm_variant_type gets detected as a used variable
touch_arm_variant_type: ${{ arm_variant_type }}
```

Please read the conda-forge CUDA recipe guide for more information:
https://github.com/conda-forge/cuda-feedstock/blob/main/recipe/doc/recipe_guide.md#building-for-arm-tegra-devices

c_compiler_version: # [linux and aarch64 and os.environ.get("CF_CUDA_ENABLED", "False") == "True"]
- 14 # [linux and aarch64 and os.environ.get("CF_CUDA_ENABLED", "False") == "True"]

cxx_compiler_version: # [linux and aarch64 and os.environ.get("CF_CUDA_ENABLED", "False") == "True"]
- 14 # [linux and aarch64 and os.environ.get("CF_CUDA_ENABLED", "False") == "True"]

fortran_compiler_version: # [linux and aarch64 and os.environ.get("CF_CUDA_ENABLED", "False") == "True"]
- 14 # [linux and aarch64 and os.environ.get("CF_CUDA_ENABLED", "False") == "True"]

cuda_compiler_version: # [linux and aarch64 and os.environ.get("CF_CUDA_ENABLED", "False") == "True"]
- 12.9 # [linux and aarch64 and os.environ.get("CF_CUDA_ENABLED", "False") == "True"]

c_stdlib_version: # [linux and aarch64 and os.environ.get("CF_CUDA_ENABLED", "False") == "True"]
- 2.34 # [linux and aarch64 and os.environ.get("CF_CUDA_ENABLED", "False") == "True"]

arm_variant_type: # [linux and aarch64 and os.environ.get("CF_CUDA_ENABLED", "False") == "True"]
- tegra # [linux and aarch64 and os.environ.get("CF_CUDA_ENABLED", "False") == "True"]
Original file line number Diff line number Diff line change
@@ -0,0 +1,69 @@
blas_impl:
- generic
c_compiler:
- vs2022
c_stdlib:
- vs
channel_sources:
- conda-forge
channel_targets:
- conda-forge main
cuda_compiler:
- cuda-nvcc
cuda_compiler_version:
- '12.8'
cxx_compiler:
- vs2022
fmt:
- '12.1'
github_actions_labels:
- cirun-azure-windows-4xlarge
is_rc:
- 'False'
libabseil:
- '20260107'
libblas:
- 3.9.* *netlib
libcblas:
- 3.9.* *netlib
libcudnn_dev:
- '9'
liblapack:
- 3.9.* *netlib
libmagma_devel:
- '2.9'
libmagma_sparse:
- '2.9'
libprotobuf:
- 6.33.5
libtorch:
- '2.9'
mkl:
- '2025'
mkl_devel:
- '2025'
numpy:
- '2'
orc:
- 2.2.2
pin_run_as_build:
python:
min_pin: x.x
max_pin: x.x
pybind11_abi:
- '11'
python:
- 3.10.* *_cpython
- 3.11.* *_cpython
- 3.12.* *_cpython
- 3.13.* *_cp313
- 3.14.* *_cp314
pytorch:
- '2.9'
target_platform:
- win-64
zip_keys:
- - channel_targets
- is_rc
zlib:
- '1'
Original file line number Diff line number Diff line change
@@ -0,0 +1,69 @@
blas_impl:
- generic
c_compiler:
- vs2022
c_stdlib:
- vs
channel_sources:
- conda-forge
channel_targets:
- conda-forge main
cuda_compiler:
- cuda-nvcc
cuda_compiler_version:
- '13.0'
cxx_compiler:
- vs2022
fmt:
- '12.1'
github_actions_labels:
- cirun-azure-windows-4xlarge
is_rc:
- 'False'
libabseil:
- '20260107'
libblas:
- 3.9.* *netlib
libcblas:
- 3.9.* *netlib
libcudnn_dev:
- '9'
liblapack:
- 3.9.* *netlib
libmagma_devel:
- '2.9'
libmagma_sparse:
- '2.9'
libprotobuf:
- 6.33.5
libtorch:
- '2.9'
mkl:
- '2025'
mkl_devel:
- '2025'
numpy:
- '2'
orc:
- 2.2.2
pin_run_as_build:
python:
min_pin: x.x
max_pin: x.x
pybind11_abi:
- '11'
python:
- 3.10.* *_cpython
- 3.11.* *_cpython
- 3.12.* *_cpython
- 3.13.* *_cp313
- 3.14.* *_cp314
pytorch:
- '2.9'
target_platform:
- win-64
zip_keys:
- - channel_targets
- is_rc
zlib:
- '1'
Loading
Loading