Commit 6d5d2d2

Merge branch 'pytorch:master' into generator
2 parents: d60c447 + 4c586bd


62 files changed: +816 −1043 lines

.bazelrc

Lines changed: 0 additions & 13 deletions
@@ -79,18 +79,6 @@ build:native_arch_posix --host_copt=-march=native
 
 build:mkl_open_source_only --define=tensorflow_mkldnn_contraction_kernel=1
 
-build:cuda --repo_env TF_NEED_CUDA=1
-# "sm" means we emit only cubin, which is forward compatible within a GPU generation.
-# "compute" means we emit both cubin and PTX, which is larger but also forward compatible to future GPU generations.
-build:cuda --crosstool_top=@local_config_cuda//crosstool:toolchain
-build:cuda --@local_config_cuda//:enable_cuda
-build:cuda --define=xla_python_enable_gpu=true
-build:cuda --cxxopt=-DXLA_CUDA=1
-
-# Coverage with cuda/gcc/nvcc requires manually setting coverage flags.
-coverage:cuda --per_file_copt=third_party/.*,torch_xla/.*@--coverage
-coverage:cuda --linkopt=-lgcov
-
 build:acl --define==build_with_acl=true
 
 build:nonccl --define=no_nccl_support=true

@@ -105,7 +93,6 @@ build:tpu --define=with_tpu_support=true
 
 # Run tests serially with TPU and GPU (only 1 device is available).
 test:tpu --local_test_jobs=1
-test:cuda --local_test_jobs=1
 
 #########################################################################
 # RBE config options below.
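
Net effect of these two hunks: the `cuda` build and coverage configs disappear from `.bazelrc`, so `--config=cuda` is no longer a defined config. A minimal sketch of the invocation this retires (the exact error wording is an assumption about how Bazel reports an undefined config):

    # Previously valid; now rejected at startup because .bazelrc no longer
    # defines the config:
    bazel build --config=cuda //...
    # ERROR: Config value 'cuda' is not defined in any .rc file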

.circleci/build.sh

Lines changed: 0 additions & 1 deletion
@@ -50,7 +50,6 @@ source $XLA_DIR/xla_env
 export GCLOUD_SERVICE_KEY_FILE="$XLA_DIR/default_credentials.json"
 export SILO_NAME='cache-silo-ci-dev-3.8_cuda_12.1' # cache bucket for CI
 export BUILD_CPP_TESTS='1'
-export TF_CUDA_COMPUTE_CAPABILITIES="sm_50,sm_70,sm_75,compute_80,$TF_CUDA_COMPUTE_CAPABILITIES"
 build_torch_xla $XLA_DIR
 
 popd

.circleci/common.sh

Lines changed: 2 additions & 1 deletion
@@ -112,7 +112,8 @@ function build_torch_xla() {
   # Need to uncomment the line below.
   # Currently it fails upstream XLA CI.
   # pip install plugins/cuda -v
-  pip install 'torch_xla[pallas]'
+  pip install --pre torch_xla[pallas] --index-url https://us-python.pkg.dev/ml-oss-artifacts-published/jax/simple/ --find-links https://storage.googleapis.com/jax-releases/libtpu_releases.html
+
   popd
 }
 
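This same pip incantation recurs in the workflow files, CONTRIBUTING.md, and README.md below. In plain terms: `--pre` allows pip to select pre-release wheels, `--index-url` replaces the default PyPI index with the JAX artifact registry, and `--find-links` adds a page of libtpu wheel links pip may also resolve against. A minimal smoke test, assuming the `pallas` extra pulls in `jax` (suggested by the "jax and libtpu is needed for pallas tests" comment in `_tpu_ci.yml` below):

    pip install --pre torch_xla[pallas] \
        --index-url https://us-python.pkg.dev/ml-oss-artifacts-published/jax/simple/ \
        --find-links https://storage.googleapis.com/jax-releases/libtpu_releases.html
    # Assumption: the pallas extra installs jax, so this import should succeed.
    python -c "import jax; print(jax.__version__)"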

.github/upstream/Dockerfile

Lines changed: 0 additions & 5 deletions
@@ -15,11 +15,6 @@ ARG tpuvm=""
 # Disable CUDA for PyTorch
 ENV USE_CUDA "0"
 
-# Enable CUDA for XLA
-ENV XLA_CUDA "${cuda}"
-ENV TF_CUDA_COMPUTE_CAPABILITIES "${cuda_compute}"
-ENV TF_CUDA_PATHS "/usr/local/cuda,/usr/include,/usr"
-
 # CUDA build guidance
 ENV NVIDIA_VISIBLE_DEVICES all
 ENV NVIDIA_DRIVER_CAPABILITIES compute,utility

.github/workflows/_test.yml

Lines changed: 1 addition & 1 deletion
@@ -140,7 +140,7 @@ jobs:
           set -x
 
           pip install expecttest unittest-xml-reporting
-          pip install 'torch_xla[pallas]'
+          pip install --pre 'torch_xla[pallas]' --index-url https://us-python.pkg.dev/ml-oss-artifacts-published/jax/simple/ --find-links https://storage.googleapis.com/jax-releases/libtpu_releases.html
 
           if [[ ! -z "$RUN_BENCHMARK_TESTS" ]]; then
             pip install -r pytorch/xla/benchmarks/requirements.txt

.github/workflows/_tpu_ci.yml

Lines changed: 2 additions & 2 deletions
@@ -52,8 +52,8 @@ jobs:
           pip install fsspec
           pip install rich
           # jax and libtpu is needed for pallas tests.
-          pip install 'torch_xla[pallas]'
-          pip install 'torch_xla[tpu]' -f https://storage.googleapis.com/libtpu-wheels/index.html -f https://storage.googleapis.com/libtpu-releases/index.html
+          pip install --pre 'torch_xla[pallas]' --index-url https://us-python.pkg.dev/ml-oss-artifacts-published/jax/simple/ --find-links https://storage.googleapis.com/jax-releases/libtpu_releases.html
+          pip install --pre 'torch_xla[tpu]' --index-url https://us-python.pkg.dev/ml-oss-artifacts-published/jax/simple/ --find-links https://storage.googleapis.com/jax-releases/libtpu_releases.html
           pip install --upgrade protobuf
       - name: Run Tests (${{ matrix.test_script }})
         if: inputs.has_code_changes == 'true'
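
Note the quoting around `'torch_xla[pallas]'` and `'torch_xla[tpu]'`: square brackets are glob characters in some shells, so the unquoted form used in README.md and CONTRIBUTING.md can fail under zsh while working under bash. A quick illustration (the zsh error text is from memory, so treat it as an assumption):

    # zsh parses the bracket as a character-class glob that matches no file:
    pip install torch_xla[tpu]     # zsh: no matches found: torch_xla[tpu]
    # Portable form: quote the requirement specifier.
    pip install 'torch_xla[tpu]'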

BUILD

Lines changed: 0 additions & 16 deletions
@@ -46,22 +46,6 @@ cc_binary(
     ]),
 )
 
-cc_binary(
-    name = "_XLAC_cuda_functions.so",
-    copts = [
-        "-fopenmp",
-        "-fPIC",
-    ],
-    linkopts = [
-        "-Wl,-soname,_XLAC_cuda_functions.so",
-    ],
-    linkshared = 1,
-    visibility = ["//visibility:public"],
-    deps = [
-        "//torch_xla/csrc:aten_cuda_functions",
-    ],
-)
-
 test_suite(
     name = "cpp_tests",
     # testonly = True,
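
The deleted `cc_binary` built a standalone shared object (`linkshared = 1` makes the output a `.so` rather than an executable) wrapping the CUDA ATen bindings. A sketch of the invocation this removal retires (target name taken from the diff above):

    # No longer a valid target after this commit:
    bazel build //:_XLAC_cuda_functions.so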

CONTRIBUTING.md

Lines changed: 1 addition & 1 deletion
@@ -162,7 +162,7 @@ commands on your Linux machine directly, outside of the container.
      -f https://storage.googleapis.com/libtpu-releases/index.html
 
    # Optional: if you're using custom kernels, install pallas dependencies
-   pip install torch_xla[pallas]
+   pip install --pre torch_xla[pallas] --index-url https://us-python.pkg.dev/ml-oss-artifacts-published/jax/simple/ --find-links https://storage.googleapis.com/jax-releases/libtpu_releases.html
 ```
 
 1. If you are running on a TPU VM, ensure `torch` and `torch_xla` were built and

README.md

Lines changed: 1 addition & 1 deletion
@@ -35,7 +35,7 @@ Note: Builds are available for Python 3.8 to 3.11; please use one of the support
 pip install torch==2.8.0 'torch_xla[tpu]==2.8.0'
 
 # Optional: if you're using custom kernels, install pallas dependencies
-pip install 'torch_xla[pallas]'
+pip install --pre torch_xla[pallas] --index-url https://us-python.pkg.dev/ml-oss-artifacts-published/jax/simple/ --find-links https://storage.googleapis.com/jax-releases/libtpu_releases.html
 ```
 **As of 07/16/2025 and starting from Pytorch/XLA 2.8 release, PyTorch/XLA will
 provide nightly and release wheels for Python 3.11 to 3.13**

WORKSPACE

Lines changed: 15 additions & 10 deletions
@@ -46,7 +46,7 @@ new_local_repository(
 
 # To build PyTorch/XLA with a new revison of OpenXLA, update the xla_hash to
 # the openxla git commit hash and note the date of the commit.
-xla_hash = '3d5ece64321630dade7ff733ae1353fc3c83d9cc' # Committed on 2025-06-17.
+xla_hash = '92f7b5952dd585c5be17c9a5caad27407005b513' # Committed on 2025-08-15.
 
 http_archive(
     name = "xla",

@@ -58,7 +58,7 @@ http_archive(
     patches = [
         "//openxla_patches:gpu_nvml.diff",
         "//openxla_patches:gpu_race_condition.diff",
-        "//openxla_patches:count_down.diff",
+        "//openxla_patches:no_fortify.diff",
     ],
     strip_prefix = "xla-" + xla_hash,
     urls = [

@@ -81,6 +81,19 @@ http_archive(
 # path = "/path/to/openxla",
 # )
 
+# Initialize OpenXLA's external dependencies. There is an specific order
+# which those dependencies are initialized, because for bazel it's the
+# first definition that takes precedence.
+# We follow what openxla/xla does exactly:
+# https://github.com/openxla/xla/blob/main/WORKSPACE#L37
+load("@xla//:workspace4.bzl", "xla_workspace4")
+
+xla_workspace4()
+
+load("@xla//:workspace3.bzl", "xla_workspace3")
+
+xla_workspace3()
+
 # Initialize hermetic Python
 load("@xla//third_party/py:python_init_rules.bzl", "python_init_rules")
 
@@ -115,14 +128,6 @@ install_deps()
 
 
 
-# Initialize OpenXLA's external dependencies.
-load("@xla//:workspace4.bzl", "xla_workspace4")
-
-xla_workspace4()
-
-load("@xla//:workspace3.bzl", "xla_workspace3")
-
-xla_workspace3()
 
 load("@xla//:workspace2.bzl", "xla_workspace2")
 
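
The WORKSPACE reshuffle hinges on the Bazel rule the added comment cites: the first definition of an external repository name wins, and later definitions of the same name are ignored, so the point at which `xla_workspace4()` and `xla_workspace3()` run determines which dependency pins take effect; the commit moves them to match openxla/xla's own WORKSPACE ordering. One way to inspect which definition won, a sketch that assumes a WORKSPACE-based (non-bzlmod) build:

    # Print the repository rule that actually defines @xla:
    bazel query --output=build //external:xla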
