# buildifier: disable=load-on-top

workspace(name = "org_tensorflow")

# buildifier: disable=load-on-top

load("//third_party:repo.bzl", "tf_http_archive", "tf_mirror_urls")

tf_http_archive(
    name = "rules_shell",
    sha256 = "bc61ef94facc78e20a645726f64756e5e285a045037c7a61f65af2941f4c25e1",
    strip_prefix = "rules_shell-0.4.1",
    urls = tf_mirror_urls(
        "https://github.com/bazelbuild/rules_shell/releases/download/v0.4.1/rules_shell-v0.4.1.tar.gz",
    ),
)

# Initialize toolchains for ML projects.
#
# A hermetic build system is designed to produce completely reproducible builds for C++.
# Details: https://github.com/google-ml-infra/rules_ml_toolchain
tf_http_archive(
    name = "rules_ml_toolchain",
    sha256 = "07802f21916a113be78ff2110891239bd5183ad09d8c42f6f9b04e4e0bfa5505",
    strip_prefix = "rules_ml_toolchain-802e0dbbcc3cd82ac5b0accbff6f95b70106d0d1",
    urls = tf_mirror_urls(
        "https://github.com/google-ml-infra/rules_ml_toolchain/archive/802e0dbbcc3cd82ac5b0accbff6f95b70106d0d1.tar.gz",
    ),
)

load(
    "@rules_ml_toolchain//cc/deps:cc_toolchain_deps.bzl",
    "cc_toolchain_deps",
)

cc_toolchain_deps()

register_toolchains("@rules_ml_toolchain//cc:linux_x86_64_linux_x86_64")

register_toolchains("@rules_ml_toolchain//cc:linux_x86_64_linux_x86_64_cuda")

register_toolchains("@rules_ml_toolchain//cc:linux_aarch64_linux_aarch64")

register_toolchains("@rules_ml_toolchain//cc:linux_aarch64_linux_aarch64_cuda")
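
# Usage sketch (the target pattern is illustrative): with the CUDA toolchains
# registered above, a GPU-enabled build is typically invoked with
# `--config=cuda`, e.g.
#   bazel build --config=cuda //tensorflow/...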

# Initialize the TensorFlow repository and all dependencies.
#
# The cascade of load() statements and tf_workspace?() calls works around the
# restriction that load() statements need to be at the top of .bzl files.
# E.g. we cannot retrieve a new repository with http_archive and then load()
# a macro from that repository in the same file.
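#
# Illustration only (not executed; `some_repo` and `some_macro` are made-up
# names): a single .bzl file cannot do
#
#   http_archive(name = "some_repo", ...)
#   load("@some_repo//:defs.bzl", "some_macro")  # load() must sit at the top,
#                                                # before @some_repo exists
#
# so each tf_workspaceN() stage fetches the repositories that the next
# workspace*.bzl file is then free to load() from.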
load("@//tensorflow:workspace3.bzl", "tf_workspace3")

tf_workspace3()

load("@rules_shell//shell:repositories.bzl", "rules_shell_dependencies", "rules_shell_toolchains")

rules_shell_dependencies()

rules_shell_toolchains()

# Initialize hermetic Python
load("@local_xla//third_party/py:python_init_rules.bzl", "python_init_rules")

python_init_rules()

load("@local_xla//third_party/py:python_init_repositories.bzl", "python_init_repositories")

python_init_repositories(
    default_python_version = "system",
    local_wheel_dist_folder = "dist",
    local_wheel_inclusion_list = [
        "tensorflow*",
        "tf_nightly*",
    ],
    local_wheel_workspaces = ["//:WORKSPACE"],
    requirements = {
        "3.9": "//:requirements_lock_3_9.txt",
        "3.10": "//:requirements_lock_3_10.txt",
        "3.11": "//:requirements_lock_3_11.txt",
        "3.12": "//:requirements_lock_3_12.txt",
        "3.13": "//:requirements_lock_3_13.txt",
    },
)
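
# Version selection sketch (the environment variable below is an assumption,
# not defined in this file): the hermetic Python version is commonly chosen as
#   HERMETIC_PYTHON_VERSION=3.12 bazel build //tensorflow/...
# which resolves against the matching //:requirements_lock_3_12.txt entry above.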

load("@local_xla//third_party/py:python_init_toolchains.bzl", "python_init_toolchains")

python_init_toolchains()

load("@local_xla//third_party/py:python_init_pip.bzl", "python_init_pip")

python_init_pip()

load("@pypi//:requirements.bzl", "install_deps")

install_deps()

# End hermetic Python initialization

load("@//tensorflow:workspace2.bzl", "tf_workspace2")

tf_workspace2()

load("@//tensorflow:workspace1.bzl", "tf_workspace1")

tf_workspace1()

load("@//tensorflow:workspace0.bzl", "tf_workspace0")

tf_workspace0()

load(
    "@local_xla//third_party/py:python_wheel.bzl",
    "nvidia_wheel_versions_repository",
    "python_wheel_version_suffix_repository",
)

nvidia_wheel_versions_repository(
    name = "nvidia_wheel_versions",
    versions_source = "//ci/official/requirements_updater:nvidia-requirements.txt",
)

python_wheel_version_suffix_repository(name = "tf_wheel_version_suffix")
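
# Hermetic CUDA, CUDNN and NCCL: when `--config=cuda` is set, Bazel downloads
# the NVIDIA redistributions into its cache and uses them during the build and
# test phases, so GPU wheels can be built on machines without GPUs and GPU
# tests only require the NVIDIA driver to be installed.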
load(
    "@rules_ml_toolchain//third_party/gpus/cuda/hermetic:cuda_json_init_repository.bzl",
    "cuda_json_init_repository",
)

cuda_json_init_repository()

load(
    "@cuda_redist_json//:distributions.bzl",
    "CUDA_REDISTRIBUTIONS",
    "CUDNN_REDISTRIBUTIONS",
)

load(
    "@rules_ml_toolchain//third_party/gpus/cuda/hermetic:cuda_redist_init_repositories.bzl",
    "cuda_redist_init_repositories",
    "cudnn_redist_init_repository",
)

cuda_redist_init_repositories(
    cuda_redistributions = CUDA_REDISTRIBUTIONS,
)

cudnn_redist_init_repository(
    cudnn_redistributions = CUDNN_REDISTRIBUTIONS,
)

load(
    "@rules_ml_toolchain//third_party/gpus/cuda/hermetic:cuda_configure.bzl",
    "cuda_configure",
)

cuda_configure(name = "local_config_cuda")

load(
    "@rules_ml_toolchain//third_party/nccl/hermetic:nccl_redist_init_repository.bzl",
    "nccl_redist_init_repository",
)

nccl_redist_init_repository()

load(
    "@rules_ml_toolchain//third_party/nccl/hermetic:nccl_configure.bzl",
    "nccl_configure",
)

nccl_configure(name = "local_config_nccl")

load(
    "@rules_ml_toolchain//third_party/nvshmem/hermetic:nvshmem_json_init_repository.bzl",
    "nvshmem_json_init_repository",
)

nvshmem_json_init_repository()

load(
    "@nvshmem_redist_json//:distributions.bzl",
    "NVSHMEM_REDISTRIBUTIONS",
)

load(
    "@rules_ml_toolchain//third_party/nvshmem/hermetic:nvshmem_redist_init_repository.bzl",
    "nvshmem_redist_init_repository",
)

nvshmem_redist_init_repository(
    nvshmem_redistributions = NVSHMEM_REDISTRIBUTIONS,
)