From a6206958d28a064564ef132091b9c617ae005f49 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Johannes=20G=C3=A4=C3=9Fler?= Date: Wed, 15 Apr 2026 16:01:46 +0200 Subject: [PATCH] CUDA: require explicit opt-in for P2P access (#21910) --- docs/build.md | 6 ++++++ ggml/src/ggml-cuda/ggml-cuda.cu | 22 ++++++++++++---------- 2 files changed, 18 insertions(+), 10 deletions(-) diff --git a/docs/build.md b/docs/build.md index 38a4d512de..a18479b334 100644 --- a/docs/build.md +++ b/docs/build.md @@ -281,6 +281,12 @@ Use `GGML_CUDA_FORCE_CUBLAS_COMPUTE_16F` environment variable to force use FP16 The environment variable `GGML_CUDA_ENABLE_UNIFIED_MEMORY=1` can be used to enable unified memory in Linux. This allows swapping to system RAM instead of crashing when the GPU VRAM is exhausted. In Windows this setting is available in the NVIDIA control panel as `System Memory Fallback`. +### Peer Access + +The environment variable `GGML_CUDA_P2P` can be set to enable peer-to-peer access between multiple GPUs, allowing them to transfer data directly rather than going through system memory. +Requires driver support (usually restricted to workstation/datacenter GPUs). +May cause crashes or corrupted outputs for some motherboards and BIOS settings (e.g. IOMMU). 
+ ### Performance Tuning The following compilation options are also available to tweak performance: diff --git a/ggml/src/ggml-cuda/ggml-cuda.cu b/ggml/src/ggml-cuda/ggml-cuda.cu index 5d81befec3..c17db3875a 100644 --- a/ggml/src/ggml-cuda/ggml-cuda.cu +++ b/ggml/src/ggml-cuda/ggml-cuda.cu @@ -324,16 +324,18 @@ static ggml_cuda_device_info ggml_cuda_init() { // configure logging to stdout // CUBLAS_CHECK(cublasLoggerConfigure(1, 1, 0, nullptr)); - for (int id = 0; id < info.device_count; ++id) { - ggml_cuda_set_device(id); - for (int id_other = 0; id_other < info.device_count; ++id_other) { - if (id == id_other) { - continue; - } - int can_access_peer; - CUDA_CHECK(cudaDeviceCanAccessPeer(&can_access_peer, id, id_other)); - if (can_access_peer) { - CUDA_CHECK(cudaDeviceEnablePeerAccess(id_other, 0)); + if (getenv("GGML_CUDA_P2P") != nullptr) { + for (int id = 0; id < info.device_count; ++id) { + ggml_cuda_set_device(id); + for (int id_other = 0; id_other < info.device_count; ++id_other) { + if (id == id_other) { + continue; + } + int can_access_peer; + CUDA_CHECK(cudaDeviceCanAccessPeer(&can_access_peer, id, id_other)); + if (can_access_peer) { + CUDA_CHECK(cudaDeviceEnablePeerAccess(id_other, 0)); + } } } }