
Source code for torch.cuda

r"""
此包添加了对CUDA张量类型的支持。

它实现了与CPU张量相同的功能,但它们利用
GPU进行计算。

它是延迟初始化的,因此您可以始终导入它,并使用
:func:`is_available()`来确定您的系统是否支持CUDA。

:ref:`cuda-semantics`提供了更多关于使用CUDA的详细信息。
"""


import contextlib
import importlib
import os
import sys
import threading
import traceback
import warnings
from functools import lru_cache
from typing import Any, Callable, cast, List, Optional, Tuple, Union

import torch
import torch._C
from torch.types import Device
from .. import device as _device
from .._utils import _dummy_type, _LazySeedTracker, classproperty
from ._utils import _get_device_index
from .graphs import (
    CUDAGraph,
    graph,
    graph_pool_handle,
    is_current_stream_capturing,
    make_graphed_callables,
)
from .streams import Event, ExternalStream, Stream

try:
    from torch._C import _cudart  # type: ignore[attr-defined]
except ImportError:
    _cudart = None
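# Code that needs the CUDA runtime bindings can guard on the optional import
# above; a minimal sketch of the pattern (the helper below is hypothetical,
# for illustration only):
#
#     def _require_cudart():
#         if _cudart is None:
#             raise RuntimeError("CUDA runtime bindings are not available in this build")
#         return _cudart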

_initialized = False
_tls = threading.local()
_initialization_lock = threading.Lock()
_queued_calls: List[
    Tuple[Callable[[], None], List[str]]
] = []  # don't invoke these until initialization occurs
_is_in_bad_fork = getattr(torch._C, "_cuda_isInBadFork", lambda: False)
_device_t = Union[_device, str, int, None]

_HAS_PYNVML = False
_PYNVML_ERR = None
try:
    import pynvml  # type: ignore[import]

    _HAS_PYNVML = True
except ImportError as err:
    _PYNVML_ERR = err  # sometimes the lib is installed but the import fails for some reason, so we record the error for later use
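# Recording the failed import lets NVML-dependent entry points surface the
# original cause later instead of a bare NameError; a sketch of the pattern
# (hypothetical helper, for illustration only):
#
#     def _require_pynvml():
#         if not _HAS_PYNVML:
#             raise ModuleNotFoundError("pynvml is required for this API") from _PYNVML_ERR
#         return pynvml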

_lazy_seed_tracker = _LazySeedTracker()

# Define dummy _CudaDeviceProperties type if PyTorch was compiled without CUDA
if hasattr(torch._C, "_CudaDeviceProperties"):
    _CudaDeviceProperties = torch._C._CudaDeviceProperties
else:
    _CudaDeviceProperties = _dummy_type("_CudaDeviceProperties")  # type: ignore[assignment, misc]
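# ``_dummy_type`` (from torch._utils) stands in for C-extension types that are
# absent in CPU-only builds. Conceptually it works like the sketch below
# (illustrative only; the real helper lives in torch._utils and may differ):
#
#     def _make_dummy_type(name: str) -> type:
#         def _init(self, *args, **kwargs):
#             raise RuntimeError(f"Tried to instantiate dummy base class {name}")
#         return type(name, (object,), {"__init__": _init})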

if hasattr(torch._C, "_cuda_exchangeDevice"):
    _exchange_device = torch._C._cuda_exchangeDevice
else:

    def _exchange_device(device: int) -> int:
        if device < 0:
            return -1
        raise RuntimeError("PyTorch是在没有CUDA支持的情况下编译的")


if hasattr(torch._C, "_cuda_maybeExchangeDevice"):
    _maybe_exchange_device = torch._C._cuda_maybeExchangeDevice
else:

    def _maybe_exchange_device(device: int) -> int:
        if device < 0:
            return -1
        raise RuntimeError("PyTorch是在没有CUDA支持的情况下编译的")


has_half: bool = True
has_magma: bool = torch._C._has_magma

default_generators: Tuple[torch._C.Generator] = ()  # type: ignore[assignment]


def _is_compiled() -> bool:
    r"""如果编译时带有CUDA支持,返回true。"""
    return hasattr(torch._C, "_cuda_getDeviceCount")


def _nvml_based_avail() -> bool:
    return os.getenv("PYTORCH_NVML_BASED_CUDA_CHECK") == "1"
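# The opt-in is read from the environment at call time, so it is typically set
# when launching the process; for example (illustrative only):
#
#     PYTORCH_NVML_BASED_CUDA_CHECK=1 python train.py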


def is_available() -> bool:
    r"""Return a bool indicating if CUDA is currently available."""
    if not _is_compiled():
        return False
    if _nvml_based_avail():
        # The user has set an env variable to request this availability check that attempts
        # to avoid fork poisoning caused by the CUDA driver API, at the cost of a weaker
        # CUDA availability assessment. Note that if NVML discovery/initialization fails,
        # this assessment falls back to the default CUDA Runtime API assessment
        # (`cudaGetDeviceCount`).
        return device_count() > 0
    else:
        # The default availability inspection never throws and returns 0 if the driver is
        # missing or can't be initialized
        return torch._C._cuda_getDeviceCount() > 0
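# The NVML-based path matters mostly when availability is probed in a parent
# process that later forks workers, since initializing the CUDA driver before
# fork poisons the children. A sketch of the fork-safe pattern (illustrative
# only):
#
#     import os
#     os.environ["PYTORCH_NVML_BASED_CUDA_CHECK"] = "1"
#     import torch
#
#     use_cuda = torch.cuda.is_available()  # driver-free probe; fork-safe
#     # ... fork DataLoader workers here; each child may then initialize CUDA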