Source code for torch.nn.utils.rnn
```python
import warnings
from typing import Iterable, List, NamedTuple, Tuple, Union

import torch
from torch import Tensor
from ... import _VF
from ..._jit_internal import Optional

__all__ = ['PackedSequence', 'invert_permutation', 'pack_padded_sequence',
           'pad_packed_sequence', 'pad_sequence', 'unpad_sequence',
           'pack_sequence', 'unpack_sequence']


class PackedSequence_(NamedTuple):
    # Raw 4-tuple backing storage for PackedSequence (see subclass below
    # for the documented invariants on each field).
    data: torch.Tensor
    batch_sizes: torch.Tensor
    sorted_indices: Optional[torch.Tensor]
    unsorted_indices: Optional[torch.Tensor]


def bind(optional, fn):
    """Apply ``fn`` to ``optional`` unless it is ``None`` (Optional.map)."""
    if optional is None:
        return None
    return fn(optional)


class PackedSequence(PackedSequence_):
    r"""Holds the data and list of :attr:`batch_sizes` of a packed sequence.

    All RNN modules accept packed sequences as inputs.

    Note:
        Instances of this class should never be created manually. They are meant
        to be instantiated by functions like :func:`pack_padded_sequence`.

        Batch sizes represent the number elements at each sequence step in
        the batch, not the varying sequence lengths passed to
        :func:`pack_padded_sequence`.  For instance, given data ``abc`` and ``x``
        the :class:`PackedSequence` would contain data ``axbc`` with
        ``batch_sizes=[2,1,1]``.

    Attributes:
        data (Tensor): Tensor containing packed sequence
        batch_sizes (Tensor): Tensor of integers holding
            information about the batch size at each sequence step
        sorted_indices (Tensor, optional): Tensor of integers holding how this
            :class:`PackedSequence` is constructed from sequences.
        unsorted_indices (Tensor, optional): Tensor of integers holding how this
            to recover the original sequences with correct order.

    .. note::
        :attr:`data` can be on arbitrary device and of arbitrary dtype.
        :attr:`sorted_indices` and :attr:`unsorted_indices` must be ``torch.int64``
        tensors on the same device as :attr:`data`.

        However, :attr:`batch_sizes` should always be a CPU ``torch.int64`` tensor.

        This invariant is maintained throughout :class:`PackedSequence` class,
        and all functions that construct a :class:`PackedSequence` in PyTorch
        (i.e., they only pass in tensors conforming to this constraint).
    """

    def __new__(cls, data, batch_sizes=None, sorted_indices=None, unsorted_indices=None):
        # Validation/normalization of the four fields is delegated to the
        # module-level helper; this class only wraps the resulting tuple.
        return super().__new__(
            cls,
            *_packed_sequence_init_args(data, batch_sizes, sorted_indices,
                                        unsorted_indices))

    # NOTE [ device and dtype of a PackedSequence ]
    #
    # See the note above in doc string (starting with ":attr:`data` can be on
    # arbitrary device...").

    def pin_memory(self):
        """Return a copy with :attr:`data` (and index tensors) in pinned memory."""
        # Why not convert `batch_sizes`?
        # See NOTE [ device and dtype of a PackedSequence ]
        return type(self)(self.data.pin_memory(), self.batch_sizes,
                          bind(self.sorted_indices, lambda t: t.pin_memory()),
                          bind(self.unsorted_indices, lambda t: t.pin_memory()))

    def cuda(self, *args, **kwargs):
        """Return a copy moved to CUDA memory (delegates to :meth:`to`)."""
        # Tests to see if 'cuda' should be added to kwargs: probe with an
        # empty tensor so user-supplied args/kwargs keep precedence.
        ex = torch.tensor((), dtype=self.data.dtype,
                          device=self.data.device).to(*args, **kwargs)
        if ex.is_cuda:
            return self.to(*args, **kwargs)
        return self.to(*args, device='cuda', **kwargs)

    def cpu(self, *args, **kwargs):
        """Return a copy moved to CPU memory (delegates to :meth:`to`)."""
        # Same probe trick as in `cuda` above.
        ex = torch.tensor((), dtype=self.data.dtype,
                          device=self.data.device).to(*args, **kwargs)
        if ex.device.type == 'cpu':
            return self.to(*args, **kwargs)
        return self.to(*args, device='cpu', **kwargs)

    def double(self):
        """Return a copy with :attr:`data` cast to ``torch.double``."""
        return self.to(dtype=torch.double)

    # NOTE(review): the scraped source is truncated at this point (an HTML
    # ``<span`` fragment followed the last ``def``); the remaining dtype
    # conversion methods are not visible in this chunk and are not guessed at.