IRs
PyTorch 2.0 offers two sets of IRs for backends to interface with: Core Aten IR and Prims IR.
Core Aten IR
Core aten ops is the core subset of aten operators that can be used to compose other operators. Core Aten IR is fully functional: there are no inplace or _out variants in this opset. In contrast to Prims IR, core aten ops reuse the existing aten ops defined in native_functions.yaml, and they are not further decomposed into explicit type-promotion and broadcasting ops. This opset is designed to serve as the functional IR to interface with backends.
Warning

This opset is still under active development; more operators will be added in the future.
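For a concrete picture of how a program reaches this opset, here is a minimal sketch using torch.export (the module name and shapes are illustrative; calling run_decompositions() with no arguments targets the core aten decomposition table, and the exact printed graph varies by PyTorch version):

```python
import torch

class MLP(torch.nn.Module):
    def __init__(self):
        super().__init__()
        self.fc = torch.nn.Linear(4, 4)

    def forward(self, x):
        return torch.nn.functional.relu(self.fc(x))

# Capture the program, then decompose it to the Core Aten opset.
ep = torch.export.export(MLP(), (torch.randn(2, 4),))
core_aten_ep = ep.run_decompositions()

# The resulting graph only calls ops from the table below,
# e.g. torch.ops.aten.addmm.default, torch.ops.aten.relu.default.
print(core_aten_ep.graph_module.code)
```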
| Operator | Schema |
|---|---|
| aten._adaptive_avg_pool2d | _adaptive_avg_pool2d(Tensor self, SymInt[2] output_size) -> Tensor |
| aten._adaptive_avg_pool2d_backward | _adaptive_avg_pool2d_backward(Tensor grad_output, Tensor self) -> Tensor |
| aten._adaptive_avg_pool3d | _adaptive_avg_pool3d(Tensor self, SymInt[3] output_size) -> Tensor |
| aten._cdist_forward | _cdist_forward(Tensor x1, Tensor x2, float p, int? compute_mode) -> Tensor |
| aten._embedding_bag | _embedding_bag(Tensor weight, Tensor indices, Tensor offsets, bool scale_grad_by_freq=False, int mode=0, bool sparse=False, Tensor? per_sample_weights=None, bool include_last_offset=False, int padding_idx=-1) -> (Tensor, Tensor, Tensor, Tensor) |
| aten._local_scalar_dense | _local_scalar_dense(Tensor self) -> Scalar |
| aten._log_softmax | _log_softmax(Tensor self, int dim, bool half_to_float) -> Tensor |
| aten._native_batch_norm_legit | _native_batch_norm_legit(Tensor input, Tensor? weight, Tensor? bias, Tensor(a!) running_mean, Tensor(b!) running_var, bool training, float momentum, float eps) -> (Tensor, Tensor, Tensor) |
| aten._native_batch_norm_legit.no_stats | _native_batch_norm_legit.no_stats(Tensor input, Tensor? weight, Tensor? bias, bool training, float momentum, float eps) -> (Tensor, Tensor, Tensor) |
| aten._native_batch_norm_legit_no_training | _native_batch_norm_legit_no_training(Tensor input, Tensor? weight, Tensor? bias, Tensor running_mean, Tensor running_var, float momentum, float eps) -> (Tensor, Tensor, Tensor) |
| aten._pdist_forward | _pdist_forward(Tensor self, float p=2) -> Tensor |
| aten._softmax | _softmax(Tensor self, int dim, bool half_to_float) -> Tensor |
| aten._to_copy | _to_copy(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, bool non_blocking=False, MemoryFormat? memory_format=None) -> Tensor |
| aten.abs | abs(Tensor self) -> Tensor |
| aten.acos | acos(Tensor self) -> Tensor |
| aten.acosh | acosh(Tensor self) -> Tensor |
| aten.adaptive_avg_pool1d | adaptive_avg_pool1d(Tensor self, int[1] output_size) -> Tensor |
| aten.add.Scalar | add.Scalar(Tensor self, Scalar other, Scalar alpha=1) -> Tensor |
| aten.add.Tensor | add.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor |
| aten.addmm | addmm(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1) -> Tensor |
| aten.alias | alias(Tensor(a) self) -> Tensor(a) |
| aten.amax | amax(Tensor self, int[1] dim=[], bool keepdim=False) -> Tensor |
| aten.amin | amin(Tensor self, int[1] dim=[], bool keepdim=False) -> Tensor |
| aten.any | any(Tensor self) -> Tensor |
| aten.any.dim | any.dim(Tensor self, int dim, bool keepdim=False) -> Tensor |
| aten.any.dims | any.dims(Tensor self, int[]? dim=None, bool keepdim=False) -> Tensor |
| aten.arange.start_step | arange.start_step(Scalar start, Scalar end, Scalar step=1, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor |
| aten.argmax | argmax(Tensor self, int? dim=None, bool keepdim=False) -> Tensor |
| aten.argmin | argmin(Tensor self, int? dim=None, bool keepdim=False) -> Tensor |
| aten.as_strided | as_strided(Tensor(a) self, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None) -> Tensor(a) |
| aten.asin | asin(Tensor self) -> Tensor |
| aten.asinh | asinh(Tensor self) -> Tensor |
| aten.atan | atan(Tensor self) -> Tensor |
| aten.atan2 | atan2(Tensor self, Tensor other) -> Tensor |
| aten.atan2.out | atan2.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) |
| aten.atanh | atanh(Tensor self) -> Tensor |
| aten.avg_pool1d | avg_pool1d(Tensor self, int[1] kernel_size, int[1] stride=[], int[1] padding=0, bool ceil_mode=False, bool count_include_pad=True) -> Tensor |
| aten.avg_pool2d | avg_pool2d(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, bool ceil_mode=False, bool count_include_pad=True, int? divisor_override=None) -> Tensor |
| aten.avg_pool2d_backward | avg_pool2d_backward(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] stride, int[2] padding, bool ceil_mode, bool count_include_pad, int? divisor_override) -> Tensor |
| aten.avg_pool3d | avg_pool3d(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, bool ceil_mode=False, bool count_include_pad=True, int? divisor_override=None) -> Tensor |
| aten.bitwise_and.Scalar | bitwise_and.Scalar(Tensor self, Scalar other) -> Tensor |
| aten.bitwise_and.Tensor | bitwise_and.Tensor(Tensor self, Tensor other) -> Tensor |
| aten.bitwise_not | bitwise_not(Tensor self) -> Tensor |
| aten.bitwise_or.Scalar | bitwise_or.Scalar(Tensor self, Scalar other) -> Tensor |
| aten.bitwise_or.Tensor | bitwise_or.Tensor(Tensor self, Tensor other) -> Tensor |
| aten.bitwise_xor.Scalar | bitwise_xor.Scalar(Tensor self, Scalar other) -> Tensor |
| aten.bitwise_xor.Tensor | bitwise_xor.Tensor(Tensor self, Tensor other) -> Tensor |
| aten.bmm | bmm(Tensor self, Tensor mat2) -> Tensor |
| aten.cat | cat(Tensor[] tensors, int dim=0) -> Tensor |
| aten.ceil | ceil(Tensor self) -> Tensor |
| aten.clamp | clamp(Tensor self, Scalar? min=None, Scalar? max=None) -> Tensor |
| aten.clamp.Tensor | clamp.Tensor(Tensor self, Tensor? min=None, Tensor? max=None) -> Tensor |
| aten.clone | clone(Tensor self, *, MemoryFormat? memory_format=None) -> Tensor |
| aten.col2im | col2im(Tensor self, SymInt[2] output_size, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride) -> Tensor |
| aten.constant_pad_nd | constant_pad_nd(Tensor self, SymInt[] pad, Scalar value=0) -> Tensor |
| aten.convolution | convolution(Tensor input, Tensor weight, Tensor? bias, SymInt[] stride, SymInt[] padding, SymInt[] dilation, bool transposed, SymInt[] output_padding, SymInt groups) -> Tensor |
| aten.convolution_backward | convolution_backward(Tensor grad_output, Tensor input, Tensor weight, SymInt[]? bias_sizes, SymInt[] stride, SymInt[] padding, SymInt[] dilation, bool transposed, SymInt[] output_padding, SymInt groups, bool[3] output_mask) -> (Tensor, Tensor, Tensor) |
| aten.copy | copy(Tensor self, Tensor src, bool non_blocking=False) -> Tensor |
| aten.cos | cos(Tensor self) -> Tensor |
| aten.cosh | cosh(Tensor self) -> Tensor |
| aten.cumsum | cumsum(Tensor self, int dim, *, ScalarType? dtype=None) -> Tensor |
| aten.diagonal | diagonal(Tensor(a) self, int offset=0, int dim1=0, int dim2=1) -> Tensor(a) |
| aten.div.Scalar | div.Scalar(Tensor self, Scalar other) -> Tensor |
| aten.div.Scalar_mode | div.Scalar_mode(Tensor self, Scalar other, *, str? rounding_mode) -> Tensor |
| aten.div.Tensor | div.Tensor(Tensor self, Tensor other) -> Tensor |
| aten.div.Tensor_mode | div.Tensor_mode(Tensor self, Tensor other, *, str? rounding_mode) -> Tensor |
| aten.embedding | embedding(Tensor weight, Tensor indices, SymInt padding_idx=-1, bool scale_grad_by_freq=False, bool sparse=False) -> Tensor |
| aten.embedding_dense_backward | embedding_dense_backward(Tensor grad_output, Tensor indices, SymInt num_weights, SymInt padding_idx, bool scale_grad_by_freq) -> Tensor |
| aten.empty.memory_format | empty.memory_format(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor |
| aten.empty_strided | empty_strided(SymInt[] size, SymInt[] stride, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor |
| aten.eq.Scalar | eq.Scalar(Tensor self, Scalar other) -> Tensor |
| aten.eq.Tensor | eq.Tensor(Tensor self, Tensor other) -> Tensor |
| aten.erf | erf(Tensor self) -> Tensor |
| aten.exp | exp(Tensor self) -> Tensor |
| aten.expand | expand(Tensor(a) self, SymInt[] size, *, bool implicit=False) -> Tensor(a) |
| aten.expm1 | expm1(Tensor self) -> Tensor |
| aten.fill.Scalar | fill.Scalar(Tensor self, Scalar value) -> Tensor |
| aten.flip | flip(Tensor self, int[] dims) -> Tensor |
| aten.floor | floor(Tensor self) -> Tensor |
| aten.fmod.Scalar | fmod.Scalar(Tensor self, Scalar other) -> Tensor |
| aten.fmod.Tensor | fmod.Tensor(Tensor self, Tensor other) -> Tensor |
| aten.full | full(SymInt[] size, Scalar fill_value, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor |
| aten.gather | gather(Tensor self, int dim, Tensor index, *, bool sparse_grad=False) -> Tensor |
| aten.ge.Scalar | ge.Scalar(Tensor self, Scalar other) -> Tensor |
| aten.ge.Tensor | ge.Tensor(Tensor self, Tensor other) -> Tensor |
| aten.gelu | gelu(Tensor self, *, str approximate='none') -> Tensor |
| aten.grid_sampler_2d | grid_sampler_2d(Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners) -> Tensor |
| aten.gt.Scalar | gt.Scalar(Tensor self, Scalar other) -> Tensor |
| aten.gt.Tensor | gt.Tensor(Tensor self, Tensor other) -> Tensor |
| aten.hardtanh | hardtanh(Tensor self, Scalar min_val=-1, Scalar max_val=1) -> Tensor |
| aten.index.Tensor | index.Tensor(Tensor self, Tensor?[] indices) -> Tensor |
| aten.index_put | index_put(Tensor self, Tensor?[] indices, Tensor values, bool accumulate=False) -> Tensor |
| aten.index_select | index_select(Tensor self, int dim, Tensor index) -> Tensor |
| aten.isinf | isinf(Tensor self) -> Tensor |
| aten.isnan | isnan(Tensor self) -> Tensor |
| aten.le.Scalar | le.Scalar(Tensor self, Scalar other) -> Tensor |
| aten.le.Tensor | le.Tensor(Tensor self, Tensor other) -> Tensor |
| aten.leaky_relu | leaky_relu(Tensor self, Scalar negative_slope=0.01) -> Tensor |
| aten.log | log(Tensor self) -> Tensor |
| aten.log10 | log10(Tensor self) -> Tensor |
| aten.log1p | log1p(Tensor self) -> Tensor |
| aten.log2 | log2(Tensor self) -> Tensor |
| aten.logical_and | logical_and(Tensor self, Tensor other) -> Tensor |
| aten.logical_not | logical_not(Tensor self) -> Tensor |
| aten.logical_or | logical_or(Tensor self, Tensor other) -> Tensor |
| aten.logical_xor | logical_xor(Tensor self, Tensor other) -> Tensor |
| aten.lt.Scalar | lt.Scalar(Tensor self, Scalar other) -> Tensor |
| aten.lt.Tensor | lt.Tensor(Tensor self, Tensor other) -> Tensor |
| aten.max.dim | max.dim(Tensor self, int dim, bool keepdim=False) -> (Tensor values, Tensor indices) |
| aten.max_pool2d_with_indices | max_pool2d_with_indices(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False) -> (Tensor, Tensor) |
| aten.max_pool2d_with_indices_backward | max_pool2d_with_indices_backward(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] stride, int[2] padding, int[2] dilation, bool ceil_mode, Tensor indices) -> Tensor |
| aten.max_pool3d_with_indices | max_pool3d_with_indices(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False) -> (Tensor, Tensor) |
| aten.maximum | maximum(Tensor self, Tensor other) -> Tensor |
| aten.mean | mean(Tensor self, *, ScalarType? dtype=None) -> Tensor |
| aten.mean.dim | mean.dim(Tensor self, int[1]? dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor |
| aten.min.dim | min.dim(Tensor self, int dim, bool keepdim=False) -> (Tensor values, Tensor indices) |
| aten.minimum | minimum(Tensor self, Tensor other) -> Tensor |
| aten.mm | mm(Tensor self, Tensor mat2) -> Tensor |
| aten.mul.Scalar | mul.Scalar(Tensor self, Scalar other) -> Tensor |
| aten.mul.Tensor | mul.Tensor(Tensor self, Tensor other) -> Tensor |
| aten.native_dropout | native_dropout(Tensor input, float p, bool? train) -> (Tensor, Tensor) |
| aten.native_group_norm | native_group_norm(Tensor input, Tensor? weight, Tensor? bias, SymInt N, SymInt C, SymInt HxW, int group, float eps) -> (Tensor, Tensor, Tensor) |
| aten.native_group_norm_backward | native_group_norm_backward(Tensor grad_out, Tensor input, Tensor mean, Tensor rstd, Tensor? weight, SymInt N, SymInt C, SymInt HxW, int group, bool[3] output_mask) -> (Tensor, Tensor, Tensor) |
| aten.native_layer_norm | native_layer_norm(Tensor input, SymInt[] normalized_shape, Tensor? weight, Tensor? bias, float eps) -> (Tensor, Tensor, Tensor) |
| aten.native_layer_norm_backward | native_layer_norm_backward(Tensor grad_out, Tensor input, SymInt[] normalized_shape, Tensor mean, Tensor rstd, Tensor? weight, Tensor? bias, bool[3] output_mask) -> (Tensor, Tensor, Tensor) |
| aten.ne.Scalar | ne.Scalar(Tensor self, Scalar other) -> Tensor |
| aten.ne.Tensor | ne.Tensor(Tensor self, Tensor other) -> Tensor |
| aten.neg | neg(Tensor self) -> Tensor |
| aten.nonzero | nonzero(Tensor self) -> Tensor |
| aten.permute | permute(Tensor(a) self, int[] dims) -> Tensor(a) |
| aten.pow.Scalar | pow.Scalar(Scalar self, Tensor exponent) -> Tensor |
| aten.pow.Tensor_Scalar | pow.Tensor_Scalar(Tensor self, Scalar exponent) -> Tensor |
| aten.pow.Tensor_Tensor | pow.Tensor_Tensor(Tensor self, Tensor exponent) -> Tensor |
| aten.prod | prod(Tensor self, *, ScalarType? dtype=None) -> Tensor |
| aten.prod.dim_int | prod.dim_int(Tensor self, int dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor |
| aten.rand | rand(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor |
| aten.randn | randn(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor |
| aten.randperm | randperm(SymInt n, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor |
| aten.reciprocal | reciprocal(Tensor self) -> Tensor |
| aten.reflection_pad1d | reflection_pad1d(Tensor self, SymInt[2] padding) -> Tensor |
| aten.reflection_pad2d | reflection_pad2d(Tensor self, SymInt[4] padding) -> Tensor |
| aten.reflection_pad3d | reflection_pad3d(Tensor self, SymInt[6] padding) -> Tensor |
| aten.relu | relu(Tensor self) -> Tensor |
| aten.remainder.Scalar | remainder.Scalar(Tensor self, Scalar other) -> Tensor |
| aten.remainder.Tensor | remainder.Tensor(Tensor self, Tensor other) -> Tensor |
| aten.repeat | repeat(Tensor self, SymInt[] repeats) -> Tensor |
| aten.replication_pad2d | replication_pad2d(Tensor self, SymInt[4] padding) -> Tensor |
| aten.replication_pad3d | replication_pad3d(Tensor self, SymInt[6] padding) -> Tensor |
| aten.resize_ | resize_(Tensor(a!) self, SymInt[] size, *, MemoryFormat? memory_format=None) -> Tensor(a!) |
| aten.round | round(Tensor self) -> Tensor |
| aten.rsqrt | rsqrt(Tensor self) -> Tensor |
| aten.scalar_tensor | scalar_tensor(Scalar s, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor |
| aten.scatter.src | scatter.src(Tensor self, int dim, Tensor index, Tensor src) -> Tensor |
| aten.scatter.value | scatter.value(Tensor self, int dim, Tensor index, Scalar value) -> Tensor |
| aten.scatter_add | scatter_add(Tensor self, int dim, Tensor index, Tensor src) -> Tensor |
| aten.scatter_reduce.two | scatter_reduce.two(Tensor self, int dim, Tensor index, Tensor src, str reduce, *, bool include_self=True) -> Tensor |
| aten.select.int | select.int(Tensor(a) self, int dim, SymInt index) -> Tensor(a) |
| aten.select_scatter | select_scatter(Tensor self, Tensor src, int dim, SymInt index) -> Tensor |
| aten.sigmoid | sigmoid(Tensor self) -> Tensor |
| aten.sign | sign(Tensor self) -> Tensor |
| aten.sin | sin(Tensor self) -> Tensor |
| aten.sinh | sinh(Tensor self) -> Tensor |
| aten.slice.Tensor | slice.Tensor(Tensor(a) self, int dim=0, SymInt? start=None, SymInt? end=None, SymInt step=1) -> Tensor(a) |
| aten.slice_scatter | slice_scatter(Tensor self, Tensor src, int dim=0, SymInt? start=None, SymInt? end=None, SymInt step=1) -> Tensor |
| aten.sort | sort(Tensor self, int dim=-1, bool descending=False) -> (Tensor values, Tensor indices) |
| aten.split_with_sizes | split_with_sizes(Tensor(a -> *) self, SymInt[] split_sizes, int dim=0) -> Tensor(a)[] |
| aten.sqrt | sqrt(Tensor self) -> Tensor |
| aten.squeeze.dim | squeeze.dim(Tensor(a) self, int dim) -> Tensor(a) |
| aten.squeeze.dims | squeeze.dims(Tensor(a) self, int[] dim) -> Tensor(a) |
| aten.sub.Scalar | sub.Scalar(Tensor self, Scalar other, Scalar alpha=1) -> Tensor |
| aten.sub.Tensor | sub.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor |
| aten.sum.dim_IntList | sum.dim_IntList(Tensor self, int[1]? dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor |
| aten.sym_numel | sym_numel(Tensor self) -> SymInt |
| aten.sym_size.int | sym_size.int(Tensor self, int dim) -> SymInt |
| aten.sym_storage_offset | sym_storage_offset(Tensor self) -> SymInt |
| aten.sym_stride.int | sym_stride.int(Tensor self, int dim) -> SymInt |
| aten.tan | tan(Tensor self) -> Tensor |
| aten.tanh | tanh(Tensor self) -> Tensor |
| aten.topk | topk(Tensor self, SymInt k, int dim=-1, bool largest=True, bool sorted=True) -> (Tensor values, Tensor indices) |
| aten.trunc | trunc(Tensor self) -> Tensor |
| aten.unsqueeze | unsqueeze(Tensor(a) self, int dim) -> Tensor(a) |
| aten.upsample_bilinear2d.vec | upsample_bilinear2d.vec(Tensor input, SymInt[]? output_size, bool align_corners, float[]? scale_factors) -> Tensor |
| aten.upsample_nearest2d.vec | upsample_nearest2d.vec(Tensor input, SymInt[]? output_size, float[]? scale_factors) -> Tensor |
| aten.var.correction | var.correction(Tensor self, int[1]? dim=None, *, Scalar? correction=None, bool keepdim=False) -> Tensor |
| aten.var.dim | var.dim(Tensor self, int[1]? dim, bool unbiased=True, bool keepdim=False) -> Tensor |
| aten.view | view(Tensor(a) self, SymInt[] size) -> Tensor(a) |
| aten.where.self | where.self(Tensor condition, Tensor self, Tensor other) -> Tensor |
Prims IR
Prims IR is a set of primitive operators that can be used to compose other operators. Prims IR is a lower-level opset than Core Aten IR, and it further decomposes ops into explicit type-promotion and broadcasting ops: prims.convert_element_type and prims.broadcast_in_dim. This opset is designed to interface with compiler backends.
Warning

This opset is still under active development; more operators will be added in the future.
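To illustrate what "explicit type promotion and broadcasting" means for a compiler, the sketch below rewrites an eager a + b with prims calls. The tensor names are illustrative, and since the opset is in flux the exact set of torch.ops.prims.* entry points may vary by release:

```python
import torch

# Eager torch.add broadcasts and type-promotes implicitly; at the Prims
# level both steps must be spelled out before the elementwise op runs.
a = torch.randn(3, 1)   # float32, shape (3, 1)
b = torch.arange(4)     # int64,   shape (4,)

# Explicit broadcasting: map each input dim to an output dim of [3, 4].
a_b = torch.ops.prims.broadcast_in_dim(a, [3, 4], [0, 1])
b_b = torch.ops.prims.broadcast_in_dim(b, [3, 4], [1])

# Explicit type promotion: prims elementwise ops require matching dtypes.
b_f = torch.ops.prims.convert_element_type(b_b, torch.float32)

out = torch.ops.prims.add(a_b, b_f)
assert torch.allclose(out, a + b)
```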
| Operator | Schema |
|---|---|
| prims.abs | abs(Tensor self) -> Tensor |
| prims.acos | acos(Tensor self) -> Tensor |
| prims.acosh | acosh(Tensor self) -> Tensor |
| prims.asin | asin(Tensor self) -> Tensor |
| prims.asinh | asinh(Tensor self) -> Tensor |
| prims.atan | atan(Tensor self) -> Tensor |
| prims.atanh | atanh(Tensor self) -> Tensor |
| prims.cos | cos(Tensor self) -> Tensor |
| prims.cosh | cosh(Tensor self) -> Tensor |
| prims.bessel_i0 | bessel_i0(Tensor self) -> Tensor |
| prims.bessel_i0e | bessel_i0e(Tensor self) -> Tensor |
| prims.bessel_i1 | bessel_i1(Tensor self) -> Tensor |
| prims.bessel_i1e | bessel_i1e(Tensor self) -> Tensor |
| prims.bessel_j0 | bessel_j0(Tensor self) -> Tensor |
| prims.bessel_j1 | bessel_j1(Tensor self) -> Tensor |
| prims.bitwise_not | bitwise_not(Tensor self) -> Tensor |
| prims.cbrt | cbrt(Tensor self) -> Tensor |
| prims.ceil | ceil(Tensor self) -> Tensor |
| prims.conj_physical | conj_physical(Tensor self) -> Tensor |
| prims.digamma | digamma(Tensor self) -> Tensor |
| prims.erf | erf(Tensor self) -> Tensor |
| prims.erf_inv | erf_inv(Tensor self) -> Tensor |
| prims.erfc | erfc(Tensor self) -> Tensor |
| prims.erfcx | erfcx(Tensor self) -> Tensor |
| prims.exp | exp(Tensor self) -> Tensor |
| prims.expm1 | expm1(Tensor self) -> Tensor |
| prims.exp2 | exp2(Tensor self) -> Tensor |
| prims.fill | fill(Tensor self, Scalar value) -> Tensor |
| prims.floor | floor(Tensor self) -> Tensor |
| prims.imag | imag(Tensor self) -> Tensor |
| prims.isfinite | isfinite(Tensor self) -> Tensor |
| prims.lgamma | lgamma(Tensor self) -> Tensor |
| prims.log | log(Tensor self) -> Tensor |
| prims.log1p | log1p(Tensor self) -> Tensor |
| prims.log2 | log2(Tensor self) -> Tensor |
| prims.log10 | log10(Tensor self) -> Tensor |
| prims.ndtri | ndtri(Tensor self) -> Tensor |
| prims.neg | neg(Tensor self) -> Tensor |
| prims.real | real(Tensor self) -> Tensor |
| prims.reciprocal | reciprocal(Tensor self) -> Tensor |
| prims.round | round(Tensor self) -> Tensor |
| prims.sign | sign(Tensor self) -> Tensor |
| prims.signbit | signbit(Tensor self) -> Tensor |
| prims.sin | sin(Tensor self) -> Tensor |
| prims.sinh | sinh(Tensor self) -> Tensor |
| prims.spherical_bessel_j0 | spherical_bessel_j0(Tensor self) -> Tensor |
| prims.sqrt | sqrt(Tensor self) -> Tensor |
| prims.tan | tan(Tensor self) -> Tensor |
| prims.tanh | tanh(Tensor self) -> Tensor |
| prims.trunc | trunc(Tensor self) -> Tensor |
| prims.add | add(Tensor self, Tensor other) -> Tensor |
| prims.atan2 | atan2(Tensor self, Tensor other) -> Tensor |
| prims.bitwise_and | bitwise_and(Tensor self, Tensor other) -> Tensor |
| prims.bitwise_or | bitwise_or(Tensor self, Tensor other) -> Tensor |
| prims.bitwise_xor | bitwise_xor(Tensor self, Tensor other) -> Tensor |
| prims.div | div(Tensor self, Tensor other) -> Tensor |
| prims.eq | eq(Tensor self, Tensor other) -> Tensor |
| prims.fmax | fmax(Tensor self, Tensor other) -> Tensor |
| prims.fmin | fmin(Tensor self, Tensor other) -> Tensor |
| prims.fmod | fmod(Tensor self, Tensor other) -> Tensor |
| prims.frexp | frexp(Tensor self) -> (Tensor mantissa, Tensor exponent) |
| prims.gcd | gcd(Tensor self, Tensor other) -> Tensor |
| prims.ge | ge(Tensor self, Tensor other) -> Tensor |
| prims.gt | gt(Tensor self, Tensor other) -> Tensor |
| prims.hypot | hypot(Tensor self, Tensor other) -> Tensor |
| prims.igamma | igamma(Tensor self, Tensor other) -> Tensor |
| prims.igammac | igammac(Tensor self, Tensor other) -> Tensor |
| prims.le | le(Tensor self, Tensor other) -> Tensor |
| prims.lt | lt(Tensor self, Tensor other) -> Tensor |
| prims.maximum | maximum(Tensor self, Tensor other) -> Tensor |
| prims.minimum | minimum(Tensor self, Tensor other) -> Tensor |
| prims.mul | mul(Tensor self, Tensor other) -> Tensor |
| prims.ne | ne(Tensor self, Tensor other) -> Tensor |
| prims.nextafter | nextafter(Tensor self, Tensor other) -> Tensor |
| prims.pow | pow(Tensor self, Tensor other) -> Tensor |
| prims.remainder | remainder(Tensor self, Tensor other) -> Tensor |
| prims.rsqrt | rsqrt(Tensor self) -> Tensor |
| prims.shift_left | shift_left(Tensor self, Tensor other) -> Tensor |
| prims.shift_right_arithmetic | shift_right_arithmetic(Tensor self, Tensor other) -> Tensor |
| prims.sub | sub(Tensor self, Tensor other) -> Tensor |
| prims.zeta | zeta(Tensor self, Tensor other) -> Tensor |
| prims.as_strided | as_strided(Tensor(a!) a, SymInt[] size, SymInt[] stride, SymInt storage_offset) -> Tensor(a!) |
| prims.broadcast_in_dim | broadcast_in_dim(Tensor(a) a, SymInt[] shape, int[] broadcast_dimensions) -> Tensor(a) |
| prims.collapse_view | collapse_view(Tensor(a) a, int start, int end) -> Tensor(a) |
| prims.conj | conj(Tensor(a) a) -> Tensor(a) |
| prims.slice | slice(Tensor(a) a, SymInt[] start_indices, SymInt[] limit_indices, SymInt[]? strides=None) -> Tensor(a) |
| prims.slice_in_dim | slice_in_dim(Tensor(a) a, SymInt start_index, SymInt limit_index, int stride=1, int axis=0) -> Tensor(a) |
| prims.split_dim | split_dim(Tensor(a) a, int dim, SymInt outer_length) -> Tensor(a) |
| prims.squeeze | squeeze(Tensor(a) a, int[] dimensions) -> Tensor(a) |
| prims.transpose | transpose(Tensor(a) a, int[] permutation) -> Tensor(a) |
| prims.view_of | view_of(Tensor(a) a) -> Tensor |
| prims.view_of_dtype | view_of_dtype(Tensor(a) a, ScalarType dtype) -> Tensor |
| prims.as_strided_scatter | as_strided_scatter(Tensor self, Tensor src, SymInt[] size, SymInt[] stride, SymInt storage_offset) -> Tensor |
| prims.collapse | collapse(Tensor a, int start, int end) -> Tensor |
| prims.cat | cat(Tensor[] tensors, int dim) -> Tensor |
| prims.reshape | reshape(Tensor a, SymInt[] shape) -> Tensor |
| prims.rev | rev(Tensor a, int[] dims) -> Tensor |
| prims.where | where(Tensor pred, Tensor a, Tensor b) -> Tensor |
| prims.clone | clone(Tensor self, *, MemoryFormat? memory_format=None) -> Tensor |
| prims.convert_element_type | convert_element_type(Tensor a, ScalarType dtype) -> Tensor |
| prims.device_put | device_put(Tensor a, Device device) -> Tensor |
| prims.item | item(Tensor a) -> Scalar |
| prims.maximum_value | maximum_value(ScalarType dtype) -> Scalar |
| prims.minimum_value | minimum_value(ScalarType dtype) -> Scalar |
| prims.copy_strided | copy_strided(Tensor a, SymInt[] stride) -> Tensor |
| prims.copy_to | copy_to(Tensor(a!) a, Tensor b) -> Tensor(a!) |
| prims.resize | resize(Tensor(a!) a, SymInt[] shape) -> Tensor(a!) |
| prims.amax | amax(Tensor inp, int[]? dims, *, ScalarType? output_dtype=None) -> Tensor |
| prims.amin | amin(Tensor inp, int[]? dims, *, ScalarType? output_dtype=None) -> Tensor |
| prims.prod | prod(Tensor inp, int[]? dims, *, ScalarType? output_dtype=None) -> Tensor |
| prims.sum | sum(Tensor inp, int[]? dims, *, ScalarType? output_dtype=None) -> Tensor |
| prims.xor_sum | xor_sum(Tensor inp, int[]? dims, *, ScalarType? output_dtype=None) -> Tensor |
| prims.var | var(Tensor inp, int[]? dims, *, float correction, ScalarType? output_dtype=None) -> Tensor |
| prims.empty_strided | empty_strided(SymInt[] shape, SymInt[] strides, *, ScalarType dtype, Device device, bool requires_grad) -> Tensor |
| prims.empty_permuted | empty_permuted(SymInt[] shape, int[] physical_layout, *, ScalarType dtype, Device device, bool requires_grad) -> Tensor |
| prims.scalar_tensor | scalar_tensor(Scalar s, *, ScalarType? dtype=None, Device? device=None) -> Tensor |
| prims.iota | iota(SymInt length, *, SymInt start, SymInt step, ScalarType dtype, Device device, bool requires_grad) -> Tensor |
| prims.svd | svd(Tensor A, *, bool full_matrices) -> (Tensor U, Tensor S, Tensor Vh) |
| prims.normal | normal(SymInt[] shape, *, Scalar mean, Scalar std, ScalarType dtype, Device device, bool requires_grad, Generator? generator=None) -> Tensor |
| prims.uniform | uniform(SymInt[] shape, *, Scalar low, Scalar high, ScalarType dtype, Device device, Generator? generator=None) -> Tensor |
| prims.fft_r2c | fft_r2c(Tensor self, *, int[] dim, bool onesided) -> Tensor |
| prims.fft_c2c | fft_c2c(Tensor self, *, int[] dim, bool forward) -> Tensor |
| prims.fft_c2r | fft_c2r(Tensor self, *, int[] dim, SymInt last_dim_size) -> Tensor |