#pragma once

// @generated by aten/src/ATen/gen.py

#include <c10/core/TensorOptions.h>
#include <c10/core/Scalar.h>
#include <c10/core/QScheme.h>
#include <c10/core/MemoryFormat.h>
#include <c10/util/ArrayRef.h>
#include <c10/util/intrusive_ptr.h>
#include <torch/csrc/WindowsTorchApiMacro.h>
#include <ATen/Dimname.h>
#include <ATen/core/EnableNamedTensor.h>

namespace c10 {
struct Storage;
}

namespace at {

class Tensor;
using TensorList = ArrayRef<Tensor>;

class Context;
struct Generator;

struct Quantizer;
// This is a temporary typedef to enable Quantizer in the aten native
// function API; we'll remove it once the Quantizer class is actually
// exposed to the frontend.
using ConstQuantizerPtr = const c10::intrusive_ptr<Quantizer>&;
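//
// Illustrative only: a native function that accepts a quantizer would be
// declared against this alias. The signature below is hypothetical and is
// not part of this header:
//   Tensor make_quantized(const Tensor & self, ConstQuantizerPtr quantizer);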
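// When USE_STATIC_DISPATCH is defined (e.g. in mobile/static builds), the
// ATen frontend bypasses the dynamic dispatcher and binds calls directly to
// the backend kernels declared below. A minimal sketch, assuming the usual
// frontend from <ATen/ATen.h>:
//   at::Tensor t = at::ones({2, 2});
//   at::Tensor u = at::abs_(t);  // resolves to CPUType::abs_ in such builds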
#ifdef USE_STATIC_DISPATCH
namespace CPUType {
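// Naming conventions below mirror the ATen frontend: a trailing underscore
// (e.g. abs_) marks an in-place variant that mutates `self`, and an `_out`
// suffix (e.g. abs_out) writes its result into the `out` tensor passed as
// the first argument and returns a reference to it.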
Tensor & abs_(Tensor & self);
Tensor & abs_out(Tensor & out, const Tensor & self);
Tensor & acos_(Tensor & self);
Tensor & acos_out(Tensor & out, const Tensor & self);
Tensor add(const Tensor & self, const Tensor & other, Scalar alpha);
Tensor & add_(Tensor & self, const Tensor & other, Scalar alpha);
Tensor & add_out(Tensor & out, const Tensor & self, const Tensor & other, Scalar alpha);
Tensor addmv(const Tensor & self, const Tensor & mat, const Tensor & vec, Scalar beta, Scalar alpha);
Tensor & addmv_(Tensor & self, const Tensor & mat, const Tensor & vec, Scalar beta, Scalar alpha);
Tensor & addmv_out(Tensor & out, const Tensor & self, const Tensor & mat, const Tensor & vec, Scalar beta, Scalar alpha);
Tensor & arange_out(Tensor & out, Scalar start, Scalar end, Scalar step);
Tensor as_strided(const Tensor & self, IntArrayRef size, IntArrayRef stride, c10::optional<int64_t> storage_offset);
Tensor & asin_(Tensor & self);
Tensor & asin_out(Tensor & out, const Tensor & self);
Tensor & atan_(Tensor & self);
Tensor & atan_out(Tensor & out, const Tensor & self);
Tensor baddbmm(const Tensor & self, const Tensor & batch1, const Tensor & batch2, Scalar beta, Scalar alpha);
Tensor & baddbmm_(Tensor & self, const Tensor & batch1, const Tensor & batch2, Scalar beta, Scalar alpha);
Tensor & baddbmm_out(Tensor & out, const Tensor & self, const Tensor & batch1, const Tensor & batch2, Scalar beta, Scalar alpha);
Tensor & bernoulli_(Tensor & self, const Tensor & p, Generator * generator);
Tensor & bernoulli_(Tensor & self, double p, Generator * generator);
Tensor bincount(const Tensor & self, const Tensor & weights, int64_t minlength);
Tensor & bitwise_not_out(Tensor & out, const Tensor & self);
Tensor & logical_not_out(Tensor & out, const Tensor & self);
Tensor & logical_xor_out(Tensor & out, const Tensor & self, const Tensor & other);
Tensor bmm(const Tensor & self, const Tensor & mat2);
Tensor & bmm_out(Tensor & out, const Tensor & self, const Tensor & mat2);
Tensor & ceil_out(Tensor & out, const Tensor & self);
Tensor & clamp_(Tensor & self, c10::optional<Scalar> min, c10::optional<Scalar> max);
Tensor & clamp_out(Tensor & out, const Tensor & self, c10::optional<Scalar> min, c10::optional<Scalar> max);
Tensor & clamp_max_(Tensor & self, Scalar max);
Tensor & clamp_max_out(Tensor & out, const Tensor & self, Scalar max);
Tensor & clamp_min_(Tensor & self, Scalar min);
Tensor & clamp_min_out(Tensor & out, const Tensor & self, Scalar min);
Tensor & cos_(Tensor & self);
Tensor & cos_out(Tensor & out, const Tensor & self);
Tensor & cosh_(Tensor & self);
Tensor & cosh_out(Tensor & out, const Tensor & self);
std::tuple<Tensor,Tensor> _ctc_loss(const Tensor & log_probs, const Tensor & targets, IntArrayRef input_lengths, IntArrayRef target_lengths, int64_t blank, bool zero_infinity);
Tensor _ctc_loss_backward(const Tensor & grad, const Tensor & log_probs, const Tensor & targets, IntArrayRef input_lengths, IntArrayRef target_lengths, const Tensor & neg_log_likelihood, const Tensor & log_alpha, int64_t blank, bool zero_infinity);
Tensor div(const Tensor & self, const Tensor & other);
Tensor & div_(Tensor & self, const Tensor & other);
Tensor & div_out(Tensor & out, const Tensor & self, const Tensor & other);
Tensor dot(const Tensor & self, const Tensor & tensor);
Tensor embedding_dense_backward(const Tensor & grad_output, const Tensor & indices, int64_t num_weights, int64_t padding_idx, bool scale_grad_by_freq);
Tensor & embedding_renorm_(Tensor & self, const Tensor & indices, double max_norm, double norm_type);
std::tuple<Tensor,Tensor,Tensor,Tensor> _embedding_bag(const Tensor & weight, const Tensor & indices, const Tensor & offsets, bool scale_grad_by_freq, int64_t mode, bool sparse, const Tensor & per_sample_weights);
Tensor _embedding_bag_dense_backward(const Tensor & grad, const Tensor & indices, const Tensor & offsets, const Tensor & offset2bag, const Tensor & bag_size, const Tensor & maximum_indices, int64_t num_weights, bool scale_grad_by_freq, int64_t mode, const Tensor & per_sample_weights);
Tensor _embedding_bag_per_sample_weights_backward(const Tensor & grad, const Tensor & weight, const Tensor & indices, const Tensor & offsets, const Tensor & offset2bag, int64_t mode);
Tensor empty(IntArrayRef size, const TensorOptions & options, c10::optional<MemoryFormat> memory_format);
Tensor _empty_affine_quantized(IntArrayRef size, const TensorOptions & options, double scale, int64_t zero_point, c10::optional<MemoryFormat> memory_format);
Tensor _empty_per_channel_affine_quantized(IntArrayRef size, const Tensor & scales, const Tensor & zero_points, int64_t axis, const TensorOptions & options, c10::optional<MemoryFormat> memory_format);
Tensor & resize_(Tensor & self, IntArrayRef size);
Tensor empty_strided(IntArrayRef size, IntArrayRef stride, const TensorOptions & options);
Tensor & erf_(Tensor & self);
Tensor & erf_out(Tensor & out, const Tensor & self);
Tensor & erfc_(Tensor & self);
Tensor & erfc_out(Tensor & out, const Tensor & self);
Tensor & exp_(Tensor & self);
Tensor & exp_out(Tensor & out, const Tensor & self);
Tensor & expm1_out(Tensor & out, const Tensor & self);
Tensor & eye_out(Tensor & out, int64_t n);
Tensor & eye_out(Tensor & out, int64_t n, int64_t m);
Tensor & floor_out(Tensor & out, const Tensor & self);
Tensor & frac_(Tensor & self);
Tensor & frac_out(Tensor & out, const Tensor & self);
Tensor from_file(std::string filename, c10::optional<bool> shared, c10::optional<int64_t> size, const TensorOptions & options);
Tensor grid_sampler_2d(const Tensor & input, const Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners);
std::tuple<Tensor,Tensor> grid_sampler_2d_backward(const Tensor & grad_output, const Tensor & input, const Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners);
Tensor grid_sampler_3d(const Tensor & input, const Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners);
std::tuple<Tensor,Tensor> grid_sampler_3d_backward(const Tensor & grad_output, const Tensor & input, const Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners);
Tensor ger(const Tensor & self, const Tensor & vec2);
Tensor & ger_out(Tensor & out, const Tensor & self, const Tensor & vec2);
Tensor _fft_with_size(const Tensor & self, int64_t signal_ndim, bool complex_input, bool complex_output, bool inverse, IntArrayRef checked_signal_sizes, bool normalized, bool onesided, IntArrayRef output_sizes);
Tensor _inverse_helper(const Tensor & self);
Tensor kl_div_backward(const Tensor & grad_output, const Tensor & self, const Tensor & target, int64_t reduction);
std::tuple<Tensor &,Tensor &> kthvalue_out(Tensor & values, Tensor & indices, const Tensor & self, int64_t k, int64_t dim, bool keepdim);
std::tuple<Tensor,Tensor,Tensor> native_layer_norm(const Tensor & input, const Tensor & weight, const Tensor & bias, int64_t M, int64_t N, double eps);
std::tuple<Tensor,Tensor,Tensor> native_layer_norm_backward(const Tensor & grad_out, const Tensor & input, const Tensor & mean, const Tensor & rstd, const Tensor & weight, int64_t M, int64_t N, std::array<bool,3> output_mask);
std::tuple<Tensor,Tensor,Tensor> native_layer_norm_double_backward(const Tensor & ggI, const Tensor & ggW, const Tensor & ggb, const Tensor & gO, const Tensor & input, const Tensor & mean, const Tensor & rstd, const Tensor & weight, int64_t M, int64_t N, std::array<bool,3> output_mask);
Tensor & linspace_out(Tensor & out, Scalar start, Scalar end, int64_t steps);
Tensor & log_out(Tensor & out, const Tensor & self);
Tensor & log10_(Tensor & self);
Tensor & log10_out(Tensor & out, const Tensor & self);
Tensor & log1p_(Tensor & self);
Tensor & log1p_out(Tensor & out, const Tensor & self);
Tensor & log2_(Tensor & self);
Tensor & log2_out(Tensor & out, const Tensor & self);
Tensor & logspace_out(Tensor & out, Scalar start, Scalar end, int64_t steps, double base);
Tensor _log_softmax(const Tensor & self, int64_t dim, bool half_to_float);
Tensor _log_softmax_backward_data(const Tensor & grad_output, const Tensor & output, int64_t dim, const Tensor & self);
Tensor mean(const Tensor & self, c10::optional<ScalarType> dtype);
Tensor mean(const Tensor & self, IntArrayRef dim, bool keepdim, c10::optional<ScalarType> dtype);
Tensor & mean_out(Tensor & out, const Tensor & self, IntArrayRef dim, bool keepdim, c10::optional<ScalarType> dtype);
Tensor mean(const Tensor & self, DimnameList dim, bool keepdim, c10::optional<ScalarType> dtype);
Tensor & mean_out(Tensor & out, const Tensor & self, DimnameList dim, bool keepdim, c10::optional<ScalarType> dtype);
Tensor mm(const Tensor & self, const Tensor & mat2);
Tensor & mm_out(Tensor & out, const Tensor & self, const Tensor & mat2);
Tensor mul(const Tensor & self, const Tensor & other);
Tensor & mul_(Tensor & self, const Tensor & other);
Tensor & mul_out(Tensor & out, const Tensor & self, const Tensor & other);
Tensor mv(const Tensor & self, const Tensor & vec);
Tensor & mv_out(Tensor & out, const Tensor & self, const Tensor & vec);
Tensor narrow_copy(const Tensor & self, int64_t dim, int64_t start, int64_t length);
std::tuple<Tensor,Tensor,Tensor> native_batch_norm(const Tensor & input, const Tensor & weight, const Tensor & bias, const Tensor & running_mean, const Tensor & running_var, bool training, double momentum, double eps);
std::tuple<Tensor,Tensor,Tensor> native_batch_norm_backward(const Tensor & grad_out, const Tensor & input, const Tensor & weight, const Tensor & running_mean, const Tensor & running_var, const Tensor & save_mean, const Tensor & save_invstd, bool train, double eps, std::array<bool,3> output_mask);
std::tuple<Tensor,Tensor> batch_norm_update_stats(const Tensor & input, const Tensor & running_mean, const Tensor & running_var, double momentum);
Tensor & randperm_out(Tensor & out, int64_t n, Generator * generator);
Tensor & range_out(Tensor & out, Scalar start, Scalar end, Scalar step);
Tensor & reciprocal_(Tensor & self);
Tensor & reciprocal_out(Tensor & out, const Tensor & self);
Tensor & neg_out(Tensor & out, const Tensor & self);
Tensor repeat_interleave(const Tensor & repeats);
Tensor & round_out(Tensor & out, const Tensor & self);
Tensor relu(const Tensor & self);
Tensor & relu_(Tensor & self);
Tensor prelu(const Tensor & self, const Tensor & weight);
std::tuple<Tensor,Tensor> prelu_backward(const Tensor & grad_output, const Tensor & self, const Tensor & weight);
Tensor gelu(const Tensor & self);
Tensor gelu_backward(const Tensor & grad, const Tensor & self);
Tensor hardshrink(const Tensor & self, Scalar lambd);
Tensor hardshrink_backward(const Tensor & grad_out, const Tensor & self, Scalar lambd);
Tensor & rsqrt_out(Tensor & out, const Tensor & self);
Tensor sigmoid(const Tensor & self);
Tensor & sigmoid_(Tensor & self);
Tensor & sigmoid_out(Tensor & out, const Tensor & self);
Tensor & sin_(Tensor & self);
Tensor & sin_out(Tensor & out, const Tensor & self);
Tensor & sinh_(Tensor & self);
Tensor & sinh_out(Tensor & out, const Tensor & self);
Tensor _softmax(const Tensor & self, int64_t dim, bool half_to_float);
Tensor _softmax_backward_data(const Tensor & grad_output, const Tensor & output, int64_t dim, const Tensor & self);
Tensor & sspaddmm_out(Tensor & out, const Tensor & self, const Tensor & mat1, const Tensor & mat2, Scalar beta, Scalar alpha);
Tensor & sqrt_(Tensor & self);
Tensor & sqrt_out(Tensor & out, const Tensor & self);
Tensor & tan_(Tensor & self);
Tensor & tan_out(Tensor & out, const Tensor & self);
Tensor & tanh_(Tensor & self);
Tensor & tanh_out(Tensor & out, const Tensor & self);
Tensor flip(const Tensor & self, IntArrayRef dims);
Tensor roll(const Tensor & self, IntArrayRef shifts, IntArrayRef dims);
Tensor & trunc_out(Tensor & out, const Tensor & self);
std::tuple<Tensor,Tensor> _unique(const Tensor & self, bool sorted, bool return_inverse);
std::tuple<Tensor,Tensor,Tensor> unique_dim(const Tensor & self, int64_t dim, bool sorted, bool return_inverse, bool return_counts);
std::tuple<Tensor,Tensor,Tensor> unique_consecutive(const Tensor & self, bool return_inverse, bool return_counts, c10::optional<int64_t> dim);
std::tuple<Tensor,Tensor,Tensor> unique_dim_consecutive(const Tensor & self, int64_t dim, bool return_inverse, bool return_counts);
std::tuple<Tensor,Tensor,Tensor> _unique2(const Tensor & self, bool sorted, bool return_inverse, bool return_counts);
Tensor _s_where(const Tensor & condition, const Tensor & self, const Tensor & other);
Tensor _standard_gamma_grad(const Tensor & self, const Tensor & output);
Tensor _standard_gamma(const Tensor & self, Generator * generator);
Tensor _dirichlet_grad(const Tensor & x, const Tensor & alpha, const Tensor & total);
Tensor _sample_dirichlet(const Tensor & self, Generator * generator);
Tensor poisson(const Tensor & self, Generator * generator);
Tensor clone(const Tensor & self);
Tensor & pow_out(Tensor & out, const Tensor & self, Scalar exponent);
Tensor pow(const Tensor & self, Scalar exponent);
Tensor & zero_(Tensor & self);
Tensor & sub_out(Tensor & out, const Tensor & self, const Tensor & other, Scalar alpha);
Tensor sub(const Tensor & self, const Tensor & other, Scalar alpha);
Tensor & sub_(Tensor & self, const Tensor & other, Scalar alpha);
Tensor & addmm_out(Tensor & out, const Tensor & self, const Tensor & mat1, const Tensor & mat2, Scalar beta, Scalar alpha);
Tensor addmm(const Tensor & self, const Tensor & mat1, const Tensor & mat2, Scalar beta, Scalar alpha);
Tensor & addmm_(Tensor & self, const Tensor & mat1, const Tensor & mat2, Scalar beta, Scalar alpha);
Tensor to_sparse(const Tensor & self, int64_t sparse_dim);
Tensor to_sparse(const Tensor & self);
Tensor to_mkldnn(const Tensor & self);
Tensor quantize_per_tensor(const Tensor & self, double scale, int64_t zero_point, ScalarType dtype);
Tensor quantize_per_channel(const Tensor & self, const Tensor & scales, const Tensor & zero_points, int64_t axis, ScalarType dtype);
Tensor _make_per_tensor_quantized_tensor(const Tensor & self, double scale, int64_t zero_point);
Tensor _make_per_channel_quantized_tensor(const Tensor & self, const Tensor & scale, const Tensor & zero_point, int64_t axis);
Tensor fake_quantize_per_tensor_affine(const Tensor & self, double scale, int64_t zero_point, int64_t quant_min, int64_t quant_max);
Tensor fake_quantize_per_tensor_affine_backward(const Tensor & grad, const Tensor & self, double scale, int64_t zero_point, int64_t quant_min, int64_t quant_max);
Tensor fake_quantize_per_channel_affine(const Tensor & self, const Tensor & scale, const Tensor & zero_point, int64_t axis, int64_t quant_min, int64_t quant_max);
Tensor fake_quantize_per_channel_affine_backward(const Tensor & grad, const Tensor & self, const Tensor & scale, const Tensor & zero_point, int64_t axis, int64_t quant_min, int64_t quant_max);
Scalar _local_scalar_dense(const Tensor & self);
Tensor & set_(Tensor & self, Storage source);
Tensor & set_(Tensor & self, Storage source, int64_t storage_offset, IntArrayRef size, IntArrayRef stride);
Tensor & set_(Tensor & self, const Tensor & source);
Tensor & set_(Tensor & self);
bool is_set_to(const Tensor & self, const Tensor & tensor);
Tensor & masked_fill_(Tensor & self, const Tensor & mask, Scalar value);
Tensor & masked_fill_(Tensor & self, const Tensor & mask, const Tensor & value);
Tensor & masked_scatter_(Tensor & self, const Tensor & mask, const Tensor & source);
Tensor view(const Tensor & self, IntArrayRef size);
Tensor & put_(Tensor & self, const Tensor & index, const Tensor & source, bool accumulate);
Tensor & index_add_(Tensor & self, int64_t dim, const Tensor & index, const Tensor & source);
Tensor & index_fill_(Tensor & self, int64_t dim, const Tensor & index, Scalar value);
Tensor & index_fill_(Tensor & self, int64_t dim, const Tensor & index, const Tensor & value);
Tensor & scatter_(Tensor & self, int64_t dim, const Tensor & index, const Tensor & src);
Tensor & scatter_(Tensor & self, int64_t dim, const Tensor & index, Scalar value);
Tensor & scatter_add_(Tensor & self, int64_t dim, const Tensor & index, const Tensor & src);
Tensor __and__(const Tensor & self, Scalar other);
Tensor __and__(const Tensor & self, const Tensor & other);
Tensor & __iand__(Tensor & self, Scalar other);
Tensor & __iand__(Tensor & self, const Tensor & other);
Tensor __or__(const Tensor & self, Scalar other);
Tensor __or__(const Tensor & self, const Tensor & other);
Tensor & __ior__(Tensor & self, Scalar other);
Tensor & __ior__(Tensor & self, const Tensor & other);
Tensor __xor__(const Tensor & self, Scalar other);
Tensor __xor__(const Tensor & self, const Tensor & other);
Tensor & __ixor__(Tensor & self, Scalar other);
Tensor & __ixor__(Tensor & self, const Tensor & other);
Tensor __lshift__(const Tensor & self, Scalar other);
Tensor __lshift__(const Tensor & self, const Tensor & other);
Tensor & __ilshift__(Tensor & self, Scalar other);
Tensor & __ilshift__(Tensor & self, const Tensor & other);
Tensor __rshift__(const Tensor & self, Scalar other);
Tensor __rshift__(const Tensor & self, const Tensor & other);
Tensor & __irshift__(Tensor & self, Scalar other);
Tensor & __irshift__(Tensor & self, const Tensor & other);
Tensor & lgamma_(Tensor & self);
Tensor & tril_(Tensor & self, int64_t diagonal);
Tensor & triu_(Tensor & self, int64_t diagonal);
Tensor & renorm_(Tensor & self, Scalar p, int64_t dim, Scalar maxnorm);
Tensor & pow_(Tensor & self, Scalar exponent);
Tensor & pow_(Tensor & self, const Tensor & exponent);
Tensor & lerp_(Tensor & self, const Tensor & end, Scalar weight);
Tensor & lerp_(Tensor & self, const Tensor & end, const Tensor & weight);
Tensor & fmod_(Tensor & self, Scalar other);
Tensor & fmod_(Tensor & self, const Tensor & other);
Tensor & remainder_(Tensor & self, Scalar other);
Tensor & remainder_(Tensor & self, const Tensor & other);
Tensor & addbmm_(Tensor & self, const Tensor & batch1, const Tensor & batch2, Scalar beta, Scalar alpha);
Tensor & addbmm_out(Tensor & out, const Tensor & self, const Tensor & batch1, const Tensor & batch2, Scalar beta, Scalar alpha);
Tensor addbmm(const Tensor & self, const Tensor & batch1, const Tensor & batch2, Scalar beta, Scalar alpha);
Tensor & random_(Tensor & self, int64_t from, int64_t to, Generator * generator);
Tensor & random_(Tensor & self, int64_t to, Generator * generator);
Tensor & random_(Tensor & self, Generator * generator);
Tensor & uniform_(Tensor & self, double from, double to, Generator * generator);
Tensor & normal_(Tensor & self, double mean, double std, Generator * generator);
Tensor & cauchy_(Tensor & self, double median, double sigma, Generator * generator);
Tensor & log_normal_(Tensor & self, double mean, double std, Generator * generator);
Tensor & exponential_(Tensor & self, double lambd, Generator * generator);
Tensor & geometric_(Tensor & self, double p, Generator * generator);
Tensor & diag_out(Tensor & out, const Tensor & self, int64_t diagonal);
Tensor diag(const Tensor & self, int64_t diagonal);
Tensor & triu_out(Tensor & out, const Tensor & self, int64_t diagonal);
Tensor & tril_out(Tensor & out, const Tensor & self, int64_t diagonal);
Tensor tril_indices(int64_t row, int64_t col, int64_t offset, const TensorOptions & options);
Tensor triu_indices(int64_t row, int64_t col, int64_t offset, const TensorOptions & options);
Tensor trace(const Tensor & self);
Tensor & ne_out(Tensor & out, const Tensor & self, Scalar other);
Tensor ne(const Tensor & self, Scalar other);
Tensor & ne_out(Tensor & out, const Tensor & self, const Tensor & other);
Tensor ne(const Tensor & self, const Tensor & other);
Tensor & eq_out(Tensor & out, const Tensor & self, Scalar other);
Tensor eq(const Tensor & self, Scalar other);
Tensor & eq_out(Tensor & out, const Tensor & self, const Tensor & other);
Tensor eq(const Tensor & self, const Tensor & other);
Tensor & ge_out(Tensor & out, const Tensor & self, Scalar other);
Tensor ge(const Tensor & self, Scalar other);
Tensor & ge_out(Tensor & out, const Tensor & self, const Tensor & other);
Tensor ge(const Tensor & self, const Tensor & other);
Tensor & le_out(Tensor & out, const Tensor & self, Scalar other);
Tensor le(const Tensor & self, Scalar other);
Tensor & le_out(Tensor & out, const Tensor & self, const Tensor & other);
Tensor le(const Tensor & self, const Tensor & other);
Tensor & gt_out(Tensor & out, const Tensor & self, Scalar other);
Tensor gt(const Tensor & self, Scalar other);
Tensor & gt_out(Tensor & out, const Tensor & self, const Tensor & other);
Tensor gt(const Tensor & self, const Tensor & other);
Tensor & lt_out(Tensor & out, const Tensor & self, Scalar other);
Tensor lt(const Tensor & self, Scalar other);
Tensor & lt_out(Tensor & out, const Tensor & self, const Tensor & other);
Tensor lt(const Tensor & self, const Tensor & other);
Tensor & take_out(Tensor & out, const Tensor & self, const Tensor & index);
Tensor take(const Tensor & self, const Tensor & index);
Tensor & index_select_out(Tensor & out, const Tensor & self, int64_t dim, const Tensor & index);
Tensor index_select(const Tensor & self, int64_t dim, const Tensor & index);
Tensor & masked_select_out(Tensor & out, const Tensor & self, const Tensor & mask);
Tensor masked_select(const Tensor & self, const Tensor & mask);
Tensor & nonzero_out(Tensor & out, const Tensor & self);
Tensor nonzero(const Tensor & self);
Tensor & gather_out(Tensor & out, const Tensor & self, int64_t dim, const Tensor & index, bool sparse_grad);
Tensor gather(const Tensor & self, int64_t dim, const Tensor & index, bool sparse_grad);
std::tuple<Tensor &,Tensor &> lstsq_out(Tensor & X, Tensor & qr, const Tensor & self, const Tensor & A);
std::tuple<Tensor,Tensor> lstsq(const Tensor & self, const Tensor & A);
std::tuple<Tensor,Tensor> _triangular_solve_helper(const Tensor & self, const Tensor & A, bool upper, bool transpose, bool unitriangular);
std::tuple<Tensor,Tensor> _symeig_helper(const Tensor & self, bool eigenvectors, bool upper);
std::tuple<Tensor &,Tensor &> eig_out(Tensor & e, Tensor & v, const Tensor & self, bool eigenvectors);
std::tuple<Tensor,Tensor> eig(const Tensor & self, bool eigenvectors);
std::tuple<Tensor,Tensor,Tensor> _svd_helper(const Tensor & self, bool some, bool compute_uv);
Tensor _cholesky_helper(const Tensor & self, bool upper);
Tensor _cholesky_solve_helper(const Tensor & self, const Tensor & A, bool upper);
std::tuple<Tensor,Tensor> _solve_helper(const Tensor & self, const Tensor & A);
Tensor & cholesky_inverse_out(Tensor & out, const Tensor & self, bool upper);
Tensor cholesky_inverse(const Tensor & self, bool upper);
std::tuple<Tensor,Tensor> _qr_helper(const Tensor & self, bool some);
std::tuple<Tensor &,Tensor &> geqrf_out(Tensor & a, Tensor & tau, const Tensor & self);
std::tuple<Tensor,Tensor> geqrf(const Tensor & self);
Tensor & orgqr_out(Tensor & out, const Tensor & self, const Tensor & input2);
Tensor orgqr(const Tensor & self, const Tensor & input2);
Tensor & ormqr_out(Tensor & out, const Tensor & self, const Tensor & input2, const Tensor & input3, bool left, bool transpose);
Tensor ormqr(const Tensor & self, const Tensor & input2, const Tensor & input3, bool left, bool transpose);
std::tuple<Tensor,Tensor,Tensor> _lu_with_info(const Tensor & self, bool pivot, bool check_errors);
Tensor _lu_solve_helper(const Tensor & self, const Tensor & LU_data, const Tensor & LU_pivots);
Tensor & multinomial_out(Tensor & out, const Tensor & self, int64_t num_samples, bool replacement, Generator * generator);
Tensor multinomial(const Tensor & self, int64_t num_samples, bool replacement, Generator * generator);
std::tuple<Tensor,Tensor> _multinomial_alias_setup(const Tensor & probs);
Tensor _multinomial_alias_draw(const Tensor & J, const Tensor & q, int64_t num_samples, Generator * generator);
Tensor & lgamma_out(Tensor & out, const Tensor & self);
Tensor lgamma(const Tensor & self);
Tensor erfinv(const Tensor & self);
Tensor & erfinv_(Tensor & self);
Tensor & erfinv_out(Tensor & out, const Tensor & self);
Tensor & sign_out(Tensor & out, const Tensor & self);
Tensor dist(const Tensor & self, const Tensor & other, Scalar p);
Tensor & lerp_out(Tensor & out, const Tensor & self, const Tensor & end, Scalar weight);
Tensor & lerp_out(Tensor & out, const Tensor & self, const Tensor & end, const Tensor & weight);
Tensor lerp(const Tensor & self, const Tensor & end, Scalar weight);
Tensor lerp(const Tensor & self, const Tensor & end, const Tensor & weight);
Tensor & histc_out(Tensor & out, const Tensor & self, int64_t bins, Scalar min, Scalar max);
Tensor histc(const Tensor & self, int64_t bins, Scalar min, Scalar max);
Tensor & fmod_out(Tensor & out, const Tensor & self, Scalar other);
Tensor fmod(const Tensor & self, Scalar other);
Tensor & fmod_out(Tensor & out, const Tensor & self, const Tensor & other);
Tensor fmod(const Tensor & self, const Tensor & other);
Tensor & remainder_out(Tensor & out, const Tensor & self, Scalar other);
Tensor remainder(const Tensor & self, Scalar other);
Tensor & remainder_out(Tensor & out, const Tensor & self, const Tensor & other);
Tensor remainder(const Tensor & self, const Tensor & other);
Tensor & min_out(Tensor & out, const Tensor & self, const Tensor & other);
Tensor min(const Tensor & self, const Tensor & other);
Tensor min(const Tensor & self);
Tensor & max_out(Tensor & out, const Tensor & self, const Tensor & other);
Tensor max(const Tensor & self, const Tensor & other);
Tensor max(const Tensor & self);
Tensor median(const Tensor & self);
std::tuple<Tensor &,Tensor &> sort_out(Tensor & values, Tensor & indices, const Tensor & self, int64_t dim, bool descending);
std::tuple<Tensor,Tensor> sort(const Tensor & self, int64_t dim, bool descending);
std::tuple<Tensor &,Tensor &> topk_out(Tensor & values, Tensor & indices, const Tensor & self, int64_t k, int64_t dim, bool largest, bool sorted);
std::tuple<Tensor,Tensor> topk(const Tensor & self, int64_t k, int64_t dim, bool largest, bool sorted);
Tensor & renorm_out(Tensor & out, const Tensor & self, Scalar p, int64_t dim, Scalar maxnorm);
Tensor renorm(const Tensor & self, Scalar p, int64_t dim, Scalar maxnorm);
Tensor unfold(const Tensor & self, int64_t dimension, int64_t size, int64_t step);
bool equal(const Tensor & self, const Tensor & other);
Tensor & pow_out(Tensor & out, const Tensor & self, const Tensor & exponent);
Tensor pow(const Tensor & self, const Tensor & exponent);
Tensor & pow_out(Tensor & out, Scalar self, const Tensor & exponent);
Tensor pow(Scalar self, const Tensor & exponent);
Tensor & normal_out(Tensor & out, const Tensor & mean, double std, Generator * generator);
Tensor normal(const Tensor & mean, double std, Generator * generator);
Tensor & normal_out(Tensor & out, double mean, const Tensor & std, Generator * generator);
Tensor normal(double mean, const Tensor & std, Generator * generator);
Tensor & normal_out(Tensor & out, const Tensor & mean, const Tensor & std, Generator * generator);
Tensor normal(const Tensor & mean, const Tensor & std, Generator * generator);
Tensor _addr(const Tensor & self, const Tensor & vec1, const Tensor & vec2, Scalar beta, Scalar alpha);
Tensor & _addr_(Tensor & self, const Tensor & vec1, const Tensor & vec2, Scalar beta, Scalar alpha);
Tensor & _addr_out(Tensor & out, const Tensor & self, const Tensor & vec1, const Tensor & vec2, Scalar beta, Scalar alpha);
Tensor & _index_copy_(Tensor & self, int64_t dim, const Tensor & index, const Tensor & source);
Tensor _cumsum(const Tensor & self, int64_t dim);
Tensor & _cumsum_out(Tensor & out, const Tensor & self, int64_t dim);
Tensor _cumprod(const Tensor & self, int64_t dim);
Tensor & _cumprod_out(Tensor & out, const Tensor & self, int64_t dim);
Tensor _var(const Tensor & self, bool unbiased);
Tensor _std(const Tensor & self, bool unbiased);
Tensor _cat(TensorList tensors, int64_t dim);
Tensor & _cat_out(Tensor & out, TensorList tensors, int64_t dim);
std::tuple<Tensor,Tensor> _mode(const Tensor & self, int64_t dim, bool keepdim);
std::tuple<Tensor &,Tensor &> _mode_out(Tensor & values, Tensor & indices, const Tensor & self, int64_t dim, bool keepdim);
std::tuple<Tensor,Tensor> _max(const Tensor & self, int64_t dim, bool keepdim);
std::tuple<Tensor &,Tensor &> _max_out(Tensor & max, Tensor & max_indices, const Tensor & self, int64_t dim, bool keepdim);
std::tuple<Tensor,Tensor> _min(const Tensor & self, int64_t dim, bool keepdim);
std::tuple<Tensor &,Tensor &> _min_out(Tensor & min, Tensor & min_indices, const Tensor & self, int64_t dim, bool keepdim);
Tensor & binary_cross_entropy_out(Tensor & out, const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction);
Tensor binary_cross_entropy(const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction);
Tensor & binary_cross_entropy_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction);
Tensor binary_cross_entropy_backward(const Tensor & grad_output, const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction);
Tensor & mse_loss_out(Tensor & out, const Tensor & self, const Tensor & target, int64_t reduction);
Tensor mse_loss(const Tensor & self, const Tensor & target, int64_t reduction);
Tensor & mse_loss_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, const Tensor & target, int64_t reduction);
Tensor mse_loss_backward(const Tensor & grad_output, const Tensor & self, const Tensor & target, int64_t reduction);
Tensor & l1_loss_out(Tensor & out, const Tensor & self, const Tensor & target, int64_t reduction);
Tensor l1_loss(const Tensor & self, const Tensor & target, int64_t reduction);
Tensor & l1_loss_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, const Tensor & target, int64_t reduction);
Tensor l1_loss_backward(const Tensor & grad_output, const Tensor & self, const Tensor & target, int64_t reduction);
Tensor & multi_margin_loss_out(Tensor & out, const Tensor & self, const Tensor & target, Scalar p, Scalar margin, const Tensor & weight, int64_t reduction);
Tensor multi_margin_loss(const Tensor & self, const Tensor & target, Scalar p, Scalar margin, const Tensor & weight, int64_t reduction);
Tensor & multi_margin_loss_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, const Tensor & target, Scalar p, Scalar margin, const Tensor & weight, int64_t reduction);
Tensor multi_margin_loss_backward(const Tensor & grad_output, const Tensor & self, const Tensor & target, Scalar p, Scalar margin, const Tensor & weight, int64_t reduction);
std::tuple<Tensor &,Tensor &> multilabel_margin_loss_forward_out(Tensor & output, Tensor & is_target, const Tensor & self, const Tensor & target, int64_t reduction);
std::tuple<Tensor,Tensor> multilabel_margin_loss_forward(const Tensor & self, const Tensor & target, int64_t reduction);
Tensor & multilabel_margin_loss_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, const Tensor & target, int64_t reduction, const Tensor & is_target);
Tensor multilabel_margin_loss_backward(const Tensor & grad_output, const Tensor & self, const Tensor & target, int64_t reduction, const Tensor & is_target);
std::tuple<Tensor &,Tensor &> nll_loss_forward_out(Tensor & output, Tensor & total_weight, const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction, int64_t ignore_index);
std::tuple<Tensor,Tensor> nll_loss_forward(const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction, int64_t ignore_index);
Tensor & nll_loss_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction, int64_t ignore_index, const Tensor & total_weight);
Tensor nll_loss_backward(const Tensor & grad_output, const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction, int64_t ignore_index, const Tensor & total_weight);
std::tuple<Tensor &,Tensor &> nll_loss2d_forward_out(Tensor & output, Tensor & total_weight, const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction, int64_t ignore_index);
std::tuple<Tensor,Tensor> nll_loss2d_forward(const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction, int64_t ignore_index);
Tensor & nll_loss2d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction, int64_t ignore_index, const Tensor & total_weight);
Tensor nll_loss2d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction, int64_t ignore_index, const Tensor & total_weight);
Tensor & smooth_l1_loss_out(Tensor & out, const Tensor & self, const Tensor & target, int64_t reduction);
Tensor smooth_l1_loss(const Tensor & self, const Tensor & target, int64_t reduction);
Tensor & smooth_l1_loss_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, const Tensor & target, int64_t reduction);
Tensor smooth_l1_loss_backward(const Tensor & grad_output, const Tensor & self, const Tensor & target, int64_t reduction);
Tensor & soft_margin_loss_out(Tensor & out, const Tensor & self, const Tensor & target, int64_t reduction);
Tensor soft_margin_loss(const Tensor & self, const Tensor & target, int64_t reduction);
Tensor & soft_margin_loss_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, const Tensor & target, int64_t reduction);
Tensor soft_margin_loss_backward(const Tensor & grad_output, const Tensor & self, const Tensor & target, int64_t reduction);
Tensor & elu_out(Tensor & out, const Tensor & self, Scalar alpha, Scalar scale, Scalar input_scale);
Tensor elu(const Tensor & self, Scalar alpha, Scalar scale, Scalar input_scale);
Tensor & elu_backward_out(Tensor & grad_input, const Tensor & grad_output, Scalar alpha, Scalar scale, Scalar input_scale, const Tensor & output);
Tensor elu_backward(const Tensor & grad_output, Scalar alpha, Scalar scale, Scalar input_scale, const Tensor & output);
Tensor & elu_(Tensor & self, Scalar alpha, Scalar scale, Scalar input_scale);
Tensor & glu_out(Tensor & out, const Tensor & self, int64_t dim);
Tensor glu(const Tensor & self, int64_t dim);
Tensor & glu_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, int64_t dim);
Tensor glu_backward(const Tensor & grad_output, const Tensor & self, int64_t dim);
Tensor & hardtanh_out(Tensor & out, const Tensor & self, Scalar min_val, Scalar max_val);
Tensor hardtanh(const Tensor & self, Scalar min_val, Scalar max_val);
Tensor & hardtanh_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, Scalar min_val, Scalar max_val);
Tensor hardtanh_backward(const Tensor & grad_output, const Tensor & self, Scalar min_val, Scalar max_val);
Tensor & hardtanh_(Tensor & self, Scalar min_val, Scalar max_val);
Tensor & leaky_relu_out(Tensor & out, const Tensor & self, Scalar negative_slope);
Tensor leaky_relu(const Tensor & self, Scalar negative_slope);
Tensor & leaky_relu_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, Scalar negative_slope);
Tensor leaky_relu_backward(const Tensor & grad_output, const Tensor & self, Scalar negative_slope);
Tensor & leaky_relu_(Tensor & self, Scalar negative_slope);
std::tuple<Tensor &,Tensor &> log_sigmoid_forward_out(Tensor & output, Tensor & buffer, const Tensor & self);
std::tuple<Tensor,Tensor> log_sigmoid_forward(const Tensor & self);
Tensor & log_sigmoid_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, const Tensor & buffer);
Tensor log_sigmoid_backward(const Tensor & grad_output, const Tensor & self, const Tensor & buffer);
Tensor & rrelu_with_noise_out(Tensor & out, const Tensor & self, const Tensor & noise, Scalar lower, Scalar upper, bool training, Generator * generator);
Tensor rrelu_with_noise(const Tensor & self, const Tensor & noise, Scalar lower, Scalar upper, bool training, Generator * generator);
Tensor & rrelu_with_noise_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, const Tensor & noise, Scalar lower, Scalar upper, bool training);
Tensor rrelu_with_noise_backward(const Tensor & grad_output, const Tensor & self, const Tensor & noise, Scalar lower, Scalar upper, bool training);
Tensor & rrelu_with_noise_(Tensor & self, const Tensor & noise, Scalar lower, Scalar upper, bool training, Generator * generator);
Tensor & softplus_out(Tensor & out, const Tensor & self, Scalar beta, Scalar threshold);
Tensor softplus(const Tensor & self, Scalar beta, Scalar threshold);
Tensor & softplus_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, Scalar beta, Scalar threshold, const Tensor & output);
Tensor softplus_backward(const Tensor & grad_output, const Tensor & self, Scalar beta, Scalar threshold, const Tensor & output);
Tensor & softshrink_out(Tensor & out, const Tensor & self, Scalar lambd);
Tensor softshrink(const Tensor & self, Scalar lambd);
Tensor & softshrink_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, Scalar lambd);
Tensor softshrink_backward(const Tensor & grad_output, const Tensor & self, Scalar lambd);
Tensor & adaptive_avg_pool2d_out(Tensor & out, const Tensor & self, IntArrayRef output_size);
Tensor _adaptive_avg_pool2d(const Tensor & self, IntArrayRef output_size);
Tensor _adaptive_avg_pool2d_backward(const Tensor & grad_output, const Tensor & self);
Tensor & adaptive_avg_pool3d_out(Tensor & out, const Tensor & self, IntArrayRef output_size);
Tensor adaptive_avg_pool3d(const Tensor & self, IntArrayRef output_size);
Tensor & adaptive_avg_pool3d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self);
Tensor adaptive_avg_pool3d_backward(const Tensor & grad_output, const Tensor & self);
std::tuple<Tensor &,Tensor &> adaptive_max_pool2d_out(Tensor & out, Tensor & indices, const Tensor & self, IntArrayRef output_size);
std::tuple<Tensor,Tensor> adaptive_max_pool2d(const Tensor & self, IntArrayRef output_size);
Tensor & adaptive_max_pool2d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, const Tensor & indices);
Tensor adaptive_max_pool2d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & indices);
std::tuple<Tensor &,Tensor &> adaptive_max_pool3d_out(Tensor & out, Tensor & indices, const Tensor & self, IntArrayRef output_size);
std::tuple<Tensor,Tensor> adaptive_max_pool3d(const Tensor & self, IntArrayRef output_size);
Tensor & adaptive_max_pool3d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, const Tensor & indices);
Tensor adaptive_max_pool3d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & indices);
Tensor & avg_pool2d_out(Tensor & out, const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional<int64_t> divisor_override);
Tensor avg_pool2d(const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional<int64_t> divisor_override);
Tensor & avg_pool2d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional<int64_t> divisor_override);
Tensor avg_pool2d_backward(const Tensor & grad_output, const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional<int64_t> divisor_override);
Tensor & avg_pool3d_out(Tensor & out, const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional<int64_t> divisor_override);
Tensor avg_pool3d(const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional<int64_t> divisor_override);
Tensor & avg_pool3d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional<int64_t> divisor_override);
Tensor avg_pool3d_backward(const Tensor & grad_output, const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional<int64_t> divisor_override);
std::tuple<Tensor &,Tensor &> fractional_max_pool2d_out(Tensor & output, Tensor & indices, const Tensor & self, IntArrayRef kernel_size, IntArrayRef output_size, const Tensor & random_samples);
std::tuple<Tensor,Tensor> fractional_max_pool2d(const Tensor & self, IntArrayRef kernel_size, IntArrayRef output_size, const Tensor & random_samples);
Tensor & fractional_max_pool2d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, IntArrayRef kernel_size, IntArrayRef output_size, const Tensor & indices);
Tensor fractional_max_pool2d_backward(const Tensor & grad_output, const Tensor & self, IntArrayRef kernel_size, IntArrayRef output_size, const Tensor & indices);
std::tuple<Tensor &,Tensor &> fractional_max_pool3d_out(Tensor & output, Tensor & indices, const Tensor & self, IntArrayRef kernel_size, IntArrayRef output_size, const Tensor & random_samples);
std::tuple<Tensor,Tensor> fractional_max_pool3d(const Tensor & self, IntArrayRef kernel_size, IntArrayRef output_size, const Tensor & random_samples);
Tensor & fractional_max_pool3d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, IntArrayRef kernel_size, IntArrayRef output_size, const Tensor & indices);
Tensor fractional_max_pool3d_backward(const Tensor & grad_output, const Tensor & self, IntArrayRef kernel_size, IntArrayRef output_size, const Tensor & indices);
std::tuple<Tensor &,Tensor &> max_pool2d_with_indices_out(Tensor & out, Tensor & indices, const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode);
std::tuple<Tensor,Tensor> max_pool2d_with_indices(const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode);
Tensor & max_pool2d_with_indices_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode, const Tensor & indices);
Tensor max_pool2d_with_indices_backward(const Tensor & grad_output, const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode, const Tensor & indices);
std::tuple<Tensor &,Tensor &> max_pool3d_with_indices_out(Tensor & out, Tensor & indices, const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode);
std::tuple<Tensor,Tensor> max_pool3d_with_indices(const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode);
Tensor & max_pool3d_with_indices_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode, const Tensor & indices);
Tensor max_pool3d_with_indices_backward(const Tensor & grad_output, const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode, const Tensor & indices);
Tensor & max_unpool2d_out(Tensor & out, const Tensor & self, const Tensor & indices, IntArrayRef output_size);
Tensor max_unpool2d(const Tensor & self, const Tensor & indices, IntArrayRef output_size);
Tensor & max_unpool2d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, const Tensor & indices, IntArrayRef output_size);
Tensor max_unpool2d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & indices, IntArrayRef output_size);
Tensor & max_unpool3d_out(Tensor & out, const Tensor & self, const Tensor & indices, IntArrayRef output_size, IntArrayRef stride, IntArrayRef padding);
Tensor max_unpool3d(const Tensor & self, const Tensor & indices, IntArrayRef output_size, IntArrayRef stride, IntArrayRef padding);
Tensor & max_unpool3d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, const Tensor & indices, IntArrayRef output_size, IntArrayRef stride, IntArrayRef padding);
Tensor max_unpool3d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & indices, IntArrayRef output_size, IntArrayRef stride, IntArrayRef padding);
Tensor & reflection_pad1d_out(Tensor & out, const Tensor & self, IntArrayRef padding);
Tensor reflection_pad1d(const Tensor & self, IntArrayRef padding);
Tensor & reflection_pad1d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, IntArrayRef padding);
Tensor reflection_pad1d_backward(const Tensor & grad_output, const Tensor & self, IntArrayRef padding);
Tensor & reflection_pad2d_out(Tensor & out, const Tensor & self, IntArrayRef padding);
Tensor reflection_pad2d(const Tensor & self, IntArrayRef padding);
Tensor & reflection_pad2d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, IntArrayRef padding);
Tensor reflection_pad2d_backward(const Tensor & grad_output, const Tensor & self, IntArrayRef padding);
Tensor & replication_pad1d_out(Tensor & out, const Tensor & self, IntArrayRef padding);
Tensor replication_pad1d(const Tensor & self, IntArrayRef padding);
Tensor & replication_pad1d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, IntArrayRef padding);
Tensor replication_pad1d_backward(const Tensor & grad_output, const Tensor & self, IntArrayRef padding);
Tensor & replication_pad2d_out(Tensor & out, const Tensor & self, IntArrayRef padding);
Tensor replication_pad2d(const Tensor & self, IntArrayRef padding);
Tensor & replication_pad2d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, IntArrayRef padding);
Tensor replication_pad2d_backward(const Tensor & grad_output, const Tensor & self, IntArrayRef padding);
Tensor & replication_pad3d_out(Tensor & out, const Tensor & self, IntArrayRef padding);
Tensor replication_pad3d(const Tensor & self, IntArrayRef padding);
Tensor & replication_pad3d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, IntArrayRef padding);
Tensor replication_pad3d_backward(const Tensor & grad_output, const Tensor & self, IntArrayRef padding);
Tensor & upsample_linear1d_out(Tensor & out, const Tensor & self, IntArrayRef output_size, bool align_corners);
Tensor upsample_linear1d(const Tensor & self, IntArrayRef output_size, bool align_corners);
Tensor & upsample_linear1d_backward_out(Tensor & grad_input, const Tensor & grad_output, IntArrayRef output_size, IntArrayRef input_size, bool align_corners);
Tensor upsample_linear1d_backward(const Tensor & grad_output, IntArrayRef output_size, IntArrayRef input_size, bool align_corners);
Tensor & upsample_bilinear2d_out(Tensor & out, const Tensor & self, IntArrayRef output_size, bool align_corners);
Tensor upsample_bilinear2d(const Tensor & self, IntArrayRef output_size, bool align_corners);
Tensor & upsample_bilinear2d_backward_out(Tensor & grad_input, const Tensor & grad_output, IntArrayRef output_size, IntArrayRef input_size, bool align_corners);
Tensor upsample_bilinear2d_backward(const Tensor & grad_output, IntArrayRef output_size, IntArrayRef input_size, bool align_corners);
Tensor & upsample_bicubic2d_out(Tensor & out, const Tensor & self, IntArrayRef output_size, bool align_corners);
Tensor upsample_bicubic2d(const Tensor & self, IntArrayRef output_size, bool align_corners);
Tensor & upsample_bicubic2d_backward_out(Tensor & grad_input, const Tensor & grad_output, IntArrayRef output_size, IntArrayRef input_size, bool align_corners);
Tensor upsample_bicubic2d_backward(const Tensor & grad_output, IntArrayRef output_size, IntArrayRef input_size, bool align_corners);
Tensor & upsample_trilinear3d_out(Tensor & out, const Tensor & self, IntArrayRef output_size, bool align_corners);
Tensor upsample_trilinear3d(const Tensor & self, IntArrayRef output_size, bool align_corners);
Tensor & upsample_trilinear3d_backward_out(Tensor & grad_input, const Tensor & grad_output, IntArrayRef output_size, IntArrayRef input_size, bool align_corners);
Tensor upsample_trilinear3d_backward(const Tensor & grad_output, IntArrayRef output_size, IntArrayRef input_size, bool align_corners);
Tensor & upsample_nearest1d_out(Tensor & out, const Tensor & self, IntArrayRef output_size);
Tensor upsample_nearest1d(const Tensor & self, IntArrayRef output_size);
Tensor & upsample_nearest1d_backward_out(Tensor & grad_input, const Tensor & grad_output, IntArrayRef output_size, IntArrayRef input_size);
Tensor upsample_nearest1d_backward(const Tensor & grad_output, IntArrayRef output_size, IntArrayRef input_size);
Tensor & upsample_nearest2d_out(Tensor & out, const Tensor & self, IntArrayRef output_size);
Tensor upsample_nearest2d(const Tensor & self, IntArrayRef output_size);
Tensor & upsample_nearest2d_backward_out(Tensor & grad_input, const Tensor & grad_output, IntArrayRef output_size, IntArrayRef input_size);
Tensor upsample_nearest2d_backward(const Tensor & grad_output, IntArrayRef output_size, IntArrayRef input_size);
Tensor & upsample_nearest3d_out(Tensor & out, const Tensor & self, IntArrayRef output_size);
Tensor upsample_nearest3d(const Tensor & self, IntArrayRef output_size);
Tensor & upsample_nearest3d_backward_out(Tensor & grad_input, const Tensor & grad_output, IntArrayRef output_size, IntArrayRef input_size);
Tensor upsample_nearest3d_backward(const Tensor & grad_output, IntArrayRef output_size, IntArrayRef input_size);
Tensor & sigmoid_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & output);
Tensor sigmoid_backward(const Tensor & grad_output, const Tensor & output);
Tensor & tanh_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & output);
Tensor tanh_backward(const Tensor & grad_output, const Tensor & output);
Tensor & slow_conv_transpose2d_out(Tensor & out, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation);
Tensor slow_conv_transpose2d(const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation);
std::tuple<Tensor &,Tensor &,Tensor &> slow_conv_transpose2d_backward_out(Tensor & grad_input, Tensor & grad_weight, Tensor & grad_bias, const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation, const Tensor & columns, const Tensor & ones);
std::tuple<Tensor,Tensor,Tensor> slow_conv_transpose2d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation, const Tensor & columns, const Tensor & ones, std::array<bool,3> output_mask);
Tensor & slow_conv_transpose3d_out(Tensor & out, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation);
Tensor slow_conv_transpose3d(const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation);
std::tuple<Tensor &,Tensor &,Tensor &> slow_conv_transpose3d_backward_out(Tensor & grad_input, Tensor & grad_weight, Tensor & grad_bias, const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation, const Tensor & finput, const Tensor & fgrad_input);
std::tuple<Tensor,Tensor,Tensor> slow_conv_transpose3d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation, const Tensor & finput, const Tensor & fgrad_input, std::array<bool,3> output_mask);
std::tuple<Tensor &,Tensor &,Tensor &> thnn_conv2d_forward_out(Tensor & output, Tensor & finput, Tensor & fgrad_input, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding);
std::tuple<Tensor,Tensor,Tensor> thnn_conv2d_forward(const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding);
std::tuple<Tensor &,Tensor &,Tensor &> thnn_conv2d_backward_out(Tensor & grad_input, Tensor & grad_weight, Tensor & grad_bias, const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, const Tensor & finput, const Tensor & fgrad_input);
std::tuple<Tensor,Tensor,Tensor> thnn_conv2d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, const Tensor & finput, const Tensor & fgrad_input, std::array<bool,3> output_mask);
std::tuple<Tensor &,Tensor &,Tensor &> thnn_conv3d_forward_out(Tensor & output, Tensor & finput, Tensor & fgrad_input, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding);
std::tuple<Tensor,Tensor,Tensor> thnn_conv3d_forward(const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding);
std::tuple<Tensor &,Tensor &,Tensor &> thnn_conv3d_backward_out(Tensor & grad_input, Tensor & grad_weight, Tensor & grad_bias, const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, const Tensor & finput, const Tensor & fgrad_input);
std::tuple<Tensor,Tensor,Tensor> thnn_conv3d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, const Tensor & finput, const Tensor & fgrad_input, std::array<bool,3> output_mask);
Tensor slow_conv_dilated2d(const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation);
std::tuple<Tensor,Tensor,Tensor> slow_conv_dilated2d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, std::array<bool,3> output_mask);
Tensor slow_conv_dilated3d(const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation);
std::tuple<Tensor,Tensor,Tensor> slow_conv_dilated3d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, std::array<bool,3> output_mask);
Tensor & col2im_out(Tensor & out, const Tensor & self, IntArrayRef output_size, IntArrayRef kernel_size, IntArrayRef dilation, IntArrayRef padding, IntArrayRef stride);
Tensor col2im(const Tensor & self, IntArrayRef output_size, IntArrayRef kernel_size, IntArrayRef dilation, IntArrayRef padding, IntArrayRef stride);
Tensor & col2im_backward_out(Tensor & grad_input, const Tensor & grad_output, IntArrayRef kernel_size, IntArrayRef dilation, IntArrayRef padding, IntArrayRef stride);
Tensor col2im_backward(const Tensor & grad_output, IntArrayRef kernel_size, IntArrayRef dilation, IntArrayRef padding, IntArrayRef stride);
Tensor & im2col_out(Tensor & out, const Tensor & self, IntArrayRef kernel_size, IntArrayRef dilation, IntArrayRef padding, IntArrayRef stride);
Tensor im2col(const Tensor & self, IntArrayRef kernel_size, IntArrayRef dilation, IntArrayRef padding, IntArrayRef stride);
Tensor & im2col_backward_out(Tensor & grad_input, const Tensor & grad_output, IntArrayRef input_size, IntArrayRef kernel_size, IntArrayRef dilation, IntArrayRef padding, IntArrayRef stride);
Tensor im2col_backward(const Tensor & grad_output, IntArrayRef input_size, IntArrayRef kernel_size, IntArrayRef dilation, IntArrayRef padding, IntArrayRef stride);
} // namespace CPUType
#endif

} // namespace at