#pragma once

// @generated by aten/src/ATen/gen.py

#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include

namespace c10 {
struct Storage;
}

namespace at {

class Tensor;
using TensorList = ArrayRef<Tensor>;
class Context;
struct Generator;
struct Quantizer;
// This is temporary typedef to enable Quantizer in aten native function API
// we'll remove them when we are actually exposing Quantizer class
// to frontend
using ConstQuantizerPtr = const c10::intrusive_ptr<Quantizer>&;

#ifdef USE_STATIC_DISPATCH
namespace CUDAType {
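// Backend-specific entry points used when ATen is built with static dispatch.
// When USE_STATIC_DISPATCH is defined, the generated at::* wrappers call these
// CUDAType functions directly for CUDA tensors instead of going through the
// dynamic dispatcher. Illustrative sketch only (assumes a CUDA-enabled build
// and a CUDA tensor; the names below come from the public ATen API, not from
// this header):
//
//   at::Tensor a = at::ones({2, 3}, at::kCUDA);
//   at::Tensor b = at::add(a, a, /*alpha=*/1);  // would resolve to CUDAType::add
//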
std::tuple<Tensor,Tensor> _cudnn_ctc_loss(const Tensor & log_probs, const Tensor & targets, IntArrayRef input_lengths, IntArrayRef target_lengths, int64_t blank, bool deterministic, bool zero_infinity);
Tensor _cudnn_rnn_flatten_weight(TensorList weight_arr, int64_t weight_stride0, int64_t input_size, int64_t mode, int64_t hidden_size, int64_t num_layers, bool batch_first, bool bidirectional);
std::tuple<Tensor,Tensor,Tensor,Tensor,Tensor> _cudnn_rnn(const Tensor & input, TensorList weight, int64_t weight_stride0, const Tensor & weight_buf, const Tensor & hx, const Tensor & cx, int64_t mode, int64_t hidden_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, IntArrayRef batch_sizes, const Tensor & dropout_state);
std::tuple<Tensor,Tensor,Tensor,std::vector<Tensor>> _cudnn_rnn_backward(const Tensor & input, TensorList weight, int64_t weight_stride0, const Tensor & weight_buf, const Tensor & hx, const Tensor & cx, const Tensor & output, const Tensor & grad_output, const Tensor & grad_hy, const Tensor & grad_cy, int64_t mode, int64_t hidden_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, IntArrayRef batch_sizes, const Tensor & dropout_state, const Tensor & reserve, std::array<bool,4> output_mask);
Tensor _cudnn_init_dropout_state(double dropout, bool train, int64_t dropout_seed, const TensorOptions & options);
std::tuple<Tensor,Tensor> _fused_dropout(const Tensor & self, double p, Generator * generator);
Tensor _masked_scale(const Tensor & self, const Tensor & mask, double scale);
Tensor & abs_(Tensor & self);
Tensor & abs_out(Tensor & out, const Tensor & self);
Tensor & acos_(Tensor & self);
Tensor & acos_out(Tensor & out, const Tensor & self);
Tensor add(const Tensor & self, const Tensor & other, Scalar alpha);
Tensor & add_(Tensor & self, const Tensor & other, Scalar alpha);
Tensor & add_out(Tensor & out, const Tensor & self, const Tensor & other, Scalar alpha);
Tensor addmv(const Tensor & self, const Tensor & mat, const Tensor & vec, Scalar beta, Scalar alpha);
Tensor & addmv_(Tensor & self, const Tensor & mat, const Tensor & vec, Scalar beta, Scalar alpha);
Tensor & addmv_out(Tensor & out, const Tensor & self, const Tensor & mat, const Tensor & vec, Scalar beta, Scalar alpha);
Tensor & arange_out(Tensor & out, Scalar start, Scalar end, Scalar step);
Tensor as_strided(const Tensor & self, IntArrayRef size, IntArrayRef stride, c10::optional<int64_t> storage_offset);
Tensor & asin_(Tensor & self);
Tensor & asin_out(Tensor & out, const Tensor & self);
Tensor & atan_(Tensor & self);
Tensor & atan_out(Tensor & out, const Tensor & self);
Tensor baddbmm(const Tensor & self, const Tensor & batch1, const Tensor & batch2, Scalar beta, Scalar alpha);
Tensor & baddbmm_(Tensor & self, const Tensor & batch1, const Tensor & batch2, Scalar beta, Scalar alpha);
Tensor & baddbmm_out(Tensor & out, const Tensor & self, const Tensor & batch1, const Tensor & batch2, Scalar beta, Scalar alpha);
Tensor & bernoulli_(Tensor & self, const Tensor & p, Generator * generator);
Tensor & bernoulli_(Tensor & self, double p, Generator * generator);
Tensor bincount(const Tensor & self, const Tensor & weights, int64_t minlength);
Tensor & bitwise_not_out(Tensor & out, const Tensor & self);
Tensor & logical_not_out(Tensor & out, const Tensor & self);
Tensor & logical_xor_out(Tensor & out, const Tensor & self, const Tensor & other);
Tensor bmm(const Tensor & self, const Tensor & mat2);
Tensor & bmm_out(Tensor & out, const Tensor & self, const Tensor & mat2);
Tensor & ceil_out(Tensor & out, const Tensor & self);
Tensor & clamp_(Tensor & self, c10::optional<Scalar> min, c10::optional<Scalar> max);
Tensor & clamp_out(Tensor & out, const Tensor & self, c10::optional<Scalar> min, c10::optional<Scalar> max);
Tensor & clamp_max_(Tensor & self, Scalar max);
Tensor & clamp_max_out(Tensor & out, const Tensor & self, Scalar max);
Tensor & clamp_min_(Tensor & self, Scalar min);
Tensor & clamp_min_out(Tensor & out, const Tensor & self, Scalar min);
Tensor & cos_(Tensor & self);
Tensor & cos_out(Tensor & out, const Tensor & self);
Tensor & cosh_(Tensor & self);
Tensor & cosh_out(Tensor & out, const Tensor & self);
Tensor cudnn_affine_grid_generator(const Tensor & theta, int64_t N, int64_t C, int64_t H, int64_t W);
Tensor cudnn_affine_grid_generator_backward(const Tensor & grad, int64_t N, int64_t C, int64_t H, int64_t W);
std::tuple<Tensor,Tensor,Tensor> cudnn_batch_norm(const Tensor & input, const Tensor & weight, const Tensor & bias, const Tensor & running_mean, const Tensor & running_var, bool training, double exponential_average_factor, double epsilon);
std::tuple<Tensor,Tensor,Tensor> cudnn_batch_norm_backward(const Tensor & input, const Tensor & grad_output, const Tensor & weight, const Tensor & running_mean, const Tensor & running_var, const Tensor & save_mean, const Tensor & save_var, double epsilon);
Tensor cudnn_convolution(const Tensor & self, const Tensor & weight, const Tensor & bias, IntArrayRef padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic);
Tensor cudnn_convolution_backward_input(IntArrayRef self_size, const Tensor & grad_output, const Tensor & weight, IntArrayRef padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic);
std::tuple<Tensor,Tensor,Tensor> cudnn_convolution_backward(const Tensor & self, const Tensor & grad_output, const Tensor & weight, IntArrayRef padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic, std::array<bool,3> output_mask);
Tensor cudnn_convolution_backward_bias(const Tensor & grad_output);
Tensor cudnn_convolution_backward_weight(IntArrayRef weight_size, const Tensor & grad_output, const Tensor & self, IntArrayRef padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic);
Tensor cudnn_convolution_transpose(const Tensor & self, const Tensor & weight, const Tensor & bias, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic);
std::tuple<Tensor,Tensor,Tensor> cudnn_convolution_transpose_backward(const Tensor & self, const Tensor & grad_output, const Tensor & weight, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic, std::array<bool,3> output_mask);
Tensor cudnn_convolution_transpose_backward_bias(const Tensor & grad_output);
Tensor cudnn_convolution_transpose_backward_input(const Tensor & grad_output, const Tensor & weight, IntArrayRef padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups, bool benchmark,
bool deterministic); Tensor cudnn_convolution_transpose_backward_weight(IntArrayRef weight_size, const Tensor & grad_output, const Tensor & self, IntArrayRef padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic); Tensor cudnn_grid_sampler(const Tensor & self, const Tensor & grid); std::tuple cudnn_grid_sampler_backward(const Tensor & self, const Tensor & grid, const Tensor & grad_output); std::tuple _ctc_loss(const Tensor & log_probs, const Tensor & targets, IntArrayRef input_lengths, IntArrayRef target_lengths, int64_t blank, bool zero_infinity); Tensor _ctc_loss_backward(const Tensor & grad, const Tensor & log_probs, const Tensor & targets, IntArrayRef input_lengths, IntArrayRef target_lengths, const Tensor & neg_log_likelihood, const Tensor & log_alpha, int64_t blank, bool zero_infinity); Tensor div(const Tensor & self, const Tensor & other); Tensor & div_(Tensor & self, const Tensor & other); Tensor & div_out(Tensor & out, const Tensor & self, const Tensor & other); Tensor dot(const Tensor & self, const Tensor & tensor); Tensor embedding_dense_backward(const Tensor & grad_output, const Tensor & indices, int64_t num_weights, int64_t padding_idx, bool scale_grad_by_freq); Tensor & embedding_renorm_(Tensor & self, const Tensor & indices, double max_norm, double norm_type); std::tuple _embedding_bag(const Tensor & weight, const Tensor & indices, const Tensor & offsets, bool scale_grad_by_freq, int64_t mode, bool sparse, const Tensor & per_sample_weights); Tensor _embedding_bag_dense_backward(const Tensor & grad, const Tensor & indices, const Tensor & offsets, const Tensor & offset2bag, const Tensor & bag_size, const Tensor & maximum_indices, int64_t num_weights, bool scale_grad_by_freq, int64_t mode, const Tensor & per_sample_weights); Tensor _embedding_bag_per_sample_weights_backward(const Tensor & grad, const Tensor & weight, const Tensor & indices, const Tensor & offsets, const Tensor & offset2bag, int64_t mode); Tensor empty(IntArrayRef size, const TensorOptions & options, c10::optional memory_format); Tensor & resize_(Tensor & self, IntArrayRef size); Tensor empty_strided(IntArrayRef size, IntArrayRef stride, const TensorOptions & options); Tensor & erf_(Tensor & self); Tensor & erf_out(Tensor & out, const Tensor & self); Tensor & erfc_(Tensor & self); Tensor & erfc_out(Tensor & out, const Tensor & self); Tensor & exp_(Tensor & self); Tensor & exp_out(Tensor & out, const Tensor & self); Tensor & expm1_out(Tensor & out, const Tensor & self); Tensor & eye_out(Tensor & out, int64_t n); Tensor & eye_out(Tensor & out, int64_t n, int64_t m); Tensor & floor_out(Tensor & out, const Tensor & self); Tensor & frac_(Tensor & self); Tensor & frac_out(Tensor & out, const Tensor & self); Tensor grid_sampler_2d(const Tensor & input, const Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners); std::tuple grid_sampler_2d_backward(const Tensor & grad_output, const Tensor & input, const Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners); Tensor grid_sampler_3d(const Tensor & input, const Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners); std::tuple grid_sampler_3d_backward(const Tensor & grad_output, const Tensor & input, const Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners); Tensor ger(const Tensor & self, const Tensor & vec2); Tensor & ger_out(Tensor & out, const Tensor & self, const Tensor & vec2); Tensor 
_fft_with_size(const Tensor & self, int64_t signal_ndim, bool complex_input, bool complex_output, bool inverse, IntArrayRef checked_signal_sizes, bool normalized, bool onesided, IntArrayRef output_sizes); Tensor _inverse_helper(const Tensor & self); Tensor kl_div_backward(const Tensor & grad_output, const Tensor & self, const Tensor & target, int64_t reduction); std::tuple kthvalue_out(Tensor & values, Tensor & indices, const Tensor & self, int64_t k, int64_t dim, bool keepdim); Tensor & linspace_out(Tensor & out, Scalar start, Scalar end, int64_t steps); Tensor & log_out(Tensor & out, const Tensor & self); Tensor & log10_(Tensor & self); Tensor & log10_out(Tensor & out, const Tensor & self); Tensor & log1p_(Tensor & self); Tensor & log1p_out(Tensor & out, const Tensor & self); Tensor & log2_(Tensor & self); Tensor & log2_out(Tensor & out, const Tensor & self); Tensor & logspace_out(Tensor & out, Scalar start, Scalar end, int64_t steps, double base); Tensor _log_softmax(const Tensor & self, int64_t dim, bool half_to_float); Tensor _log_softmax_backward_data(const Tensor & grad_output, const Tensor & output, int64_t dim, const Tensor & self); Tensor mean(const Tensor & self, c10::optional dtype); Tensor mean(const Tensor & self, IntArrayRef dim, bool keepdim, c10::optional dtype); Tensor & mean_out(Tensor & out, const Tensor & self, IntArrayRef dim, bool keepdim, c10::optional dtype); Tensor mean(const Tensor & self, DimnameList dim, bool keepdim, c10::optional dtype); Tensor & mean_out(Tensor & out, const Tensor & self, DimnameList dim, bool keepdim, c10::optional dtype); std::tuple miopen_batch_norm(const Tensor & input, const Tensor & weight, const Tensor & bias, const Tensor & running_mean, const Tensor & running_var, bool training, double exponential_average_factor, double epsilon); std::tuple miopen_batch_norm_backward(const Tensor & input, const Tensor & grad_output, const Tensor & weight, const Tensor & running_mean, const Tensor & running_var, const Tensor & save_mean, const Tensor & save_var, double epsilon); Tensor miopen_convolution(const Tensor & self, const Tensor & weight, const Tensor & bias, IntArrayRef padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic); Tensor miopen_convolution_backward_input(IntArrayRef self_size, const Tensor & grad_output, const Tensor & weight, IntArrayRef padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic); std::tuple miopen_convolution_backward(const Tensor & self, const Tensor & grad_output, const Tensor & weight, IntArrayRef padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic, std::array output_mask); Tensor miopen_convolution_backward_bias(const Tensor & grad_output); Tensor miopen_convolution_backward_weight(IntArrayRef weight_size, const Tensor & grad_output, const Tensor & self, IntArrayRef padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic); Tensor miopen_convolution_transpose(const Tensor & self, const Tensor & weight, const Tensor & bias, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic); std::tuple miopen_convolution_transpose_backward(const Tensor & self, const Tensor & grad_output, const Tensor & weight, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic, 
std::array output_mask); Tensor miopen_convolution_transpose_backward_input(const Tensor & grad_output, const Tensor & weight, IntArrayRef padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic); Tensor miopen_convolution_transpose_backward_weight(IntArrayRef weight_size, const Tensor & grad_output, const Tensor & self, IntArrayRef padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic); Tensor miopen_depthwise_convolution(const Tensor & self, const Tensor & weight, const Tensor & bias, IntArrayRef padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic); Tensor miopen_depthwise_convolution_backward_input(IntArrayRef self_size, const Tensor & grad_output, const Tensor & weight, IntArrayRef padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic); std::tuple miopen_depthwise_convolution_backward(const Tensor & self, const Tensor & grad_output, const Tensor & weight, IntArrayRef padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic, std::array output_mask); Tensor miopen_depthwise_convolution_backward_weight(IntArrayRef weight_size, const Tensor & grad_output, const Tensor & self, IntArrayRef padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic); std::tuple miopen_rnn(const Tensor & input, TensorList weight, int64_t weight_stride0, const Tensor & hx, const Tensor & cx, int64_t mode, int64_t hidden_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, IntArrayRef batch_sizes, const Tensor & dropout_state); std::tuple> miopen_rnn_backward(const Tensor & input, TensorList weight, int64_t weight_stride0, const Tensor & weight_buf, const Tensor & hx, const Tensor & cx, const Tensor & output, const Tensor & grad_output, const Tensor & grad_hy, const Tensor & grad_cy, int64_t mode, int64_t hidden_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, IntArrayRef batch_sizes, const Tensor & dropout_state, const Tensor & reserve, std::array output_mask); Tensor mm(const Tensor & self, const Tensor & mat2); Tensor & mm_out(Tensor & out, const Tensor & self, const Tensor & mat2); Tensor mul(const Tensor & self, const Tensor & other); Tensor & mul_(Tensor & self, const Tensor & other); Tensor & mul_out(Tensor & out, const Tensor & self, const Tensor & other); Tensor mv(const Tensor & self, const Tensor & vec); Tensor & mv_out(Tensor & out, const Tensor & self, const Tensor & vec); Tensor narrow_copy(const Tensor & self, int64_t dim, int64_t start, int64_t length); std::tuple native_batch_norm(const Tensor & input, const Tensor & weight, const Tensor & bias, const Tensor & running_mean, const Tensor & running_var, bool training, double momentum, double eps); std::tuple batch_norm_stats(const Tensor & input, double eps); Tensor batch_norm_elemt(const Tensor & input, const Tensor & weight, const Tensor & bias, const Tensor & mean, const Tensor & invstd, double eps); std::tuple batch_norm_gather_stats(const Tensor & input, const Tensor & mean, const Tensor & invstd, const Tensor & running_mean, const Tensor & running_var, double momentum, double eps, int64_t count); std::tuple batch_norm_gather_stats_with_counts(const Tensor & input, const Tensor & mean, const Tensor & invstd, const Tensor & running_mean, const Tensor & running_var, double momentum, double 
eps, IntArrayRef counts); std::tuple native_batch_norm_backward(const Tensor & grad_out, const Tensor & input, const Tensor & weight, const Tensor & running_mean, const Tensor & running_var, const Tensor & save_mean, const Tensor & save_invstd, bool train, double eps, std::array output_mask); std::tuple batch_norm_backward_reduce(const Tensor & grad_out, const Tensor & input, const Tensor & mean, const Tensor & invstd, const Tensor & weight, bool input_g, bool weight_g, bool bias_g); Tensor batch_norm_backward_elemt(const Tensor & grad_out, const Tensor & input, const Tensor & mean, const Tensor & invstd, const Tensor & weight, const Tensor & mean_dy, const Tensor & mean_dy_xmu); std::tuple batch_norm_update_stats(const Tensor & input, const Tensor & running_mean, const Tensor & running_var, double momentum); Tensor & randperm_out(Tensor & out, int64_t n, Generator * generator); Tensor & range_out(Tensor & out, Scalar start, Scalar end, Scalar step); Tensor & reciprocal_(Tensor & self); Tensor & reciprocal_out(Tensor & out, const Tensor & self); Tensor & neg_out(Tensor & out, const Tensor & self); Tensor repeat_interleave(const Tensor & repeats); Tensor & round_out(Tensor & out, const Tensor & self); Tensor relu(const Tensor & self); Tensor & relu_(Tensor & self); Tensor prelu(const Tensor & self, const Tensor & weight); std::tuple prelu_backward(const Tensor & grad_output, const Tensor & self, const Tensor & weight); Tensor gelu(const Tensor & self); Tensor gelu_backward(const Tensor & grad, const Tensor & self); Tensor hardshrink(const Tensor & self, Scalar lambd); Tensor hardshrink_backward(const Tensor & grad_out, const Tensor & self, Scalar lambd); Tensor & rsqrt_out(Tensor & out, const Tensor & self); Tensor sigmoid(const Tensor & self); Tensor & sigmoid_(Tensor & self); Tensor & sigmoid_out(Tensor & out, const Tensor & self); Tensor & sin_(Tensor & self); Tensor & sin_out(Tensor & out, const Tensor & self); Tensor & sinh_(Tensor & self); Tensor & sinh_out(Tensor & out, const Tensor & self); Tensor _softmax(const Tensor & self, int64_t dim, bool half_to_float); Tensor _softmax_backward_data(const Tensor & grad_output, const Tensor & output, int64_t dim, const Tensor & self); Tensor & sspaddmm_out(Tensor & out, const Tensor & self, const Tensor & mat1, const Tensor & mat2, Scalar beta, Scalar alpha); Tensor & sqrt_(Tensor & self); Tensor & sqrt_out(Tensor & out, const Tensor & self); Tensor & tan_(Tensor & self); Tensor & tan_out(Tensor & out, const Tensor & self); Tensor & tanh_(Tensor & self); Tensor & tanh_out(Tensor & out, const Tensor & self); Tensor flip(const Tensor & self, IntArrayRef dims); Tensor roll(const Tensor & self, IntArrayRef shifts, IntArrayRef dims); Tensor & trunc_out(Tensor & out, const Tensor & self); std::tuple _unique(const Tensor & self, bool sorted, bool return_inverse); std::tuple unique_dim(const Tensor & self, int64_t dim, bool sorted, bool return_inverse, bool return_counts); std::tuple unique_consecutive(const Tensor & self, bool return_inverse, bool return_counts, c10::optional dim); std::tuple unique_dim_consecutive(const Tensor & self, int64_t dim, bool return_inverse, bool return_counts); std::tuple _unique2(const Tensor & self, bool sorted, bool return_inverse, bool return_counts); Tensor _s_where(const Tensor & condition, const Tensor & self, const Tensor & other); std::tuple _weight_norm_cuda_interface(const Tensor & v, const Tensor & g, int64_t dim); std::tuple _weight_norm_cuda_interface_backward(const Tensor & grad_w, const Tensor & saved_v, 
const Tensor & saved_g, const Tensor & saved_norms, int64_t dim); Tensor _standard_gamma_grad(const Tensor & self, const Tensor & output); Tensor _standard_gamma(const Tensor & self, Generator * generator); Tensor _dirichlet_grad(const Tensor & x, const Tensor & alpha, const Tensor & total); Tensor _sample_dirichlet(const Tensor & self, Generator * generator); Tensor poisson(const Tensor & self, Generator * generator); Tensor clone(const Tensor & self); Tensor & pow_out(Tensor & out, const Tensor & self, Scalar exponent); Tensor pow(const Tensor & self, Scalar exponent); Tensor & zero_(Tensor & self); Tensor & sub_out(Tensor & out, const Tensor & self, const Tensor & other, Scalar alpha); Tensor sub(const Tensor & self, const Tensor & other, Scalar alpha); Tensor & sub_(Tensor & self, const Tensor & other, Scalar alpha); Tensor & addmm_out(Tensor & out, const Tensor & self, const Tensor & mat1, const Tensor & mat2, Scalar beta, Scalar alpha); Tensor addmm(const Tensor & self, const Tensor & mat1, const Tensor & mat2, Scalar beta, Scalar alpha); Tensor & addmm_(Tensor & self, const Tensor & mat1, const Tensor & mat2, Scalar beta, Scalar alpha); Tensor to_sparse(const Tensor & self, int64_t sparse_dim); Tensor to_sparse(const Tensor & self); Tensor fake_quantize_per_tensor_affine(const Tensor & self, double scale, int64_t zero_point, int64_t quant_min, int64_t quant_max); Tensor fake_quantize_per_tensor_affine_backward(const Tensor & grad, const Tensor & self, double scale, int64_t zero_point, int64_t quant_min, int64_t quant_max); Tensor fake_quantize_per_channel_affine(const Tensor & self, const Tensor & scale, const Tensor & zero_point, int64_t axis, int64_t quant_min, int64_t quant_max); Tensor fake_quantize_per_channel_affine_backward(const Tensor & grad, const Tensor & self, const Tensor & scale, const Tensor & zero_point, int64_t axis, int64_t quant_min, int64_t quant_max); Scalar _local_scalar_dense(const Tensor & self); std::tuple _thnn_fused_lstm_cell(const Tensor & input_gates, const Tensor & hidden_gates, const Tensor & cx, const Tensor & input_bias, const Tensor & hidden_bias); std::tuple _thnn_fused_lstm_cell_backward(const Tensor & grad_hy, const Tensor & grad_cy, const Tensor & cx, const Tensor & cy, const Tensor & workspace, bool has_bias); std::tuple _thnn_fused_gru_cell(const Tensor & input_gates, const Tensor & hidden_gates, const Tensor & hx, const Tensor & input_bias, const Tensor & hidden_bias); std::tuple _thnn_fused_gru_cell_backward(const Tensor & grad_hy, const Tensor & workspace, bool has_bias); Tensor & set_(Tensor & self, Storage source); Tensor & set_(Tensor & self, Storage source, int64_t storage_offset, IntArrayRef size, IntArrayRef stride); Tensor & set_(Tensor & self, const Tensor & source); Tensor & set_(Tensor & self); bool is_set_to(const Tensor & self, const Tensor & tensor); Tensor & masked_fill_(Tensor & self, const Tensor & mask, Scalar value); Tensor & masked_fill_(Tensor & self, const Tensor & mask, const Tensor & value); Tensor & masked_scatter_(Tensor & self, const Tensor & mask, const Tensor & source); Tensor view(const Tensor & self, IntArrayRef size); Tensor & put_(Tensor & self, const Tensor & index, const Tensor & source, bool accumulate); Tensor & index_add_(Tensor & self, int64_t dim, const Tensor & index, const Tensor & source); Tensor & index_fill_(Tensor & self, int64_t dim, const Tensor & index, Scalar value); Tensor & index_fill_(Tensor & self, int64_t dim, const Tensor & index, const Tensor & value); Tensor & scatter_(Tensor & self, 
int64_t dim, const Tensor & index, const Tensor & src); Tensor & scatter_(Tensor & self, int64_t dim, const Tensor & index, Scalar value); Tensor & scatter_add_(Tensor & self, int64_t dim, const Tensor & index, const Tensor & src); Tensor __and__(const Tensor & self, Scalar other); Tensor __and__(const Tensor & self, const Tensor & other); Tensor & __iand__(Tensor & self, Scalar other); Tensor & __iand__(Tensor & self, const Tensor & other); Tensor __or__(const Tensor & self, Scalar other); Tensor __or__(const Tensor & self, const Tensor & other); Tensor & __ior__(Tensor & self, Scalar other); Tensor & __ior__(Tensor & self, const Tensor & other); Tensor __xor__(const Tensor & self, Scalar other); Tensor __xor__(const Tensor & self, const Tensor & other); Tensor & __ixor__(Tensor & self, Scalar other); Tensor & __ixor__(Tensor & self, const Tensor & other); Tensor __lshift__(const Tensor & self, Scalar other); Tensor __lshift__(const Tensor & self, const Tensor & other); Tensor & __ilshift__(Tensor & self, Scalar other); Tensor & __ilshift__(Tensor & self, const Tensor & other); Tensor __rshift__(const Tensor & self, Scalar other); Tensor __rshift__(const Tensor & self, const Tensor & other); Tensor & __irshift__(Tensor & self, Scalar other); Tensor & __irshift__(Tensor & self, const Tensor & other); Tensor & lgamma_(Tensor & self); Tensor & tril_(Tensor & self, int64_t diagonal); Tensor & triu_(Tensor & self, int64_t diagonal); Tensor & renorm_(Tensor & self, Scalar p, int64_t dim, Scalar maxnorm); Tensor & pow_(Tensor & self, Scalar exponent); Tensor & pow_(Tensor & self, const Tensor & exponent); Tensor & lerp_(Tensor & self, const Tensor & end, Scalar weight); Tensor & lerp_(Tensor & self, const Tensor & end, const Tensor & weight); Tensor & fmod_(Tensor & self, Scalar other); Tensor & fmod_(Tensor & self, const Tensor & other); Tensor & remainder_(Tensor & self, Scalar other); Tensor & remainder_(Tensor & self, const Tensor & other); Tensor & addbmm_(Tensor & self, const Tensor & batch1, const Tensor & batch2, Scalar beta, Scalar alpha); Tensor & addbmm_out(Tensor & out, const Tensor & self, const Tensor & batch1, const Tensor & batch2, Scalar beta, Scalar alpha); Tensor addbmm(const Tensor & self, const Tensor & batch1, const Tensor & batch2, Scalar beta, Scalar alpha); Tensor & random_(Tensor & self, int64_t from, int64_t to, Generator * generator); Tensor & random_(Tensor & self, int64_t to, Generator * generator); Tensor & random_(Tensor & self, Generator * generator); Tensor & uniform_(Tensor & self, double from, double to, Generator * generator); Tensor & normal_(Tensor & self, double mean, double std, Generator * generator); Tensor & cauchy_(Tensor & self, double median, double sigma, Generator * generator); Tensor & log_normal_(Tensor & self, double mean, double std, Generator * generator); Tensor & exponential_(Tensor & self, double lambd, Generator * generator); Tensor & geometric_(Tensor & self, double p, Generator * generator); Tensor & diag_out(Tensor & out, const Tensor & self, int64_t diagonal); Tensor diag(const Tensor & self, int64_t diagonal); Tensor & triu_out(Tensor & out, const Tensor & self, int64_t diagonal); Tensor & tril_out(Tensor & out, const Tensor & self, int64_t diagonal); Tensor tril_indices(int64_t row, int64_t col, int64_t offset, const TensorOptions & options); Tensor triu_indices(int64_t row, int64_t col, int64_t offset, const TensorOptions & options); Tensor trace(const Tensor & self); Tensor & ne_out(Tensor & out, const Tensor & self, Scalar 
other); Tensor ne(const Tensor & self, Scalar other); Tensor & ne_out(Tensor & out, const Tensor & self, const Tensor & other); Tensor ne(const Tensor & self, const Tensor & other); Tensor & eq_out(Tensor & out, const Tensor & self, Scalar other); Tensor eq(const Tensor & self, Scalar other); Tensor & eq_out(Tensor & out, const Tensor & self, const Tensor & other); Tensor eq(const Tensor & self, const Tensor & other); Tensor & ge_out(Tensor & out, const Tensor & self, Scalar other); Tensor ge(const Tensor & self, Scalar other); Tensor & ge_out(Tensor & out, const Tensor & self, const Tensor & other); Tensor ge(const Tensor & self, const Tensor & other); Tensor & le_out(Tensor & out, const Tensor & self, Scalar other); Tensor le(const Tensor & self, Scalar other); Tensor & le_out(Tensor & out, const Tensor & self, const Tensor & other); Tensor le(const Tensor & self, const Tensor & other); Tensor & gt_out(Tensor & out, const Tensor & self, Scalar other); Tensor gt(const Tensor & self, Scalar other); Tensor & gt_out(Tensor & out, const Tensor & self, const Tensor & other); Tensor gt(const Tensor & self, const Tensor & other); Tensor & lt_out(Tensor & out, const Tensor & self, Scalar other); Tensor lt(const Tensor & self, Scalar other); Tensor & lt_out(Tensor & out, const Tensor & self, const Tensor & other); Tensor lt(const Tensor & self, const Tensor & other); Tensor & take_out(Tensor & out, const Tensor & self, const Tensor & index); Tensor take(const Tensor & self, const Tensor & index); Tensor & index_select_out(Tensor & out, const Tensor & self, int64_t dim, const Tensor & index); Tensor index_select(const Tensor & self, int64_t dim, const Tensor & index); Tensor & masked_select_out(Tensor & out, const Tensor & self, const Tensor & mask); Tensor masked_select(const Tensor & self, const Tensor & mask); Tensor & nonzero_out(Tensor & out, const Tensor & self); Tensor nonzero(const Tensor & self); Tensor & gather_out(Tensor & out, const Tensor & self, int64_t dim, const Tensor & index, bool sparse_grad); Tensor gather(const Tensor & self, int64_t dim, const Tensor & index, bool sparse_grad); std::tuple lstsq_out(Tensor & X, Tensor & qr, const Tensor & self, const Tensor & A); std::tuple lstsq(const Tensor & self, const Tensor & A); std::tuple _triangular_solve_helper(const Tensor & self, const Tensor & A, bool upper, bool transpose, bool unitriangular); std::tuple _symeig_helper(const Tensor & self, bool eigenvectors, bool upper); std::tuple eig_out(Tensor & e, Tensor & v, const Tensor & self, bool eigenvectors); std::tuple eig(const Tensor & self, bool eigenvectors); std::tuple _svd_helper(const Tensor & self, bool some, bool compute_uv); Tensor _cholesky_helper(const Tensor & self, bool upper); Tensor _cholesky_solve_helper(const Tensor & self, const Tensor & A, bool upper); std::tuple _solve_helper(const Tensor & self, const Tensor & A); Tensor & cholesky_inverse_out(Tensor & out, const Tensor & self, bool upper); Tensor cholesky_inverse(const Tensor & self, bool upper); std::tuple _qr_helper(const Tensor & self, bool some); std::tuple geqrf_out(Tensor & a, Tensor & tau, const Tensor & self); std::tuple geqrf(const Tensor & self); std::tuple _lu_with_info(const Tensor & self, bool pivot, bool check_errors); Tensor _lu_solve_helper(const Tensor & self, const Tensor & LU_data, const Tensor & LU_pivots); Tensor & multinomial_out(Tensor & out, const Tensor & self, int64_t num_samples, bool replacement, Generator * generator); Tensor multinomial(const Tensor & self, int64_t num_samples, bool 
replacement, Generator * generator); std::tuple _multinomial_alias_setup(const Tensor & probs); Tensor _multinomial_alias_draw(const Tensor & J, const Tensor & q, int64_t num_samples, Generator * generator); Tensor & lgamma_out(Tensor & out, const Tensor & self); Tensor lgamma(const Tensor & self); Tensor erfinv(const Tensor & self); Tensor & erfinv_(Tensor & self); Tensor & erfinv_out(Tensor & out, const Tensor & self); Tensor & sign_out(Tensor & out, const Tensor & self); Tensor dist(const Tensor & self, const Tensor & other, Scalar p); Tensor & lerp_out(Tensor & out, const Tensor & self, const Tensor & end, Scalar weight); Tensor & lerp_out(Tensor & out, const Tensor & self, const Tensor & end, const Tensor & weight); Tensor lerp(const Tensor & self, const Tensor & end, Scalar weight); Tensor lerp(const Tensor & self, const Tensor & end, const Tensor & weight); Tensor & histc_out(Tensor & out, const Tensor & self, int64_t bins, Scalar min, Scalar max); Tensor histc(const Tensor & self, int64_t bins, Scalar min, Scalar max); Tensor & fmod_out(Tensor & out, const Tensor & self, Scalar other); Tensor fmod(const Tensor & self, Scalar other); Tensor & fmod_out(Tensor & out, const Tensor & self, const Tensor & other); Tensor fmod(const Tensor & self, const Tensor & other); Tensor & remainder_out(Tensor & out, const Tensor & self, Scalar other); Tensor remainder(const Tensor & self, Scalar other); Tensor & remainder_out(Tensor & out, const Tensor & self, const Tensor & other); Tensor remainder(const Tensor & self, const Tensor & other); Tensor & min_out(Tensor & out, const Tensor & self, const Tensor & other); Tensor min(const Tensor & self, const Tensor & other); Tensor min(const Tensor & self); Tensor & max_out(Tensor & out, const Tensor & self, const Tensor & other); Tensor max(const Tensor & self, const Tensor & other); Tensor max(const Tensor & self); Tensor median(const Tensor & self); std::tuple sort_out(Tensor & values, Tensor & indices, const Tensor & self, int64_t dim, bool descending); std::tuple sort(const Tensor & self, int64_t dim, bool descending); std::tuple topk_out(Tensor & values, Tensor & indices, const Tensor & self, int64_t k, int64_t dim, bool largest, bool sorted); std::tuple topk(const Tensor & self, int64_t k, int64_t dim, bool largest, bool sorted); Tensor & renorm_out(Tensor & out, const Tensor & self, Scalar p, int64_t dim, Scalar maxnorm); Tensor renorm(const Tensor & self, Scalar p, int64_t dim, Scalar maxnorm); Tensor unfold(const Tensor & self, int64_t dimension, int64_t size, int64_t step); bool equal(const Tensor & self, const Tensor & other); Tensor & pow_out(Tensor & out, const Tensor & self, const Tensor & exponent); Tensor pow(const Tensor & self, const Tensor & exponent); Tensor & pow_out(Tensor & out, Scalar self, const Tensor & exponent); Tensor pow(Scalar self, const Tensor & exponent); Tensor & normal_out(Tensor & out, const Tensor & mean, double std, Generator * generator); Tensor normal(const Tensor & mean, double std, Generator * generator); Tensor & normal_out(Tensor & out, double mean, const Tensor & std, Generator * generator); Tensor normal(double mean, const Tensor & std, Generator * generator); Tensor & normal_out(Tensor & out, const Tensor & mean, const Tensor & std, Generator * generator); Tensor normal(const Tensor & mean, const Tensor & std, Generator * generator); Tensor _addr(const Tensor & self, const Tensor & vec1, const Tensor & vec2, Scalar beta, Scalar alpha); Tensor & _addr_(Tensor & self, const Tensor & vec1, const Tensor & 
vec2, Scalar beta, Scalar alpha); Tensor & _addr_out(Tensor & out, const Tensor & self, const Tensor & vec1, const Tensor & vec2, Scalar beta, Scalar alpha); Tensor & _index_copy_(Tensor & self, int64_t dim, const Tensor & index, const Tensor & source); Tensor _cumsum(const Tensor & self, int64_t dim); Tensor & _cumsum_out(Tensor & out, const Tensor & self, int64_t dim); Tensor _cumprod(const Tensor & self, int64_t dim); Tensor & _cumprod_out(Tensor & out, const Tensor & self, int64_t dim); Tensor _var(const Tensor & self, bool unbiased); Tensor _std(const Tensor & self, bool unbiased); Tensor _cat(TensorList tensors, int64_t dim); Tensor & _cat_out(Tensor & out, TensorList tensors, int64_t dim); std::tuple _mode(const Tensor & self, int64_t dim, bool keepdim); std::tuple _mode_out(Tensor & values, Tensor & indices, const Tensor & self, int64_t dim, bool keepdim); std::tuple _max(const Tensor & self, int64_t dim, bool keepdim); std::tuple _max_out(Tensor & max, Tensor & max_indices, const Tensor & self, int64_t dim, bool keepdim); std::tuple _min(const Tensor & self, int64_t dim, bool keepdim); std::tuple _min_out(Tensor & min, Tensor & min_indices, const Tensor & self, int64_t dim, bool keepdim); Tensor & binary_cross_entropy_out(Tensor & out, const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction); Tensor binary_cross_entropy(const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction); Tensor & binary_cross_entropy_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction); Tensor binary_cross_entropy_backward(const Tensor & grad_output, const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction); Tensor & mse_loss_out(Tensor & out, const Tensor & self, const Tensor & target, int64_t reduction); Tensor mse_loss(const Tensor & self, const Tensor & target, int64_t reduction); Tensor & mse_loss_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, const Tensor & target, int64_t reduction); Tensor mse_loss_backward(const Tensor & grad_output, const Tensor & self, const Tensor & target, int64_t reduction); Tensor & l1_loss_out(Tensor & out, const Tensor & self, const Tensor & target, int64_t reduction); Tensor l1_loss(const Tensor & self, const Tensor & target, int64_t reduction); Tensor & l1_loss_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, const Tensor & target, int64_t reduction); Tensor l1_loss_backward(const Tensor & grad_output, const Tensor & self, const Tensor & target, int64_t reduction); Tensor & multi_margin_loss_out(Tensor & out, const Tensor & self, const Tensor & target, Scalar p, Scalar margin, const Tensor & weight, int64_t reduction); Tensor multi_margin_loss(const Tensor & self, const Tensor & target, Scalar p, Scalar margin, const Tensor & weight, int64_t reduction); Tensor & multi_margin_loss_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, const Tensor & target, Scalar p, Scalar margin, const Tensor & weight, int64_t reduction); Tensor multi_margin_loss_backward(const Tensor & grad_output, const Tensor & self, const Tensor & target, Scalar p, Scalar margin, const Tensor & weight, int64_t reduction); std::tuple multilabel_margin_loss_forward_out(Tensor & output, Tensor & is_target, const Tensor & self, const Tensor & target, int64_t reduction); std::tuple multilabel_margin_loss_forward(const Tensor & self, 
const Tensor & target, int64_t reduction); Tensor & multilabel_margin_loss_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, const Tensor & target, int64_t reduction, const Tensor & is_target); Tensor multilabel_margin_loss_backward(const Tensor & grad_output, const Tensor & self, const Tensor & target, int64_t reduction, const Tensor & is_target); std::tuple nll_loss_forward_out(Tensor & output, Tensor & total_weight, const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction, int64_t ignore_index); std::tuple nll_loss_forward(const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction, int64_t ignore_index); Tensor & nll_loss_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction, int64_t ignore_index, const Tensor & total_weight); Tensor nll_loss_backward(const Tensor & grad_output, const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction, int64_t ignore_index, const Tensor & total_weight); std::tuple nll_loss2d_forward_out(Tensor & output, Tensor & total_weight, const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction, int64_t ignore_index); std::tuple nll_loss2d_forward(const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction, int64_t ignore_index); Tensor & nll_loss2d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction, int64_t ignore_index, const Tensor & total_weight); Tensor nll_loss2d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction, int64_t ignore_index, const Tensor & total_weight); Tensor & smooth_l1_loss_out(Tensor & out, const Tensor & self, const Tensor & target, int64_t reduction); Tensor smooth_l1_loss(const Tensor & self, const Tensor & target, int64_t reduction); Tensor & smooth_l1_loss_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, const Tensor & target, int64_t reduction); Tensor smooth_l1_loss_backward(const Tensor & grad_output, const Tensor & self, const Tensor & target, int64_t reduction); Tensor & soft_margin_loss_out(Tensor & out, const Tensor & self, const Tensor & target, int64_t reduction); Tensor soft_margin_loss(const Tensor & self, const Tensor & target, int64_t reduction); Tensor & soft_margin_loss_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, const Tensor & target, int64_t reduction); Tensor soft_margin_loss_backward(const Tensor & grad_output, const Tensor & self, const Tensor & target, int64_t reduction); Tensor & elu_out(Tensor & out, const Tensor & self, Scalar alpha, Scalar scale, Scalar input_scale); Tensor elu(const Tensor & self, Scalar alpha, Scalar scale, Scalar input_scale); Tensor & elu_backward_out(Tensor & grad_input, const Tensor & grad_output, Scalar alpha, Scalar scale, Scalar input_scale, const Tensor & output); Tensor elu_backward(const Tensor & grad_output, Scalar alpha, Scalar scale, Scalar input_scale, const Tensor & output); Tensor & elu_(Tensor & self, Scalar alpha, Scalar scale, Scalar input_scale); Tensor & glu_out(Tensor & out, const Tensor & self, int64_t dim); Tensor glu(const Tensor & self, int64_t dim); Tensor & glu_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, int64_t dim); Tensor 
glu_backward(const Tensor & grad_output, const Tensor & self, int64_t dim); Tensor & hardtanh_out(Tensor & out, const Tensor & self, Scalar min_val, Scalar max_val); Tensor hardtanh(const Tensor & self, Scalar min_val, Scalar max_val); Tensor & hardtanh_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, Scalar min_val, Scalar max_val); Tensor hardtanh_backward(const Tensor & grad_output, const Tensor & self, Scalar min_val, Scalar max_val); Tensor & hardtanh_(Tensor & self, Scalar min_val, Scalar max_val); Tensor & leaky_relu_out(Tensor & out, const Tensor & self, Scalar negative_slope); Tensor leaky_relu(const Tensor & self, Scalar negative_slope); Tensor & leaky_relu_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, Scalar negative_slope); Tensor leaky_relu_backward(const Tensor & grad_output, const Tensor & self, Scalar negative_slope); Tensor & leaky_relu_(Tensor & self, Scalar negative_slope); std::tuple log_sigmoid_forward_out(Tensor & output, Tensor & buffer, const Tensor & self); std::tuple log_sigmoid_forward(const Tensor & self); Tensor & log_sigmoid_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, const Tensor & buffer); Tensor log_sigmoid_backward(const Tensor & grad_output, const Tensor & self, const Tensor & buffer); Tensor & rrelu_with_noise_out(Tensor & out, const Tensor & self, const Tensor & noise, Scalar lower, Scalar upper, bool training, Generator * generator); Tensor rrelu_with_noise(const Tensor & self, const Tensor & noise, Scalar lower, Scalar upper, bool training, Generator * generator); Tensor & rrelu_with_noise_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, const Tensor & noise, Scalar lower, Scalar upper, bool training); Tensor rrelu_with_noise_backward(const Tensor & grad_output, const Tensor & self, const Tensor & noise, Scalar lower, Scalar upper, bool training); Tensor & rrelu_with_noise_(Tensor & self, const Tensor & noise, Scalar lower, Scalar upper, bool training, Generator * generator); Tensor & softplus_out(Tensor & out, const Tensor & self, Scalar beta, Scalar threshold); Tensor softplus(const Tensor & self, Scalar beta, Scalar threshold); Tensor & softplus_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, Scalar beta, Scalar threshold, const Tensor & output); Tensor softplus_backward(const Tensor & grad_output, const Tensor & self, Scalar beta, Scalar threshold, const Tensor & output); Tensor & softshrink_out(Tensor & out, const Tensor & self, Scalar lambd); Tensor softshrink(const Tensor & self, Scalar lambd); Tensor & softshrink_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, Scalar lambd); Tensor softshrink_backward(const Tensor & grad_output, const Tensor & self, Scalar lambd); Tensor & adaptive_avg_pool2d_out(Tensor & out, const Tensor & self, IntArrayRef output_size); Tensor _adaptive_avg_pool2d(const Tensor & self, IntArrayRef output_size); Tensor _adaptive_avg_pool2d_backward(const Tensor & grad_output, const Tensor & self); Tensor & adaptive_avg_pool3d_out(Tensor & out, const Tensor & self, IntArrayRef output_size); Tensor adaptive_avg_pool3d(const Tensor & self, IntArrayRef output_size); Tensor & adaptive_avg_pool3d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self); Tensor adaptive_avg_pool3d_backward(const Tensor & grad_output, const Tensor & self); std::tuple adaptive_max_pool2d_out(Tensor & out, Tensor & 
indices, const Tensor & self, IntArrayRef output_size); std::tuple adaptive_max_pool2d(const Tensor & self, IntArrayRef output_size); Tensor & adaptive_max_pool2d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, const Tensor & indices); Tensor adaptive_max_pool2d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & indices); std::tuple adaptive_max_pool3d_out(Tensor & out, Tensor & indices, const Tensor & self, IntArrayRef output_size); std::tuple adaptive_max_pool3d(const Tensor & self, IntArrayRef output_size); Tensor & adaptive_max_pool3d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, const Tensor & indices); Tensor adaptive_max_pool3d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & indices); Tensor & avg_pool2d_out(Tensor & out, const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional divisor_override); Tensor avg_pool2d(const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional divisor_override); Tensor & avg_pool2d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional divisor_override); Tensor avg_pool2d_backward(const Tensor & grad_output, const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional divisor_override); Tensor & avg_pool3d_out(Tensor & out, const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional divisor_override); Tensor avg_pool3d(const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional divisor_override); Tensor & avg_pool3d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional divisor_override); Tensor avg_pool3d_backward(const Tensor & grad_output, const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional divisor_override); std::tuple fractional_max_pool2d_out(Tensor & output, Tensor & indices, const Tensor & self, IntArrayRef kernel_size, IntArrayRef output_size, const Tensor & random_samples); std::tuple fractional_max_pool2d(const Tensor & self, IntArrayRef kernel_size, IntArrayRef output_size, const Tensor & random_samples); Tensor & fractional_max_pool2d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, IntArrayRef kernel_size, IntArrayRef output_size, const Tensor & indices); Tensor fractional_max_pool2d_backward(const Tensor & grad_output, const Tensor & self, IntArrayRef kernel_size, IntArrayRef output_size, const Tensor & indices); std::tuple fractional_max_pool3d_out(Tensor & output, Tensor & indices, const Tensor & self, IntArrayRef kernel_size, IntArrayRef output_size, const Tensor & random_samples); std::tuple fractional_max_pool3d(const Tensor & self, IntArrayRef kernel_size, IntArrayRef output_size, const Tensor & random_samples); Tensor & fractional_max_pool3d_backward_out(Tensor & grad_input, const Tensor & grad_output, 
const Tensor & self, IntArrayRef kernel_size, IntArrayRef output_size, const Tensor & indices); Tensor fractional_max_pool3d_backward(const Tensor & grad_output, const Tensor & self, IntArrayRef kernel_size, IntArrayRef output_size, const Tensor & indices); std::tuple max_pool2d_with_indices_out(Tensor & out, Tensor & indices, const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode); std::tuple max_pool2d_with_indices(const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode); Tensor & max_pool2d_with_indices_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode, const Tensor & indices); Tensor max_pool2d_with_indices_backward(const Tensor & grad_output, const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode, const Tensor & indices); std::tuple max_pool3d_with_indices_out(Tensor & out, Tensor & indices, const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode); std::tuple max_pool3d_with_indices(const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode); Tensor & max_pool3d_with_indices_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode, const Tensor & indices); Tensor max_pool3d_with_indices_backward(const Tensor & grad_output, const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode, const Tensor & indices); Tensor & max_unpool2d_out(Tensor & out, const Tensor & self, const Tensor & indices, IntArrayRef output_size); Tensor max_unpool2d(const Tensor & self, const Tensor & indices, IntArrayRef output_size); Tensor & max_unpool2d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, const Tensor & indices, IntArrayRef output_size); Tensor max_unpool2d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & indices, IntArrayRef output_size); Tensor & max_unpool3d_out(Tensor & out, const Tensor & self, const Tensor & indices, IntArrayRef output_size, IntArrayRef stride, IntArrayRef padding); Tensor max_unpool3d(const Tensor & self, const Tensor & indices, IntArrayRef output_size, IntArrayRef stride, IntArrayRef padding); Tensor & max_unpool3d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, const Tensor & indices, IntArrayRef output_size, IntArrayRef stride, IntArrayRef padding); Tensor max_unpool3d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & indices, IntArrayRef output_size, IntArrayRef stride, IntArrayRef padding); Tensor & reflection_pad1d_out(Tensor & out, const Tensor & self, IntArrayRef padding); Tensor reflection_pad1d(const Tensor & self, IntArrayRef padding); Tensor & reflection_pad1d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, IntArrayRef padding); Tensor reflection_pad1d_backward(const Tensor & grad_output, const Tensor & self, IntArrayRef padding); Tensor & reflection_pad2d_out(Tensor & out, const Tensor & self, IntArrayRef padding); Tensor reflection_pad2d(const Tensor & self, 
IntArrayRef padding);
Tensor & reflection_pad2d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, IntArrayRef padding);
Tensor reflection_pad2d_backward(const Tensor & grad_output, const Tensor & self, IntArrayRef padding);
Tensor & replication_pad1d_out(Tensor & out, const Tensor & self, IntArrayRef padding);
Tensor replication_pad1d(const Tensor & self, IntArrayRef padding);
Tensor & replication_pad1d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, IntArrayRef padding);
Tensor replication_pad1d_backward(const Tensor & grad_output, const Tensor & self, IntArrayRef padding);
Tensor & replication_pad2d_out(Tensor & out, const Tensor & self, IntArrayRef padding);
Tensor replication_pad2d(const Tensor & self, IntArrayRef padding);
Tensor & replication_pad2d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, IntArrayRef padding);
Tensor replication_pad2d_backward(const Tensor & grad_output, const Tensor & self, IntArrayRef padding);
Tensor & replication_pad3d_out(Tensor & out, const Tensor & self, IntArrayRef padding);
Tensor replication_pad3d(const Tensor & self, IntArrayRef padding);
Tensor & replication_pad3d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, IntArrayRef padding);
Tensor replication_pad3d_backward(const Tensor & grad_output, const Tensor & self, IntArrayRef padding);
Tensor & upsample_linear1d_out(Tensor & out, const Tensor & self, IntArrayRef output_size, bool align_corners);
Tensor upsample_linear1d(const Tensor & self, IntArrayRef output_size, bool align_corners);
Tensor & upsample_linear1d_backward_out(Tensor & grad_input, const Tensor & grad_output, IntArrayRef output_size, IntArrayRef input_size, bool align_corners);
Tensor upsample_linear1d_backward(const Tensor & grad_output, IntArrayRef output_size, IntArrayRef input_size, bool align_corners);
Tensor & upsample_bilinear2d_out(Tensor & out, const Tensor & self, IntArrayRef output_size, bool align_corners);
Tensor upsample_bilinear2d(const Tensor & self, IntArrayRef output_size, bool align_corners);
Tensor & upsample_bilinear2d_backward_out(Tensor & grad_input, const Tensor & grad_output, IntArrayRef output_size, IntArrayRef input_size, bool align_corners);
Tensor upsample_bilinear2d_backward(const Tensor & grad_output, IntArrayRef output_size, IntArrayRef input_size, bool align_corners);
Tensor & upsample_bicubic2d_out(Tensor & out, const Tensor & self, IntArrayRef output_size, bool align_corners);
Tensor upsample_bicubic2d(const Tensor & self, IntArrayRef output_size, bool align_corners);
Tensor & upsample_bicubic2d_backward_out(Tensor & grad_input, const Tensor & grad_output, IntArrayRef output_size, IntArrayRef input_size, bool align_corners);
Tensor upsample_bicubic2d_backward(const Tensor & grad_output, IntArrayRef output_size, IntArrayRef input_size, bool align_corners);
Tensor & upsample_trilinear3d_out(Tensor & out, const Tensor & self, IntArrayRef output_size, bool align_corners);
Tensor upsample_trilinear3d(const Tensor & self, IntArrayRef output_size, bool align_corners);
Tensor & upsample_trilinear3d_backward_out(Tensor & grad_input, const Tensor & grad_output, IntArrayRef output_size, IntArrayRef input_size, bool align_corners);
Tensor upsample_trilinear3d_backward(const Tensor & grad_output, IntArrayRef output_size, IntArrayRef input_size, bool align_corners);
Tensor & upsample_nearest1d_out(Tensor & out, const Tensor & self, IntArrayRef output_size);
Tensor upsample_nearest1d(const Tensor & self, IntArrayRef output_size);
Tensor & upsample_nearest1d_backward_out(Tensor & grad_input, const Tensor & grad_output, IntArrayRef output_size, IntArrayRef input_size);
Tensor upsample_nearest1d_backward(const Tensor & grad_output, IntArrayRef output_size, IntArrayRef input_size);
Tensor & upsample_nearest2d_out(Tensor & out, const Tensor & self, IntArrayRef output_size);
Tensor upsample_nearest2d(const Tensor & self, IntArrayRef output_size);
Tensor & upsample_nearest2d_backward_out(Tensor & grad_input, const Tensor & grad_output, IntArrayRef output_size, IntArrayRef input_size);
Tensor upsample_nearest2d_backward(const Tensor & grad_output, IntArrayRef output_size, IntArrayRef input_size);
Tensor & upsample_nearest3d_out(Tensor & out, const Tensor & self, IntArrayRef output_size);
Tensor upsample_nearest3d(const Tensor & self, IntArrayRef output_size);
Tensor & upsample_nearest3d_backward_out(Tensor & grad_input, const Tensor & grad_output, IntArrayRef output_size, IntArrayRef input_size);
Tensor upsample_nearest3d_backward(const Tensor & grad_output, IntArrayRef output_size, IntArrayRef input_size);
Tensor & sigmoid_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & output);
Tensor sigmoid_backward(const Tensor & grad_output, const Tensor & output);
Tensor & tanh_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & output);
Tensor tanh_backward(const Tensor & grad_output, const Tensor & output);
Tensor & slow_conv_transpose2d_out(Tensor & out, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation);
Tensor slow_conv_transpose2d(const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation);
std::tuple<Tensor &,Tensor &,Tensor &> slow_conv_transpose2d_backward_out(Tensor & grad_input, Tensor & grad_weight, Tensor & grad_bias, const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation, const Tensor & columns, const Tensor & ones);
std::tuple<Tensor,Tensor,Tensor> slow_conv_transpose2d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation, const Tensor & columns, const Tensor & ones, std::array<bool,3> output_mask);
Tensor & slow_conv_transpose3d_out(Tensor & out, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation);
Tensor slow_conv_transpose3d(const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation);
std::tuple<Tensor &,Tensor &,Tensor &> slow_conv_transpose3d_backward_out(Tensor & grad_input, Tensor & grad_weight, Tensor & grad_bias, const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation, const Tensor & finput, const Tensor & fgrad_input);
std::tuple<Tensor,Tensor,Tensor> slow_conv_transpose3d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation, const Tensor & finput, const Tensor & fgrad_input, std::array<bool,3> output_mask);
std::tuple<Tensor &,Tensor &,Tensor &> thnn_conv2d_forward_out(Tensor & output, Tensor & finput, Tensor & fgrad_input, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding);
std::tuple<Tensor,Tensor,Tensor> thnn_conv2d_forward(const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding);
std::tuple<Tensor &,Tensor &,Tensor &> thnn_conv2d_backward_out(Tensor & grad_input, Tensor & grad_weight, Tensor & grad_bias, const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, const Tensor & finput, const Tensor & fgrad_input);
std::tuple<Tensor,Tensor,Tensor> thnn_conv2d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, const Tensor & finput, const Tensor & fgrad_input, std::array<bool,3> output_mask);
Tensor & thnn_conv_depthwise2d_forward_out(Tensor & out, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation);
Tensor thnn_conv_depthwise2d_forward(const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation);
std::tuple<Tensor &,Tensor &> thnn_conv_depthwise2d_backward_out(Tensor & grad_input, Tensor & grad_weight, const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation);
std::tuple<Tensor,Tensor> thnn_conv_depthwise2d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, std::array<bool,2> output_mask);
Tensor slow_conv_dilated2d(const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation);
std::tuple<Tensor,Tensor,Tensor> slow_conv_dilated2d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, std::array<bool,3> output_mask);
Tensor slow_conv_dilated3d(const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation);
std::tuple<Tensor,Tensor,Tensor> slow_conv_dilated3d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, std::array<bool,3> output_mask);
Tensor & col2im_out(Tensor & out, const Tensor & self, IntArrayRef output_size, IntArrayRef kernel_size, IntArrayRef dilation, IntArrayRef padding, IntArrayRef stride);
Tensor col2im(const Tensor & self, IntArrayRef output_size, IntArrayRef kernel_size, IntArrayRef dilation, IntArrayRef padding, IntArrayRef stride);
Tensor & col2im_backward_out(Tensor & grad_input, const Tensor & grad_output, IntArrayRef kernel_size, IntArrayRef dilation, IntArrayRef padding, IntArrayRef stride);
Tensor col2im_backward(const Tensor & grad_output, IntArrayRef kernel_size, IntArrayRef dilation, IntArrayRef padding, IntArrayRef stride);
Tensor & im2col_out(Tensor & out, const Tensor & self, IntArrayRef kernel_size, IntArrayRef dilation, IntArrayRef padding, IntArrayRef stride);
Tensor im2col(const Tensor & self, IntArrayRef kernel_size, IntArrayRef dilation, IntArrayRef padding, IntArrayRef stride);
Tensor & im2col_backward_out(Tensor & grad_input, const Tensor & grad_output, IntArrayRef input_size, IntArrayRef kernel_size, IntArrayRef dilation, IntArrayRef padding, IntArrayRef stride);
Tensor im2col_backward(const Tensor & grad_output, IntArrayRef input_size, IntArrayRef kernel_size, IntArrayRef dilation, IntArrayRef padding, IntArrayRef stride);
} // namespace CUDAType
#endif
} // namespace at
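// Usage sketch (illustrative only, not part of the generated header): under a
// static-dispatch build (USE_STATIC_DISPATCH), public calls such as at::im2col
// are routed to the backend-specific declarations above for CUDA tensors. The
// tensor shapes below are arbitrary example values.
//
//   #include <ATen/ATen.h>
//
//   // Allocate an example NCHW input on the GPU.
//   at::Tensor input = at::randn({1, 3, 32, 32}, at::kCUDA);
//
//   // Unfold 3x3 patches (stride 1, dilation 1, no padding); with static
//   // dispatch this resolves to the CUDAType::im2col declaration above.
//   at::Tensor patches = at::im2col(input,
//                                   /*kernel_size=*/{3, 3},
//                                   /*dilation=*/{1, 1},
//                                   /*padding=*/{0, 0},
//                                   /*stride=*/{1, 1});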