#pragma once

// @generated by aten/src/ATen/gen.py

#include <c10/core/Device.h>
#include <c10/core/MemoryFormat.h>
#include <c10/core/Scalar.h>
#include <c10/core/ScalarType.h>
#include <c10/core/TensorOptions.h>
#include <c10/util/ArrayRef.h>
#include <c10/util/Optional.h>
#include <c10/util/intrusive_ptr.h>

namespace c10 {
struct Storage;
}

namespace at {

class Tensor;
using TensorList = ArrayRef<Tensor>;
class Context;
struct Generator;
struct Quantizer;
// This is a temporary typedef to enable Quantizer in the aten native function API;
// we'll remove it once the Quantizer class is actually exposed to the frontend.
using ConstQuantizerPtr = const c10::intrusive_ptr<Quantizer>&;

namespace TypeDefault {

Tensor _cast_Byte(const Tensor & self, bool non_blocking);
Tensor _cast_Char(const Tensor & self, bool non_blocking);
Tensor _cast_Double(const Tensor & self, bool non_blocking);
Tensor _cast_Float(const Tensor & self, bool non_blocking);
Tensor _cast_Int(const Tensor & self, bool non_blocking);
Tensor _cast_Long(const Tensor & self, bool non_blocking);
Tensor _cast_Short(const Tensor & self, bool non_blocking);
Tensor _cast_Half(const Tensor & self, bool non_blocking);
void backward(const Tensor & self, const Tensor & gradient, bool keep_graph, bool create_graph);
void set_data(const Tensor & self, const Tensor & new_data);
Tensor data(const Tensor & self);
bool is_leaf(const Tensor & self);
int64_t output_nr(const Tensor & self);
int64_t _version(const Tensor & self);
#ifdef BUILD_NAMEDTENSOR
Tensor & rename_(Tensor & self, c10::optional<DimnameList> names);
#endif
#ifdef BUILD_NAMEDTENSOR
Tensor rename(const Tensor & self, c10::optional<DimnameList> names);
#endif
#ifdef BUILD_NAMEDTENSOR
Tensor align_to(const Tensor & self, DimnameList names);
#endif
#ifdef BUILD_NAMEDTENSOR
Tensor align_as(const Tensor & self, const Tensor & other);
#endif
#ifdef BUILD_NAMEDTENSOR
std::vector<Tensor> align_tensors(TensorList tensors);
#endif
#ifdef BUILD_NAMEDTENSOR
Tensor refine_names(const Tensor & self, DimnameList names);
#endif
#ifdef BUILD_NAMEDTENSOR
Tensor unflatten(const Tensor & self, Dimname dim, IntArrayRef sizes, DimnameList names);
#endif
#ifdef BUILD_NAMEDTENSOR
Tensor unflatten(const Tensor & self, int64_t dim, IntArrayRef sizes, DimnameList names);
#endif
int64_t _debug_has_internal_overlap(const Tensor & self);
std::tuple<Tensor,Tensor> _sobol_engine_draw(const Tensor & quasi, int64_t n, const Tensor & sobolstate, int64_t dimension, int64_t num_generated, c10::optional<ScalarType> dtype);
Tensor & _sobol_engine_ff_(Tensor & self, int64_t n, const Tensor & sobolstate, int64_t dimension, int64_t num_generated);
Tensor & _sobol_engine_scramble_(Tensor & self, const Tensor & ltm, int64_t dimension);
Tensor & _sobol_engine_initialize_state_(Tensor & self, int64_t dimension);
Tensor _reshape_from_tensor(const Tensor & self, const Tensor & shape);
Tensor _shape_as_tensor(const Tensor & self);
Tensor dropout(const Tensor & input, double p, bool train);
Tensor & dropout_(Tensor & self, double p, bool train);
Tensor feature_dropout(const Tensor & input, double p, bool train);
Tensor & feature_dropout_(Tensor & self, double p, bool train);
Tensor alpha_dropout(const Tensor & input, double p, bool train);
Tensor & alpha_dropout_(Tensor & self, double p, bool train);
Tensor feature_alpha_dropout(const Tensor & input, double p, bool train);
Tensor & feature_alpha_dropout_(Tensor & self, double p, bool train);
Tensor abs(const Tensor & self);
Tensor acos(const Tensor & self);
Tensor avg_pool1d(const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, bool ceil_mode, bool count_include_pad);
Tensor adaptive_avg_pool1d(const Tensor & self, IntArrayRef output_size);
std::tuple<Tensor,Tensor> adaptive_max_pool1d(const Tensor & self, IntArrayRef output_size);
Tensor add(const Tensor & self, Scalar other, Scalar alpha);
Tensor & add_(Tensor & self, Scalar other, Scalar alpha);
Tensor addr(const Tensor & self, const Tensor & vec1, const Tensor & vec2, Scalar beta, Scalar alpha);
Tensor & addr_(Tensor & self, const Tensor & vec1, const Tensor & vec2, Scalar beta, Scalar alpha);
Tensor & addr_out(Tensor & out, const Tensor & self, const Tensor & vec1, const Tensor & vec2, Scalar beta, Scalar alpha);
Tensor affine_grid_generator(const Tensor & theta, IntArrayRef size, bool align_corners);
Tensor affine_grid_generator_backward(const Tensor & grad, IntArrayRef size, bool align_corners);
Tensor all(const Tensor & self, int64_t dim, bool keepdim);
Tensor & all_out(Tensor & out, const Tensor & self, int64_t dim, bool keepdim);
#ifdef BUILD_NAMEDTENSOR
Tensor all(const Tensor & self, Dimname dim, bool keepdim);
#endif
#ifdef BUILD_NAMEDTENSOR
Tensor & all_out(Tensor & out, const Tensor & self, Dimname dim, bool keepdim);
#endif
bool allclose(const Tensor & self, const Tensor & other, double rtol, double atol, bool equal_nan);
Tensor any(const Tensor & self, int64_t dim, bool keepdim);
Tensor & any_out(Tensor & out, const Tensor & self, int64_t dim, bool keepdim);
#ifdef BUILD_NAMEDTENSOR
Tensor any(const Tensor & self, Dimname dim, bool keepdim);
#endif
#ifdef BUILD_NAMEDTENSOR
Tensor & any_out(Tensor & out, const Tensor & self, Dimname dim, bool keepdim);
#endif
Tensor arange(Scalar end, const TensorOptions & options);
Tensor arange(Scalar start, Scalar end, const TensorOptions & options);
Tensor arange(Scalar start, Scalar end, Scalar step, const TensorOptions & options);
Tensor & arange_out(Tensor & out, Scalar end);
Tensor _dim_arange(const Tensor & like, int64_t dim);
Tensor argmax(const Tensor & self, c10::optional<int64_t> dim, bool keepdim);
Tensor argmin(const Tensor & self, c10::optional<int64_t> dim, bool keepdim);
Tensor & as_strided_(Tensor & self, IntArrayRef size, IntArrayRef stride, c10::optional<int64_t> storage_offset);
Tensor asin(const Tensor & self);
Tensor atan(const Tensor & self);
Tensor & _baddbmm_mkl_(Tensor & self, const Tensor & batch1, const Tensor & batch2, Scalar beta, Scalar alpha);
Tensor bartlett_window(int64_t window_length, const TensorOptions & options);
Tensor bartlett_window(int64_t window_length, bool periodic, const TensorOptions & options);
Tensor batch_norm(const Tensor & input, const Tensor & weight, const Tensor & bias, const Tensor & running_mean, const Tensor & running_var, bool training, double momentum, double eps, bool cudnn_enabled);
std::tuple<Tensor,Tensor,Tensor,int64_t> _batch_norm_impl_index(const Tensor & input, const Tensor & weight, const Tensor & bias, const Tensor & running_mean, const Tensor & running_var, bool training, double momentum, double eps, bool cudnn_enabled);
std::tuple<Tensor,Tensor,Tensor> _batch_norm_impl_index_backward(int64_t impl_index, const Tensor & input, const Tensor & grad_output, const Tensor & weight, const Tensor & running_mean, const Tensor & running_var, const Tensor & save_mean, const Tensor & save_var_transform, bool train, double eps, std::array<bool,3> output_mask);
Tensor bernoulli(const Tensor & self, Generator * generator);
Tensor & bernoulli_out(Tensor & out, const Tensor & self, Generator * generator);
Tensor bernoulli(const Tensor & self, double p, Generator * generator);
Tensor bilinear(const Tensor & input1, const Tensor & input2, const Tensor & weight, const Tensor & bias);
Tensor binary_cross_entropy_with_logits(const Tensor & self, const Tensor & target, const Tensor & weight, const Tensor & pos_weight, int64_t reduction);
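// Usage sketch (not part of the generated API surface): the declarations above
// are normally reached through the public at:: free functions. This is a minimal,
// illustrative example; the shapes and the helper name are assumptions.
//
//   #include <ATen/ATen.h>
//
//   void arange_argmax_example() {
//     // arange(Scalar end, const TensorOptions & options)
//     at::Tensor t = at::arange(10, at::TensorOptions().dtype(at::kFloat));
//     // argmax(self, c10::optional<int64_t> dim, bool keepdim)
//     at::Tensor idx = at::argmax(t.reshape({2, 5}), /*dim=*/1, /*keepdim=*/false);
//   }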
Tensor binary_cross_entropy_with_logits_backward(const Tensor & grad_output, const Tensor & self, const Tensor & target, const Tensor & weight, const Tensor & pos_weight, int64_t reduction);
Tensor bitwise_not(const Tensor & self);
Tensor & bitwise_not_(Tensor & self);
Tensor logical_not(const Tensor & self);
Tensor & logical_not_(Tensor & self);
Tensor logical_xor(const Tensor & self, const Tensor & other);
Tensor & logical_xor_(Tensor & self, const Tensor & other);
Tensor blackman_window(int64_t window_length, const TensorOptions & options);
Tensor blackman_window(int64_t window_length, bool periodic, const TensorOptions & options);
std::vector<Tensor> broadcast_tensors(TensorList tensors);
Tensor cat(TensorList tensors, int64_t dim);
Tensor & cat_out(Tensor & out, TensorList tensors, int64_t dim);
#ifdef BUILD_NAMEDTENSOR
Tensor cat(TensorList tensors, Dimname dim);
#endif
#ifdef BUILD_NAMEDTENSOR
Tensor & cat_out(Tensor & out, TensorList tensors, Dimname dim);
#endif
Tensor ceil(const Tensor & self);
Tensor & ceil_(Tensor & self);
Tensor chain_matmul(TensorList matrices);
std::vector<Tensor> chunk(const Tensor & self, int64_t chunks, int64_t dim);
Tensor clamp(const Tensor & self, c10::optional<Scalar> min, c10::optional<Scalar> max);
Tensor clamp_max(const Tensor & self, Scalar max);
Tensor clamp_min(const Tensor & self, Scalar min);
bool cudnn_is_acceptable(const Tensor & self);
Tensor constant_pad_nd(const Tensor & self, IntArrayRef pad, Scalar value);
Tensor contiguous(const Tensor & self, MemoryFormat memory_format);
Tensor convolution(const Tensor & input, const Tensor & weight, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool transposed, IntArrayRef output_padding, int64_t groups);
Tensor convolution_overrideable(const Tensor & input, const Tensor & weight, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool transposed, IntArrayRef output_padding, int64_t groups);
std::tuple<Tensor,Tensor,Tensor> convolution_backward_overrideable(const Tensor & grad_output, const Tensor & input, const Tensor & weight, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool transposed, IntArrayRef output_padding, int64_t groups, std::array<bool,3> output_mask);
Tensor _convolution(const Tensor & input, const Tensor & weight, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool transposed, IntArrayRef output_padding, int64_t groups, bool benchmark, bool deterministic, bool cudnn_enabled);
Tensor _convolution_nogroup(const Tensor & input, const Tensor & weight, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool transposed, IntArrayRef output_padding);
std::tuple<Tensor,Tensor,Tensor> _convolution_double_backward(const Tensor & ggI, const Tensor & ggW, const Tensor & ggb, const Tensor & gO, const Tensor & weight, const Tensor & self, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool transposed, IntArrayRef output_padding, int64_t groups, bool benchmark, bool deterministic, bool cudnn_enabled, std::array<bool,3> output_mask);
Tensor conv1d(const Tensor & input, const Tensor & weight, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, int64_t groups);
Tensor conv2d(const Tensor & input, const Tensor & weight, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, int64_t groups);
Tensor conv3d(const Tensor & input, const Tensor & weight, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, int64_t groups);
Tensor
conv_tbc(const Tensor & self, const Tensor & weight, const Tensor & bias, int64_t pad); std::tuple conv_tbc_backward(const Tensor & self, const Tensor & input, const Tensor & weight, const Tensor & bias, int64_t pad); Tensor conv_transpose1d(const Tensor & input, const Tensor & weight, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, int64_t groups, IntArrayRef dilation); Tensor conv_transpose2d(const Tensor & input, const Tensor & weight, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, int64_t groups, IntArrayRef dilation); Tensor conv_transpose3d(const Tensor & input, const Tensor & weight, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, int64_t groups, IntArrayRef dilation); Tensor & copy_(Tensor & self, const Tensor & src, bool non_blocking); Tensor cos(const Tensor & self); Tensor cosh(const Tensor & self); Tensor cosine_embedding_loss(const Tensor & input1, const Tensor & input2, const Tensor & target, double margin, int64_t reduction); Tensor cumsum(const Tensor & self, int64_t dim, c10::optional dtype); Tensor & cumsum_out(Tensor & out, const Tensor & self, int64_t dim, c10::optional dtype); #ifdef BUILD_NAMEDTENSOR Tensor cumsum(const Tensor & self, Dimname dim, c10::optional dtype); #endif #ifdef BUILD_NAMEDTENSOR Tensor & cumsum_out(Tensor & out, const Tensor & self, Dimname dim, c10::optional dtype); #endif Tensor cumprod(const Tensor & self, int64_t dim, c10::optional dtype); Tensor & cumprod_out(Tensor & out, const Tensor & self, int64_t dim, c10::optional dtype); #ifdef BUILD_NAMEDTENSOR Tensor cumprod(const Tensor & self, Dimname dim, c10::optional dtype); #endif #ifdef BUILD_NAMEDTENSOR Tensor & cumprod_out(Tensor & out, const Tensor & self, Dimname dim, c10::optional dtype); #endif Tensor ctc_loss(const Tensor & log_probs, const Tensor & targets, IntArrayRef input_lengths, IntArrayRef target_lengths, int64_t blank, int64_t reduction, bool zero_infinity); Tensor ctc_loss(const Tensor & log_probs, const Tensor & targets, const Tensor & input_lengths, const Tensor & target_lengths, int64_t blank, int64_t reduction, bool zero_infinity); Tensor det(const Tensor & self); Tensor diag_embed(const Tensor & self, int64_t offset, int64_t dim1, int64_t dim2); Tensor diagflat(const Tensor & self, int64_t offset); Tensor diagonal(const Tensor & self, int64_t offset, int64_t dim1, int64_t dim2); Tensor & fill_diagonal_(Tensor & self, Scalar fill_value, bool wrap); Tensor div(const Tensor & self, Scalar other); Tensor & div_(Tensor & self, Scalar other); Tensor & dot_out(Tensor & out, const Tensor & self, const Tensor & tensor); Tensor einsum(std::string equation, TensorList tensors); Tensor embedding(const Tensor & weight, const Tensor & indices, int64_t padding_idx, bool scale_grad_by_freq, bool sparse); Tensor embedding_backward(const Tensor & grad, const Tensor & indices, int64_t num_weights, int64_t padding_idx, bool scale_grad_by_freq, bool sparse); Tensor embedding_sparse_backward(const Tensor & grad, const Tensor & indices, int64_t num_weights, int64_t padding_idx, bool scale_grad_by_freq); std::tuple embedding_bag(const Tensor & weight, const Tensor & indices, const Tensor & offsets, bool scale_grad_by_freq, int64_t mode, bool sparse, const Tensor & per_sample_weights); Tensor _embedding_bag_backward(const Tensor & grad, const Tensor & indices, const Tensor & offsets, const Tensor & offset2bag, const Tensor & bag_size, const Tensor & maximum_indices, 
int64_t num_weights, bool scale_grad_by_freq, int64_t mode, bool sparse, const Tensor & per_sample_weights); Tensor _embedding_bag_sparse_backward(const Tensor & grad, const Tensor & indices, const Tensor & offsets, const Tensor & offset2bag, const Tensor & bag_size, int64_t num_weights, bool scale_grad_by_freq, int64_t mode, const Tensor & per_sample_weights); #ifdef BUILD_NAMEDTENSOR Tensor empty(IntArrayRef size, c10::optional names, const TensorOptions & options, c10::optional memory_format); #endif Tensor new_empty(const Tensor & self, IntArrayRef size, const TensorOptions & options); Tensor new_full(const Tensor & self, IntArrayRef size, Scalar fill_value, const TensorOptions & options); Tensor & empty_out(Tensor & out, IntArrayRef size, c10::optional memory_format); Tensor empty_like(const Tensor & self); Tensor empty_like(const Tensor & self, const TensorOptions & options, c10::optional memory_format); Tensor erf(const Tensor & self); Tensor erfc(const Tensor & self); Tensor exp(const Tensor & self); Tensor expm1(const Tensor & self); Tensor & expm1_(Tensor & self); Tensor expand(const Tensor & self, IntArrayRef size, bool implicit); Tensor expand_as(const Tensor & self, const Tensor & other); Tensor eye(int64_t n, const TensorOptions & options); Tensor eye(int64_t n, int64_t m, const TensorOptions & options); Tensor flatten(const Tensor & self, int64_t start_dim, int64_t end_dim); #ifdef BUILD_NAMEDTENSOR Tensor flatten(const Tensor & self, int64_t start_dim, int64_t end_dim, Dimname out_dim); #endif #ifdef BUILD_NAMEDTENSOR Tensor flatten(const Tensor & self, Dimname start_dim, Dimname end_dim, Dimname out_dim); #endif #ifdef BUILD_NAMEDTENSOR Tensor flatten(const Tensor & self, DimnameList dims, Dimname out_dim); #endif Tensor & fill_(Tensor & self, Scalar value); Tensor & fill_(Tensor & self, const Tensor & value); Tensor floor(const Tensor & self); Tensor & floor_(Tensor & self); Tensor frac(const Tensor & self); #ifdef BUILD_NAMEDTENSOR Tensor full(IntArrayRef size, Scalar fill_value, c10::optional names, const TensorOptions & options); #endif Tensor full(IntArrayRef size, Scalar fill_value, const TensorOptions & options); Tensor & full_out(Tensor & out, IntArrayRef size, Scalar fill_value); Tensor full_like(const Tensor & self, Scalar fill_value); Tensor full_like(const Tensor & self, Scalar fill_value, const TensorOptions & options); Tensor grid_sampler(const Tensor & input, const Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners); Tensor hann_window(int64_t window_length, const TensorOptions & options); Tensor hann_window(int64_t window_length, bool periodic, const TensorOptions & options); Tensor hamming_window(int64_t window_length, const TensorOptions & options); Tensor hamming_window(int64_t window_length, bool periodic, const TensorOptions & options); Tensor hamming_window(int64_t window_length, bool periodic, double alpha, const TensorOptions & options); Tensor hamming_window(int64_t window_length, bool periodic, double alpha, double beta, const TensorOptions & options); Tensor hinge_embedding_loss(const Tensor & self, const Tensor & target, double margin, int64_t reduction); Tensor group_norm(const Tensor & input, int64_t num_groups, const Tensor & weight, const Tensor & bias, double eps, bool cudnn_enabled); Tensor fft(const Tensor & self, int64_t signal_ndim, bool normalized); Tensor ifft(const Tensor & self, int64_t signal_ndim, bool normalized); Tensor rfft(const Tensor & self, int64_t signal_ndim, bool normalized, bool 
onesided); Tensor irfft(const Tensor & self, int64_t signal_ndim, bool normalized, bool onesided, IntArrayRef signal_sizes); int64_t _cufft_get_plan_cache_size(int64_t device_index); int64_t _cufft_get_plan_cache_max_size(int64_t device_index); void _cufft_set_plan_cache_max_size(int64_t device_index, int64_t max_size); void _cufft_clear_plan_cache(int64_t device_index); Tensor index(const Tensor & self, TensorList indices); Tensor & index_copy_(Tensor & self, int64_t dim, const Tensor & index, const Tensor & source); Tensor index_copy(const Tensor & self, int64_t dim, const Tensor & index, const Tensor & source); #ifdef BUILD_NAMEDTENSOR Tensor & index_copy_(Tensor & self, Dimname dim, const Tensor & index, const Tensor & source); #endif #ifdef BUILD_NAMEDTENSOR Tensor index_copy(const Tensor & self, Dimname dim, const Tensor & index, const Tensor & source); #endif Tensor & index_put_(Tensor & self, TensorList indices, const Tensor & values, bool accumulate); Tensor index_put(const Tensor & self, TensorList indices, const Tensor & values, bool accumulate); Tensor & _index_put_impl_(Tensor & self, TensorList indices, const Tensor & values, bool accumulate, bool unsafe); Tensor instance_norm(const Tensor & input, const Tensor & weight, const Tensor & bias, const Tensor & running_mean, const Tensor & running_var, bool use_input_stats, double momentum, double eps, bool cudnn_enabled); Tensor inverse(const Tensor & self); Tensor & inverse_out(Tensor & out, const Tensor & self); Tensor isclose(const Tensor & self, const Tensor & other, double rtol, double atol, bool equal_nan); Tensor isnan(const Tensor & self); bool is_distributed(const Tensor & self); bool is_floating_point(const Tensor & self); bool is_complex(const Tensor & self); bool is_nonzero(const Tensor & self); bool is_same_size(const Tensor & self, const Tensor & other); bool is_signed(const Tensor & self); Tensor kl_div(const Tensor & self, const Tensor & target, int64_t reduction); std::tuple kthvalue(const Tensor & self, int64_t k, int64_t dim, bool keepdim); #ifdef BUILD_NAMEDTENSOR std::tuple kthvalue(const Tensor & self, int64_t k, Dimname dim, bool keepdim); #endif #ifdef BUILD_NAMEDTENSOR std::tuple kthvalue_out(Tensor & values, Tensor & indices, const Tensor & self, int64_t k, Dimname dim, bool keepdim); #endif Tensor layer_norm(const Tensor & input, IntArrayRef normalized_shape, const Tensor & weight, const Tensor & bias, double eps, bool cudnn_enable); Tensor linear(const Tensor & input, const Tensor & weight, const Tensor & bias); Tensor fbgemm_linear_int8_weight_fp32_activation(const Tensor & input, const Tensor & weight, const Tensor & packed, const Tensor & col_offsets, Scalar weight_scale, Scalar weight_zero_point, const Tensor & bias); Tensor fbgemm_linear_int8_weight(const Tensor & input, const Tensor & weight, const Tensor & packed, const Tensor & col_offsets, Scalar weight_scale, Scalar weight_zero_point, const Tensor & bias); std::tuple fbgemm_linear_quantize_weight(const Tensor & input); Tensor fbgemm_pack_gemm_matrix_fp16(const Tensor & input); Tensor fbgemm_linear_fp16_weight_fp32_activation(const Tensor & input, const Tensor & packed_weight, const Tensor & bias); Tensor fbgemm_linear_fp16_weight(const Tensor & input, const Tensor & packed_weight, const Tensor & bias); Tensor fbgemm_pack_quantized_matrix(const Tensor & input); Tensor fbgemm_pack_quantized_matrix(const Tensor & input, int64_t K, int64_t N); Tensor linspace(Scalar start, Scalar end, int64_t steps, const TensorOptions & options); Tensor 
log(const Tensor & self); Tensor & log_(Tensor & self); Tensor log10(const Tensor & self); Tensor log1p(const Tensor & self); Tensor log2(const Tensor & self); Tensor logdet(const Tensor & self); Tensor logspace(Scalar start, Scalar end, int64_t steps, double base, const TensorOptions & options); Tensor log_softmax(const Tensor & self, int64_t dim, c10::optional dtype); #ifdef BUILD_NAMEDTENSOR Tensor log_softmax(const Tensor & self, Dimname dim, c10::optional dtype); #endif Tensor logsumexp(const Tensor & self, IntArrayRef dim, bool keepdim); Tensor & logsumexp_out(Tensor & out, const Tensor & self, IntArrayRef dim, bool keepdim); #ifdef BUILD_NAMEDTENSOR Tensor logsumexp(const Tensor & self, DimnameList dim, bool keepdim); #endif #ifdef BUILD_NAMEDTENSOR Tensor & logsumexp_out(Tensor & out, const Tensor & self, DimnameList dim, bool keepdim); #endif Tensor margin_ranking_loss(const Tensor & input1, const Tensor & input2, const Tensor & target, double margin, int64_t reduction); Tensor matmul(const Tensor & self, const Tensor & other); Tensor & matmul_out(Tensor & out, const Tensor & self, const Tensor & other); Tensor matrix_rank(const Tensor & self, double tol, bool symmetric); Tensor matrix_rank(const Tensor & self, bool symmetric); Tensor matrix_power(const Tensor & self, int64_t n); std::tuple max(const Tensor & self, int64_t dim, bool keepdim); std::tuple max_out(Tensor & max, Tensor & max_values, const Tensor & self, int64_t dim, bool keepdim); Tensor max_values(const Tensor & self, IntArrayRef dim, bool keepdim); #ifdef BUILD_NAMEDTENSOR std::tuple max(const Tensor & self, Dimname dim, bool keepdim); #endif #ifdef BUILD_NAMEDTENSOR std::tuple max_out(Tensor & max, Tensor & max_values, const Tensor & self, Dimname dim, bool keepdim); #endif #ifdef BUILD_NAMEDTENSOR Tensor max_values(const Tensor & self, DimnameList dim, bool keepdim); #endif std::tuple max_pool1d_with_indices(const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode); Tensor max_pool1d(const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode); Tensor max_pool2d(const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode); Tensor max_pool3d(const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode); std::tuple median(const Tensor & self, int64_t dim, bool keepdim); std::tuple median_out(Tensor & values, Tensor & indices, const Tensor & self, int64_t dim, bool keepdim); #ifdef BUILD_NAMEDTENSOR std::tuple median(const Tensor & self, Dimname dim, bool keepdim); #endif #ifdef BUILD_NAMEDTENSOR std::tuple median_out(Tensor & values, Tensor & indices, const Tensor & self, Dimname dim, bool keepdim); #endif std::tuple min(const Tensor & self, int64_t dim, bool keepdim); std::tuple min_out(Tensor & min, Tensor & min_indices, const Tensor & self, int64_t dim, bool keepdim); Tensor min_values(const Tensor & self, IntArrayRef dim, bool keepdim); #ifdef BUILD_NAMEDTENSOR std::tuple min(const Tensor & self, Dimname dim, bool keepdim); #endif #ifdef BUILD_NAMEDTENSOR std::tuple min_out(Tensor & min, Tensor & min_indices, const Tensor & self, Dimname dim, bool keepdim); #endif #ifdef BUILD_NAMEDTENSOR Tensor min_values(const Tensor & self, DimnameList dim, bool keepdim); #endif Tensor mkldnn_convolution(const Tensor & self, const Tensor & weight, const Tensor & bias, 
IntArrayRef padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups); Tensor mkldnn_convolution_backward_input(IntArrayRef self_size, const Tensor & grad_output, const Tensor & weight, IntArrayRef padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups, bool bias_defined); std::tuple mkldnn_convolution_backward_weights(IntArrayRef weight_size, const Tensor & grad_output, const Tensor & self, IntArrayRef padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups, bool bias_defined); std::tuple mkldnn_convolution_backward(const Tensor & self, const Tensor & grad_output, const Tensor & weight, IntArrayRef padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups, std::array output_mask); Tensor _sparse_mm(const Tensor & sparse, const Tensor & dense); std::tuple mode(const Tensor & self, int64_t dim, bool keepdim); std::tuple mode_out(Tensor & values, Tensor & indices, const Tensor & self, int64_t dim, bool keepdim); #ifdef BUILD_NAMEDTENSOR std::tuple mode(const Tensor & self, Dimname dim, bool keepdim); #endif #ifdef BUILD_NAMEDTENSOR std::tuple mode_out(Tensor & values, Tensor & indices, const Tensor & self, Dimname dim, bool keepdim); #endif Tensor mul(const Tensor & self, Scalar other); Tensor & mul_(Tensor & self, Scalar other); Tensor mvlgamma(const Tensor & self, int64_t p); Tensor & mvlgamma_(Tensor & self, int64_t p); Tensor narrow(const Tensor & self, int64_t dim, int64_t start, int64_t length); bool _nnpack_available(); Tensor _nnpack_spatial_convolution(const Tensor & input, const Tensor & weight, const Tensor & bias, IntArrayRef padding); std::tuple _nnpack_spatial_convolution_backward(const Tensor & input, const Tensor & grad_output, const Tensor & weight, IntArrayRef padding, std::array output_mask); Tensor _nnpack_spatial_convolution_backward_input(const Tensor & input, const Tensor & grad_output, const Tensor & weight, IntArrayRef padding); Tensor _nnpack_spatial_convolution_backward_weight(const Tensor & input, IntArrayRef weightsize, const Tensor & grad_output, IntArrayRef padding); #ifdef BUILD_NAMEDTENSOR Tensor ones(IntArrayRef size, c10::optional names, const TensorOptions & options); #endif Tensor ones(IntArrayRef size, const TensorOptions & options); Tensor & ones_out(Tensor & out, IntArrayRef size); Tensor ones_like(const Tensor & self); Tensor ones_like(const Tensor & self, const TensorOptions & options); Tensor pairwise_distance(const Tensor & x1, const Tensor & x2, double p, double eps, bool keepdim); Tensor cdist(const Tensor & x1, const Tensor & x2, double p); Tensor _cdist_backward(const Tensor & grad, const Tensor & x1, const Tensor & x2, double p, const Tensor & cdist); Tensor pdist(const Tensor & self, double p); Tensor _pdist_forward(const Tensor & self, double p); Tensor _pdist_backward(const Tensor & grad, const Tensor & self, double p, const Tensor & pdist); Tensor cosine_similarity(const Tensor & x1, const Tensor & x2, int64_t dim, double eps); Tensor permute(const Tensor & self, IntArrayRef dims); Tensor numpy_T(const Tensor & self); Tensor pixel_shuffle(const Tensor & self, int64_t upscale_factor); bool is_pinned(const Tensor & self); Tensor pin_memory(const Tensor & self); Tensor pinverse(const Tensor & self, double rcond); Tensor poisson_nll_loss(const Tensor & input, const Tensor & target, bool log_input, bool full, double eps, int64_t reduction); Tensor scalar_tensor(Scalar s, const TensorOptions & options); #ifdef BUILD_NAMEDTENSOR Tensor rand(IntArrayRef size, c10::optional names, const TensorOptions & 
options); #endif #ifdef BUILD_NAMEDTENSOR Tensor rand(IntArrayRef size, Generator * generator, c10::optional names, const TensorOptions & options); #endif Tensor rand(IntArrayRef size, const TensorOptions & options); Tensor rand(IntArrayRef size, Generator * generator, const TensorOptions & options); Tensor & rand_out(Tensor & out, IntArrayRef size); Tensor & rand_out(Tensor & out, IntArrayRef size, Generator * generator); Tensor rand_like(const Tensor & self); Tensor rand_like(const Tensor & self, const TensorOptions & options); Tensor randint(int64_t high, IntArrayRef size, const TensorOptions & options); Tensor randint(int64_t high, IntArrayRef size, Generator * generator, const TensorOptions & options); Tensor randint(int64_t low, int64_t high, IntArrayRef size, const TensorOptions & options); Tensor randint(int64_t low, int64_t high, IntArrayRef size, Generator * generator, const TensorOptions & options); Tensor & randint_out(Tensor & out, int64_t high, IntArrayRef size); Tensor & randint_out(Tensor & out, int64_t high, IntArrayRef size, Generator * generator); Tensor & randint_out(Tensor & out, int64_t low, int64_t high, IntArrayRef size); Tensor & randint_out(Tensor & out, int64_t low, int64_t high, IntArrayRef size, Generator * generator); Tensor randint_like(const Tensor & self, int64_t high); Tensor randint_like(const Tensor & self, int64_t low, int64_t high); Tensor randint_like(const Tensor & self, int64_t high, const TensorOptions & options); Tensor randint_like(const Tensor & self, int64_t low, int64_t high, const TensorOptions & options); Tensor randn(IntArrayRef size, const TensorOptions & options); Tensor randn(IntArrayRef size, Generator * generator, const TensorOptions & options); #ifdef BUILD_NAMEDTENSOR Tensor randn(IntArrayRef size, c10::optional names, const TensorOptions & options); #endif #ifdef BUILD_NAMEDTENSOR Tensor randn(IntArrayRef size, Generator * generator, c10::optional names, const TensorOptions & options); #endif Tensor & randn_out(Tensor & out, IntArrayRef size); Tensor & randn_out(Tensor & out, IntArrayRef size, Generator * generator); Tensor randn_like(const Tensor & self); Tensor randn_like(const Tensor & self, const TensorOptions & options); Tensor randperm(int64_t n, const TensorOptions & options); Tensor randperm(int64_t n, Generator * generator, const TensorOptions & options); Tensor & randperm_out(Tensor & out, int64_t n); Tensor range(Scalar start, Scalar end, Scalar step, const TensorOptions & options); Tensor range(Scalar start, Scalar end, const TensorOptions & options); Tensor reciprocal(const Tensor & self); Tensor neg(const Tensor & self); Tensor & neg_(Tensor & self); Tensor repeat(const Tensor & self, IntArrayRef repeats); Tensor repeat_interleave(const Tensor & self, const Tensor & repeats, c10::optional dim); Tensor repeat_interleave(const Tensor & self, int64_t repeats, c10::optional dim); Tensor reshape(const Tensor & self, IntArrayRef shape); Tensor reshape_as(const Tensor & self, const Tensor & other); Tensor round(const Tensor & self); Tensor & round_(Tensor & self); Tensor rrelu(const Tensor & self, Scalar lower, Scalar upper, bool training, Generator * generator); Tensor & rrelu_(Tensor & self, Scalar lower, Scalar upper, bool training, Generator * generator); Tensor rsqrt(const Tensor & self); Tensor & rsqrt_(Tensor & self); #ifdef BUILD_NAMEDTENSOR Tensor select(const Tensor & self, Dimname dim, int64_t index); #endif Tensor select(const Tensor & self, int64_t dim, int64_t index); Tensor selu(const Tensor & self); Tensor & 
selu_(Tensor & self); Tensor celu(const Tensor & self, Scalar alpha); Tensor & celu_(Tensor & self, Scalar alpha); Tensor sin(const Tensor & self); Tensor sinh(const Tensor & self); Tensor detach(const Tensor & self); Tensor & detach_(Tensor & self); int64_t size(const Tensor & self, int64_t dim); #ifdef BUILD_NAMEDTENSOR int64_t size(const Tensor & self, Dimname dim); #endif Tensor slice(const Tensor & self, int64_t dim, int64_t start, int64_t end, int64_t step); std::tuple slogdet(const Tensor & self); Tensor smm(const Tensor & self, const Tensor & mat2); Tensor softmax(const Tensor & self, int64_t dim, c10::optional dtype); #ifdef BUILD_NAMEDTENSOR Tensor softmax(const Tensor & self, Dimname dim, c10::optional dtype); #endif std::vector split(const Tensor & self, int64_t split_size, int64_t dim); std::vector split_with_sizes(const Tensor & self, IntArrayRef split_sizes, int64_t dim); Tensor squeeze(const Tensor & self); Tensor squeeze(const Tensor & self, int64_t dim); #ifdef BUILD_NAMEDTENSOR Tensor squeeze(const Tensor & self, Dimname dim); #endif Tensor & squeeze_(Tensor & self); Tensor & squeeze_(Tensor & self, int64_t dim); #ifdef BUILD_NAMEDTENSOR Tensor & squeeze_(Tensor & self, Dimname dim); #endif Tensor sspaddmm(const Tensor & self, const Tensor & mat1, const Tensor & mat2, Scalar beta, Scalar alpha); Tensor stack(TensorList tensors, int64_t dim); Tensor & stack_out(Tensor & out, TensorList tensors, int64_t dim); Tensor stft(const Tensor & self, int64_t n_fft, c10::optional hop_length, c10::optional win_length, const Tensor & window, bool normalized, bool onesided); int64_t stride(const Tensor & self, int64_t dim); #ifdef BUILD_NAMEDTENSOR int64_t stride(const Tensor & self, Dimname dim); #endif Tensor sum(const Tensor & self, c10::optional dtype); Tensor sum(const Tensor & self, IntArrayRef dim, bool keepdim, c10::optional dtype); #ifdef BUILD_NAMEDTENSOR Tensor sum(const Tensor & self, DimnameList dim, bool keepdim, c10::optional dtype); #endif Tensor & sum_out(Tensor & out, const Tensor & self, IntArrayRef dim, bool keepdim, c10::optional dtype); #ifdef BUILD_NAMEDTENSOR Tensor & sum_out(Tensor & out, const Tensor & self, DimnameList dim, bool keepdim, c10::optional dtype); #endif Tensor sum_to_size(const Tensor & self, IntArrayRef size); Tensor sqrt(const Tensor & self); Tensor std(const Tensor & self, bool unbiased); Tensor std(const Tensor & self, IntArrayRef dim, bool unbiased, bool keepdim); std::tuple std_mean(const Tensor & self, bool unbiased); std::tuple std_mean(const Tensor & self, IntArrayRef dim, bool unbiased, bool keepdim); #ifdef BUILD_NAMEDTENSOR std::tuple std_mean(const Tensor & self, DimnameList dim, bool unbiased, bool keepdim); #endif Tensor & std_out(Tensor & out, const Tensor & self, IntArrayRef dim, bool unbiased, bool keepdim); #ifdef BUILD_NAMEDTENSOR Tensor std(const Tensor & self, DimnameList dim, bool unbiased, bool keepdim); #endif #ifdef BUILD_NAMEDTENSOR Tensor & std_out(Tensor & out, const Tensor & self, DimnameList dim, bool unbiased, bool keepdim); #endif Tensor prod(const Tensor & self, c10::optional dtype); Tensor prod(const Tensor & self, int64_t dim, bool keepdim, c10::optional dtype); Tensor & prod_out(Tensor & out, const Tensor & self, int64_t dim, bool keepdim, c10::optional dtype); #ifdef BUILD_NAMEDTENSOR Tensor prod(const Tensor & self, Dimname dim, bool keepdim, c10::optional dtype); #endif #ifdef BUILD_NAMEDTENSOR Tensor & prod_out(Tensor & out, const Tensor & self, Dimname dim, bool keepdim, c10::optional dtype); #endif 
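// Usage sketch (illustrative only, assumed shapes): softmax and sum declared
// above take an optional dtype; passing c10::nullopt keeps the input's scalar type.
//
//   #include <ATen/ATen.h>
//
//   void reduction_example() {
//     at::Tensor x = at::randn({4, 3});
//     at::Tensor probs = at::softmax(x, /*dim=*/1, /*dtype=*/c10::nullopt);
//     // sum over dim 1 without keeping the reduced dimension.
//     at::Tensor row_sums = at::sum(probs, /*dim=*/{1}, /*keepdim=*/false);
//   }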
Tensor t(const Tensor & self); Tensor & t_(Tensor & self); Tensor tan(const Tensor & self); Tensor tanh(const Tensor & self); Tensor tensordot(const Tensor & self, const Tensor & other, IntArrayRef dims_self, IntArrayRef dims_other); Tensor threshold(const Tensor & self, Scalar threshold, Scalar value); Tensor & threshold_(Tensor & self, Scalar threshold, Scalar value); Tensor & threshold_out(Tensor & out, const Tensor & self, Scalar threshold, Scalar value); Tensor threshold_backward(const Tensor & grad_output, const Tensor & self, Scalar threshold); Tensor transpose(const Tensor & self, int64_t dim0, int64_t dim1); #ifdef BUILD_NAMEDTENSOR Tensor transpose(const Tensor & self, Dimname dim0, Dimname dim1); #endif Tensor & transpose_(Tensor & self, int64_t dim0, int64_t dim1); Tensor one_hot(const Tensor & self, int64_t num_classes); Tensor rot90(const Tensor & self, int64_t k, IntArrayRef dims); Tensor trapz(const Tensor & y, const Tensor & x, int64_t dim); Tensor trapz(const Tensor & y, double dx, int64_t dim); Tensor _trilinear(const Tensor & i1, const Tensor & i2, const Tensor & i3, IntArrayRef expand1, IntArrayRef expand2, IntArrayRef expand3, IntArrayRef sumdim, int64_t unroll_dim); Tensor triplet_margin_loss(const Tensor & anchor, const Tensor & positive, const Tensor & negative, double margin, double p, double eps, bool swap, int64_t reduction); Tensor trunc(const Tensor & self); Tensor & trunc_(Tensor & self); Tensor type_as(const Tensor & self, const Tensor & other); bool _has_compatible_shallow_copy_type(const Tensor & self, const Tensor & from); Tensor _unsafe_view(const Tensor & self, IntArrayRef size); Tensor unsqueeze(const Tensor & self, int64_t dim); Tensor & unsqueeze_(Tensor & self, int64_t dim); Tensor var(const Tensor & self, bool unbiased); Tensor var(const Tensor & self, IntArrayRef dim, bool unbiased, bool keepdim); Tensor & var_out(Tensor & out, const Tensor & self, IntArrayRef dim, bool unbiased, bool keepdim); #ifdef BUILD_NAMEDTENSOR Tensor var(const Tensor & self, DimnameList dim, bool unbiased, bool keepdim); #endif #ifdef BUILD_NAMEDTENSOR Tensor & var_out(Tensor & out, const Tensor & self, DimnameList dim, bool unbiased, bool keepdim); #endif std::tuple var_mean(const Tensor & self, bool unbiased); std::tuple var_mean(const Tensor & self, IntArrayRef dim, bool unbiased, bool keepdim); #ifdef BUILD_NAMEDTENSOR std::tuple var_mean(const Tensor & self, DimnameList dim, bool unbiased, bool keepdim); #endif Tensor view_as(const Tensor & self, const Tensor & other); Tensor where(const Tensor & condition, const Tensor & self, const Tensor & other); std::vector where(const Tensor & condition); Tensor norm_except_dim(const Tensor & v, int64_t pow, int64_t dim); Tensor _weight_norm(const Tensor & v, const Tensor & g, int64_t dim); std::tuple _weight_norm_differentiable_backward(const Tensor & grad_w, const Tensor & saved_v, const Tensor & saved_g, const Tensor & saved_norms, int64_t dim); #ifdef BUILD_NAMEDTENSOR Tensor zeros(IntArrayRef size, c10::optional names, const TensorOptions & options); #endif Tensor zeros(IntArrayRef size, const TensorOptions & options); Tensor & zeros_out(Tensor & out, IntArrayRef size); Tensor zeros_like(const Tensor & self); Tensor zeros_like(const Tensor & self, const TensorOptions & options); Tensor _sparse_sum(const Tensor & self); Tensor _sparse_sum(const Tensor & self, ScalarType dtype); Tensor _sparse_sum(const Tensor & self, IntArrayRef dim); Tensor _sparse_sum(const Tensor & self, IntArrayRef dim, ScalarType dtype); Tensor 
norm(const Tensor & self, c10::optional p, ScalarType dtype); Tensor norm(const Tensor & self, Scalar p); Tensor norm(const Tensor & self, c10::optional p, IntArrayRef dim, bool keepdim, ScalarType dtype); Tensor norm(const Tensor & self, c10::optional p, IntArrayRef dim, bool keepdim); Tensor & norm_out(Tensor & out, const Tensor & self, c10::optional p, IntArrayRef dim, bool keepdim, ScalarType dtype); Tensor & norm_out(Tensor & out, const Tensor & self, c10::optional p, IntArrayRef dim, bool keepdim); #ifdef BUILD_NAMEDTENSOR Tensor norm(const Tensor & self, c10::optional p, DimnameList dim, bool keepdim, ScalarType dtype); #endif #ifdef BUILD_NAMEDTENSOR Tensor norm(const Tensor & self, c10::optional p, DimnameList dim, bool keepdim); #endif #ifdef BUILD_NAMEDTENSOR Tensor & norm_out(Tensor & out, const Tensor & self, c10::optional p, DimnameList dim, bool keepdim, ScalarType dtype); #endif #ifdef BUILD_NAMEDTENSOR Tensor & norm_out(Tensor & out, const Tensor & self, c10::optional p, DimnameList dim, bool keepdim); #endif Tensor frobenius_norm(const Tensor & self); Tensor frobenius_norm(const Tensor & self, IntArrayRef dim, bool keepdim); Tensor & frobenius_norm_out(Tensor & out, const Tensor & self, IntArrayRef dim, bool keepdim); Tensor nuclear_norm(const Tensor & self, bool keepdim); Tensor & nuclear_norm_out(Tensor & out, const Tensor & self, bool keepdim); Tensor nuclear_norm(const Tensor & self, IntArrayRef dim, bool keepdim); Tensor & nuclear_norm_out(Tensor & out, const Tensor & self, IntArrayRef dim, bool keepdim); Tensor & resize_as_(Tensor & self, const Tensor & the_template); Tensor sub(const Tensor & self, Scalar other, Scalar alpha); Tensor & sub_(Tensor & self, Scalar other, Scalar alpha); Tensor rsub(const Tensor & self, const Tensor & other, Scalar alpha); Tensor rsub(const Tensor & self, Scalar other, Scalar alpha); Tensor _sparse_addmm(const Tensor & self, const Tensor & sparse, const Tensor & dense, Scalar beta, Scalar alpha); Tensor sparse_coo_tensor(IntArrayRef size, const TensorOptions & options); Tensor sparse_coo_tensor(const Tensor & indices, const Tensor & values, const TensorOptions & options); Tensor sparse_coo_tensor(const Tensor & indices, const Tensor & values, IntArrayRef size, const TensorOptions & options); Tensor _sparse_coo_tensor_unsafe(const Tensor & indices, const Tensor & values, IntArrayRef size, const TensorOptions & options); Tensor to_dense_backward(const Tensor & grad, const Tensor & input); int64_t numel(const Tensor & self); std::vector unbind(const Tensor & self, int64_t dim); #ifdef BUILD_NAMEDTENSOR std::vector unbind(const Tensor & self, Dimname dim); #endif Tensor to_mkldnn_backward(const Tensor & grad, const Tensor & input); Tensor to(const Tensor & self, const TensorOptions & options, bool non_blocking, bool copy); Tensor to(const Tensor & self, Device device, ScalarType dtype, bool non_blocking, bool copy); Tensor to(const Tensor & self, ScalarType dtype, bool non_blocking, bool copy); Tensor to(const Tensor & self, const Tensor & other, bool non_blocking, bool copy); std::vector meshgrid(TensorList tensors); Tensor cartesian_prod(TensorList tensors); Tensor combinations(const Tensor & self, int64_t r, bool with_replacement); Scalar item(const Tensor & self); ScalarType result_type(const Tensor & tensor, const Tensor & other); ScalarType result_type(const Tensor & tensor, Scalar other); ScalarType result_type(Scalar scalar, const Tensor & tensor); ScalarType result_type(Scalar scalar1, Scalar scalar2); bool can_cast(ScalarType 
from, ScalarType to); ScalarType promote_types(ScalarType type1, ScalarType type2); std::tuple _thnn_differentiable_lstm_cell_backward(const Tensor & grad_hy, const Tensor & grad_cy, const Tensor & input_gates, const Tensor & hidden_gates, const Tensor & input_bias, const Tensor & hidden_bias, const Tensor & cx, const Tensor & cy); std::tuple _thnn_differentiable_gru_cell_backward(const Tensor & grad_hy, const Tensor & input_gates, const Tensor & hidden_gates, const Tensor & hx, const Tensor & input_bias, const Tensor & hidden_bias); std::tuple lstm(const Tensor & input, TensorList hx, TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first); std::tuple lstm(const Tensor & data, const Tensor & batch_sizes, TensorList hx, TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional); std::tuple gru(const Tensor & input, const Tensor & hx, TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first); std::tuple gru(const Tensor & data, const Tensor & batch_sizes, const Tensor & hx, TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional); std::tuple rnn_tanh(const Tensor & input, const Tensor & hx, TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first); std::tuple rnn_tanh(const Tensor & data, const Tensor & batch_sizes, const Tensor & hx, TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional); std::tuple rnn_relu(const Tensor & input, const Tensor & hx, TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first); std::tuple rnn_relu(const Tensor & data, const Tensor & batch_sizes, const Tensor & hx, TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional); std::tuple lstm_cell(const Tensor & input, TensorList hx, const Tensor & w_ih, const Tensor & w_hh, const Tensor & b_ih, const Tensor & b_hh); Tensor gru_cell(const Tensor & input, const Tensor & hx, const Tensor & w_ih, const Tensor & w_hh, const Tensor & b_ih, const Tensor & b_hh); Tensor rnn_tanh_cell(const Tensor & input, const Tensor & hx, const Tensor & w_ih, const Tensor & w_hh, const Tensor & b_ih, const Tensor & b_hh); Tensor rnn_relu_cell(const Tensor & input, const Tensor & hx, const Tensor & w_ih, const Tensor & w_hh, const Tensor & b_ih, const Tensor & b_hh); std::tuple quantized_lstm(const Tensor & input, TensorList hx, TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first, c10::optional dtype, bool use_dynamic); std::tuple quantized_gru(const Tensor & input, const Tensor & hx, TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first); std::tuple quantized_gru(const Tensor & data, const Tensor & batch_sizes, const Tensor & hx, TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional); std::tuple quantized_lstm_cell(const Tensor & input, TensorList hx, const Tensor & w_ih, const Tensor & w_hh, const Tensor & b_ih, const Tensor & b_hh, const Tensor & packed_ih, const Tensor & packed_hh, const Tensor & col_offsets_ih, const Tensor & col_offsets_hh, Scalar scale_ih, Scalar scale_hh, Scalar zero_point_ih, Scalar 
zero_point_hh); Tensor quantized_gru_cell(const Tensor & input, const Tensor & hx, const Tensor & w_ih, const Tensor & w_hh, const Tensor & b_ih, const Tensor & b_hh, const Tensor & packed_ih, const Tensor & packed_hh, const Tensor & col_offsets_ih, const Tensor & col_offsets_hh, Scalar scale_ih, Scalar scale_hh, Scalar zero_point_ih, Scalar zero_point_hh); Tensor quantized_rnn_relu_cell(const Tensor & input, const Tensor & hx, const Tensor & w_ih, const Tensor & w_hh, const Tensor & b_ih, const Tensor & b_hh, const Tensor & packed_ih, const Tensor & packed_hh, const Tensor & col_offsets_ih, const Tensor & col_offsets_hh, Scalar scale_ih, Scalar scale_hh, Scalar zero_point_ih, Scalar zero_point_hh); Tensor quantized_rnn_tanh_cell(const Tensor & input, const Tensor & hx, const Tensor & w_ih, const Tensor & w_hh, const Tensor & b_ih, const Tensor & b_hh, const Tensor & packed_ih, const Tensor & packed_hh, const Tensor & col_offsets_ih, const Tensor & col_offsets_hh, Scalar scale_ih, Scalar scale_hh, Scalar zero_point_ih, Scalar zero_point_hh); std::tuple _pack_padded_sequence(const Tensor & input, const Tensor & lengths, bool batch_first); Tensor _pack_padded_sequence_backward(const Tensor & grad, IntArrayRef input_size, const Tensor & batch_sizes, bool batch_first); std::tuple _pad_packed_sequence(const Tensor & data, const Tensor & batch_sizes, bool batch_first, Scalar padding_value, int64_t total_length); Tensor masked_fill(const Tensor & self, const Tensor & mask, Scalar value); Tensor masked_fill(const Tensor & self, const Tensor & mask, const Tensor & value); Tensor masked_scatter(const Tensor & self, const Tensor & mask, const Tensor & source); Tensor index_add(const Tensor & self, int64_t dim, const Tensor & index, const Tensor & source); #ifdef BUILD_NAMEDTENSOR Tensor index_add(const Tensor & self, Dimname dim, const Tensor & index, const Tensor & source); #endif Tensor index_fill(const Tensor & self, int64_t dim, const Tensor & index, Scalar value); Tensor index_fill(const Tensor & self, int64_t dim, const Tensor & index, const Tensor & value); #ifdef BUILD_NAMEDTENSOR Tensor & index_fill_(Tensor & self, Dimname dim, const Tensor & index, Scalar value); #endif #ifdef BUILD_NAMEDTENSOR Tensor & index_fill_(Tensor & self, Dimname dim, const Tensor & index, const Tensor & value); #endif #ifdef BUILD_NAMEDTENSOR Tensor index_fill(const Tensor & self, Dimname dim, const Tensor & index, Scalar value); #endif #ifdef BUILD_NAMEDTENSOR Tensor index_fill(const Tensor & self, Dimname dim, const Tensor & index, const Tensor & value); #endif Tensor scatter(const Tensor & self, int64_t dim, const Tensor & index, const Tensor & src); Tensor scatter(const Tensor & self, int64_t dim, const Tensor & index, Scalar value); #ifdef BUILD_NAMEDTENSOR Tensor scatter(const Tensor & self, Dimname dim, const Tensor & index, const Tensor & src); #endif #ifdef BUILD_NAMEDTENSOR Tensor scatter(const Tensor & self, Dimname dim, const Tensor & index, Scalar value); #endif Tensor scatter_add(const Tensor & self, int64_t dim, const Tensor & index, const Tensor & src); #ifdef BUILD_NAMEDTENSOR Tensor scatter_add(const Tensor & self, Dimname dim, const Tensor & index, const Tensor & src); #endif Tensor & lt_(Tensor & self, Scalar other); Tensor & lt_(Tensor & self, const Tensor & other); Tensor & gt_(Tensor & self, Scalar other); Tensor & gt_(Tensor & self, const Tensor & other); Tensor & le_(Tensor & self, Scalar other); Tensor & le_(Tensor & self, const Tensor & other); Tensor & ge_(Tensor & self, Scalar other); 
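// Usage sketch (illustrative only): scatter_add declared above is the
// out-of-place variant; the index layout and shapes here are assumptions.
//
//   #include <ATen/ATen.h>
//
//   void scatter_add_example() {
//     at::Tensor dst = at::zeros({2, 4});
//     at::Tensor idx = at::arange(4, at::TensorOptions().dtype(at::kLong))
//                          .unsqueeze(0)
//                          .repeat({2, 1});
//     at::Tensor src = at::ones({2, 4});
//     // scatter_add(self, dim, index, src): src accumulated into dst along dim 1.
//     at::Tensor out = at::scatter_add(dst, /*dim=*/1, idx, src);
//   }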
Tensor & ge_(Tensor & self, const Tensor & other);
Tensor & eq_(Tensor & self, Scalar other);
Tensor & eq_(Tensor & self, const Tensor & other);
Tensor & ne_(Tensor & self, Scalar other);
Tensor & ne_(Tensor & self, const Tensor & other);
Tensor & atan2_(Tensor & self, const Tensor & other);
Tensor & digamma_(Tensor & self);
Tensor & polygamma_(Tensor & self, int64_t n);
Tensor & addcdiv_(Tensor & self, const Tensor & tensor1, const Tensor & tensor2, Scalar value);
Tensor & cross_out(Tensor & out, const Tensor & self, const Tensor & other, c10::optional<int64_t> dim);
Tensor cross(const Tensor & self, const Tensor & other, c10::optional<int64_t> dim);
Tensor triu(const Tensor & self, int64_t diagonal);
Tensor tril(const Tensor & self, int64_t diagonal);
#ifdef BUILD_NAMEDTENSOR
Tensor & index_select_out(Tensor & out, const Tensor & self, Dimname dim, const Tensor & index);
#endif
#ifdef BUILD_NAMEDTENSOR
Tensor index_select(const Tensor & self, Dimname dim, const Tensor & index);
#endif
std::vector<Tensor> nonzero_numpy(const Tensor & self);
#ifdef BUILD_NAMEDTENSOR
Tensor & gather_out(Tensor & out, const Tensor & self, Dimname dim, const Tensor & index, bool sparse_grad);
#endif
#ifdef BUILD_NAMEDTENSOR
Tensor gather(const Tensor & self, Dimname dim, const Tensor & index, bool sparse_grad);
#endif
Tensor _gather_sparse_backward(const Tensor & self, int64_t dim, const Tensor & index, const Tensor & grad);
Tensor & addcmul_out(Tensor & out, const Tensor & self, const Tensor & tensor1, const Tensor & tensor2, Scalar value);
Tensor addcmul(const Tensor & self, const Tensor & tensor1, const Tensor & tensor2, Scalar value);
Tensor & addcmul_(Tensor & self, const Tensor & tensor1, const Tensor & tensor2, Scalar value);
Tensor & addcdiv_out(Tensor & out, const Tensor & self, const Tensor & tensor1, const Tensor & tensor2, Scalar value);
Tensor addcdiv(const Tensor & self, const Tensor & tensor1, const Tensor & tensor2, Scalar value);
std::tuple<Tensor &,Tensor &> triangular_solve_out(Tensor & X, Tensor & M, const Tensor & self, const Tensor & A, bool upper, bool transpose, bool unitriangular);
std::tuple<Tensor,Tensor> triangular_solve(const Tensor & self, const Tensor & A, bool upper, bool transpose, bool unitriangular);
std::tuple<Tensor &,Tensor &> symeig_out(Tensor & e, Tensor & V, const Tensor & self, bool eigenvectors, bool upper);
std::tuple<Tensor,Tensor> symeig(const Tensor & self, bool eigenvectors, bool upper);
std::tuple<Tensor &,Tensor &,Tensor &> svd_out(Tensor & U, Tensor & S, Tensor & V, const Tensor & self, bool some, bool compute_uv);
std::tuple<Tensor,Tensor,Tensor> svd(const Tensor & self, bool some, bool compute_uv);
Tensor & cholesky_out(Tensor & out, const Tensor & self, bool upper);
Tensor cholesky(const Tensor & self, bool upper);
Tensor & cholesky_solve_out(Tensor & out, const Tensor & self, const Tensor & input2, bool upper);
Tensor cholesky_solve(const Tensor & self, const Tensor & input2, bool upper);
std::tuple<Tensor,Tensor> solve(const Tensor & self, const Tensor & A);
std::tuple<Tensor &,Tensor &> solve_out(Tensor & solution, Tensor & lu, const Tensor & self, const Tensor & A);
std::tuple<Tensor &,Tensor &> qr_out(Tensor & Q, Tensor & R, const Tensor & self, bool some);
std::tuple<Tensor,Tensor> qr(const Tensor & self, bool some);
Tensor & lu_solve_out(Tensor & out, const Tensor & self, const Tensor & LU_data, const Tensor & LU_pivots);
Tensor lu_solve(const Tensor & self, const Tensor & LU_data, const Tensor & LU_pivots);
Tensor & digamma_out(Tensor & out, const Tensor & self);
Tensor digamma(const Tensor & self);
Tensor & polygamma_out(Tensor & out, int64_t n, const Tensor & self);
Tensor polygamma(int64_t n, const Tensor & self);
Tensor sign(const Tensor & self);
Tensor & sign_(Tensor & self);
Tensor & atan2_out(Tensor & out, const Tensor & self, const Tensor & other);
Tensor atan2(const Tensor & self, const Tensor & other);
#ifdef BUILD_NAMEDTENSOR
std::tuple<Tensor &,Tensor &> sort_out(Tensor & values, Tensor & indices, const Tensor & self, Dimname dim, bool descending);
#endif
#ifdef BUILD_NAMEDTENSOR
std::tuple<Tensor,Tensor> sort(const Tensor & self, Dimname dim, bool descending);
#endif
Tensor argsort(const Tensor & self, int64_t dim, bool descending);
#ifdef BUILD_NAMEDTENSOR
Tensor argsort(const Tensor & self, Dimname dim, bool descending);
#endif
Tensor all(const Tensor & self);
Tensor any(const Tensor & self);
Tensor normal(double mean, double std, IntArrayRef size, Generator * generator, const TensorOptions & options);
Tensor & normal_out(Tensor & out, double mean, double std, IntArrayRef size, Generator * generator);
Tensor alias(const Tensor & self);
Tensor & multilabel_margin_loss_out(Tensor & out, const Tensor & self, const Tensor & target, int64_t reduction);
Tensor multilabel_margin_loss(const Tensor & self, const Tensor & target, int64_t reduction);
Tensor & nll_loss_out(Tensor & out, const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction, int64_t ignore_index);
Tensor nll_loss(const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction, int64_t ignore_index);
Tensor & nll_loss2d_out(Tensor & out, const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction, int64_t ignore_index);
Tensor nll_loss2d(const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction, int64_t ignore_index);
Tensor & log_sigmoid_out(Tensor & out, const Tensor & self);
Tensor log_sigmoid(const Tensor & self);
Tensor adaptive_avg_pool2d(const Tensor & self, IntArrayRef output_size);
Tensor & thnn_conv2d_out(Tensor & out, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding);
Tensor thnn_conv2d(const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding);
Tensor & thnn_conv_depthwise2d_out(Tensor & out, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation);
Tensor thnn_conv_depthwise2d(const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation);
Tensor & thnn_conv3d_out(Tensor & out, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding);
Tensor thnn_conv3d(const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding);

} // namespace TypeDefault
} // namespace at
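// Usage sketch (illustrative only): nll_loss declared above expects
// log-probabilities; an undefined Tensor for `weight` means "no class weights".
// The reduction and ignore_index values below mirror the usual defaults and are
// assumptions for this example.
//
//   #include <ATen/ATen.h>
//
//   void nll_loss_example() {
//     at::Tensor logits = at::randn({3, 5});
//     at::Tensor log_probs = at::log_softmax(logits, /*dim=*/1);
//     at::Tensor target = at::zeros({3}, at::TensorOptions().dtype(at::kLong));
//     at::Tensor loss = at::nll_loss(log_probs, target, /*weight=*/{},
//                                    /*reduction=*/at::Reduction::Mean,
//                                    /*ignore_index=*/-100);
//   }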