#pragma once

// @generated by aten/src/ATen/gen.py

#include #include #include #include #include #include #include #include #include #include #include #include #include #include

namespace at {

using native::tensor;

static inline Tensor _cast_Byte(const Tensor & self, bool non_blocking=false);
static inline Tensor _cast_Char(const Tensor & self, bool non_blocking=false);
static inline Tensor _cast_Double(const Tensor & self, bool non_blocking=false);
static inline Tensor _cast_Float(const Tensor & self, bool non_blocking=false);
static inline Tensor _cast_Int(const Tensor & self, bool non_blocking=false);
static inline Tensor _cast_Long(const Tensor & self, bool non_blocking=false);
static inline Tensor _cast_Short(const Tensor & self, bool non_blocking=false);
static inline Tensor _cast_Half(const Tensor & self, bool non_blocking=false);
#ifdef BUILD_NAMEDTENSOR
static inline std::vector<Tensor> align_tensors(TensorList tensors);
#endif
static inline std::tuple<Tensor,Tensor> _cudnn_ctc_loss(const Tensor & log_probs, const Tensor & targets, IntArrayRef input_lengths, IntArrayRef target_lengths, int64_t blank, bool deterministic, bool zero_infinity);
static inline Tensor _cudnn_rnn_flatten_weight(TensorList weight_arr, int64_t weight_stride0, int64_t input_size, int64_t mode, int64_t hidden_size, int64_t num_layers, bool batch_first, bool bidirectional);
static inline std::tuple<Tensor,Tensor,Tensor,Tensor,Tensor> _cudnn_rnn(const Tensor & input, TensorList weight, int64_t weight_stride0, const Tensor & weight_buf, const Tensor & hx, const Tensor & cx, int64_t mode, int64_t hidden_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, IntArrayRef batch_sizes, const Tensor & dropout_state);
static inline std::tuple<Tensor,Tensor,Tensor,std::vector<Tensor>> _cudnn_rnn_backward(const Tensor & input, TensorList weight, int64_t weight_stride0, const Tensor & weight_buf, const Tensor & hx, const Tensor & cx, const Tensor & output, const Tensor & grad_output, const Tensor & grad_hy, const Tensor & grad_cy, int64_t mode, int64_t hidden_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, IntArrayRef batch_sizes, const Tensor & dropout_state, const Tensor & reserve, std::array<bool,4> output_mask);
static inline Tensor _cudnn_init_dropout_state(double dropout, bool train, int64_t dropout_seed, const TensorOptions & options);
static inline int64_t _debug_has_internal_overlap(const Tensor & self);
static inline std::tuple<Tensor,Tensor> _fused_dropout(const Tensor & self, double p, Generator * generator=nullptr);
static inline Tensor _masked_scale(const Tensor & self, const Tensor & mask, double scale);
static inline std::tuple<Tensor,Tensor> _sobol_engine_draw(const Tensor & quasi, int64_t n, const Tensor & sobolstate, int64_t dimension, int64_t num_generated, c10::optional<ScalarType> dtype);
static inline Tensor & _sobol_engine_ff_(Tensor & self, int64_t n, const Tensor & sobolstate, int64_t dimension, int64_t num_generated);
static inline Tensor & _sobol_engine_scramble_(Tensor & self, const Tensor & ltm, int64_t dimension);
static inline Tensor & _sobol_engine_initialize_state_(Tensor & self, int64_t dimension);
static inline Tensor _reshape_from_tensor(const Tensor & self, const Tensor & shape);
static inline Tensor _shape_as_tensor(const Tensor & self);
static inline Tensor dropout(const Tensor & input, double p, bool train);
static inline Tensor & dropout_(Tensor & self, double p, bool train);
static inline Tensor feature_dropout(const Tensor & input, double p, bool train);
static inline Tensor & feature_dropout_(Tensor & self, double p, bool train);
static inline
Tensor alpha_dropout(const Tensor & input, double p, bool train);
static inline Tensor & alpha_dropout_(Tensor & self, double p, bool train);
static inline Tensor feature_alpha_dropout(const Tensor & input, double p, bool train);
static inline Tensor & feature_alpha_dropout_(Tensor & self, double p, bool train);
static inline Tensor abs(const Tensor & self);
static inline Tensor & abs_(Tensor & self);
static inline Tensor & abs_out(Tensor & out, const Tensor & self);
static inline Tensor acos(const Tensor & self);
static inline Tensor & acos_(Tensor & self);
static inline Tensor & acos_out(Tensor & out, const Tensor & self);
static inline Tensor avg_pool1d(const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride={}, IntArrayRef padding=0, bool ceil_mode=false, bool count_include_pad=true);
static inline Tensor adaptive_avg_pool1d(const Tensor & self, IntArrayRef output_size);
static inline std::tuple<Tensor,Tensor> adaptive_max_pool1d(const Tensor & self, IntArrayRef output_size);
static inline Tensor add(const Tensor & self, const Tensor & other, Scalar alpha=1);
static inline Tensor & add_out(Tensor & out, const Tensor & self, const Tensor & other, Scalar alpha=1);
static inline Tensor add(const Tensor & self, Scalar other, Scalar alpha=1);
static inline Tensor addmv(const Tensor & self, const Tensor & mat, const Tensor & vec, Scalar beta=1, Scalar alpha=1);
static inline Tensor & addmv_(Tensor & self, const Tensor & mat, const Tensor & vec, Scalar beta=1, Scalar alpha=1);
static inline Tensor & addmv_out(Tensor & out, const Tensor & self, const Tensor & mat, const Tensor & vec, Scalar beta=1, Scalar alpha=1);
static inline Tensor addr(const Tensor & self, const Tensor & vec1, const Tensor & vec2, Scalar beta=1, Scalar alpha=1);
static inline Tensor & addr_out(Tensor & out, const Tensor & self, const Tensor & vec1, const Tensor & vec2, Scalar beta=1, Scalar alpha=1);
static inline Tensor affine_grid_generator(const Tensor & theta, IntArrayRef size, bool align_corners);
static inline Tensor affine_grid_generator_backward(const Tensor & grad, IntArrayRef size, bool align_corners);
static inline Tensor all(const Tensor & self, int64_t dim, bool keepdim=false);
static inline Tensor & all_out(Tensor & out, const Tensor & self, int64_t dim, bool keepdim=false);
#ifdef BUILD_NAMEDTENSOR
static inline Tensor all(const Tensor & self, Dimname dim, bool keepdim=false);
#endif
#ifdef BUILD_NAMEDTENSOR
static inline Tensor & all_out(Tensor & out, const Tensor & self, Dimname dim, bool keepdim=false);
#endif
static inline bool allclose(const Tensor & self, const Tensor & other, double rtol=1e-05, double atol=1e-08, bool equal_nan=false);
static inline Tensor any(const Tensor & self, int64_t dim, bool keepdim=false);
static inline Tensor & any_out(Tensor & out, const Tensor & self, int64_t dim, bool keepdim=false);
#ifdef BUILD_NAMEDTENSOR
static inline Tensor any(const Tensor & self, Dimname dim, bool keepdim=false);
#endif
#ifdef BUILD_NAMEDTENSOR
static inline Tensor & any_out(Tensor & out, const Tensor & self, Dimname dim, bool keepdim=false);
#endif
static inline Tensor arange(Scalar end, const TensorOptions & options={});
static inline Tensor arange(Scalar start, Scalar end, const TensorOptions & options={});
static inline Tensor arange(Scalar start, Scalar end, Scalar step, const TensorOptions & options={});
static inline Tensor & arange_out(Tensor & out, Scalar end);
static inline Tensor & arange_out(Tensor & out, Scalar start, Scalar end, Scalar step=1);
static inline Tensor _dim_arange(const Tensor
& like, int64_t dim);
static inline Tensor argmax(const Tensor & self, c10::optional<int64_t> dim=c10::nullopt, bool keepdim=false);
static inline Tensor argmin(const Tensor & self, c10::optional<int64_t> dim=c10::nullopt, bool keepdim=false);
static inline Tensor as_strided(const Tensor & self, IntArrayRef size, IntArrayRef stride, c10::optional<int64_t> storage_offset=c10::nullopt);
static inline Tensor & as_strided_(Tensor & self, IntArrayRef size, IntArrayRef stride, c10::optional<int64_t> storage_offset=c10::nullopt);
static inline Tensor asin(const Tensor & self);
static inline Tensor & asin_(Tensor & self);
static inline Tensor & asin_out(Tensor & out, const Tensor & self);
static inline Tensor atan(const Tensor & self);
static inline Tensor & atan_(Tensor & self);
static inline Tensor & atan_out(Tensor & out, const Tensor & self);
static inline Tensor baddbmm(const Tensor & self, const Tensor & batch1, const Tensor & batch2, Scalar beta=1, Scalar alpha=1);
static inline Tensor & _baddbmm_mkl_(Tensor & self, const Tensor & batch1, const Tensor & batch2, Scalar beta=1, Scalar alpha=1);
static inline Tensor & baddbmm_out(Tensor & out, const Tensor & self, const Tensor & batch1, const Tensor & batch2, Scalar beta=1, Scalar alpha=1);
static inline Tensor bartlett_window(int64_t window_length, const TensorOptions & options={});
static inline Tensor bartlett_window(int64_t window_length, bool periodic, const TensorOptions & options={});
static inline Tensor batch_norm(const Tensor & input, const Tensor & weight, const Tensor & bias, const Tensor & running_mean, const Tensor & running_var, bool training, double momentum, double eps, bool cudnn_enabled);
static inline std::tuple<Tensor,Tensor,Tensor,int64_t> _batch_norm_impl_index(const Tensor & input, const Tensor & weight, const Tensor & bias, const Tensor & running_mean, const Tensor & running_var, bool training, double momentum, double eps, bool cudnn_enabled);
static inline std::tuple<Tensor,Tensor,Tensor> _batch_norm_impl_index_backward(int64_t impl_index, const Tensor & input, const Tensor & grad_output, const Tensor & weight, const Tensor & running_mean, const Tensor & running_var, const Tensor & save_mean, const Tensor & save_var_transform, bool train, double eps, std::array<bool,3> output_mask);
static inline Tensor bernoulli(const Tensor & self, Generator * generator=nullptr);
static inline Tensor & bernoulli_out(Tensor & out, const Tensor & self, Generator * generator=nullptr);
static inline Tensor bernoulli(const Tensor & self, double p, Generator * generator=nullptr);
static inline Tensor bilinear(const Tensor & input1, const Tensor & input2, const Tensor & weight, const Tensor & bias);
static inline Tensor binary_cross_entropy_with_logits(const Tensor & self, const Tensor & target, const Tensor & weight={}, const Tensor & pos_weight={}, int64_t reduction=Reduction::Mean);
static inline Tensor binary_cross_entropy_with_logits_backward(const Tensor & grad_output, const Tensor & self, const Tensor & target, const Tensor & weight={}, const Tensor & pos_weight={}, int64_t reduction=Reduction::Mean);
static inline Tensor bincount(const Tensor & self, const Tensor & weights={}, int64_t minlength=0);
static inline Tensor bitwise_not(const Tensor & self);
static inline Tensor & bitwise_not_out(Tensor & out, const Tensor & self);
static inline Tensor logical_not(const Tensor & self);
static inline Tensor & logical_not_out(Tensor & out, const Tensor & self);
static inline Tensor logical_xor(const Tensor & self, const Tensor & other);
static inline Tensor & logical_xor_out(Tensor & out, const Tensor & self, const Tensor & other);
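// Illustrative usage sketch, not emitted by gen.py: the free functions declared in this
// header mirror the corresponding Tensor methods and follow ATen's naming scheme, where
// op(...) returns a new tensor, op_(...) mutates its first argument in place, and
// op_out(out, ...) writes into a caller-provided tensor. The helper name below is
// hypothetical and exists only to show the calling pattern against the declarations above;
// default TensorOptions are assumed for the factory calls.
inline Tensor functions_usage_sketch() {
  Tensor x = arange(/*start=*/-4, /*end=*/4);  // factory overload with default TensorOptions
  Tensor y = abs(x);                           // out-of-place: returns a new tensor
  abs_(x);                                     // in-place variant: x now holds |x|
  Tensor out = arange(8);
  add_out(out, x, y, /*alpha=*/1);             // out= variant: out = x + 1 * y
  return out;
}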
static inline Tensor blackman_window(int64_t window_length, const TensorOptions & options={}); static inline Tensor blackman_window(int64_t window_length, bool periodic, const TensorOptions & options={}); static inline Tensor bmm(const Tensor & self, const Tensor & mat2); static inline Tensor & bmm_out(Tensor & out, const Tensor & self, const Tensor & mat2); static inline std::vector broadcast_tensors(TensorList tensors); static inline Tensor cat(TensorList tensors, int64_t dim=0); static inline Tensor & cat_out(Tensor & out, TensorList tensors, int64_t dim=0); #ifdef BUILD_NAMEDTENSOR static inline Tensor cat(TensorList tensors, Dimname dim); #endif #ifdef BUILD_NAMEDTENSOR static inline Tensor & cat_out(Tensor & out, TensorList tensors, Dimname dim); #endif static inline Tensor ceil(const Tensor & self); static inline Tensor & ceil_(Tensor & self); static inline Tensor & ceil_out(Tensor & out, const Tensor & self); static inline Tensor chain_matmul(TensorList matrices); static inline std::vector chunk(const Tensor & self, int64_t chunks, int64_t dim=0); static inline Tensor clamp(const Tensor & self, c10::optional min=c10::nullopt, c10::optional max=c10::nullopt); static inline Tensor & clamp_(Tensor & self, c10::optional min=c10::nullopt, c10::optional max=c10::nullopt); static inline Tensor & clamp_out(Tensor & out, const Tensor & self, c10::optional min=c10::nullopt, c10::optional max=c10::nullopt); static inline Tensor clamp_max(const Tensor & self, Scalar max); static inline Tensor & clamp_max_(Tensor & self, Scalar max); static inline Tensor & clamp_max_out(Tensor & out, const Tensor & self, Scalar max); static inline Tensor clamp_min(const Tensor & self, Scalar min); static inline Tensor & clamp_min_(Tensor & self, Scalar min); static inline Tensor & clamp_min_out(Tensor & out, const Tensor & self, Scalar min); static inline bool cudnn_is_acceptable(const Tensor & self); static inline Tensor constant_pad_nd(const Tensor & self, IntArrayRef pad, Scalar value=0); static inline Tensor convolution(const Tensor & input, const Tensor & weight, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool transposed, IntArrayRef output_padding, int64_t groups); static inline Tensor convolution_overrideable(const Tensor & input, const Tensor & weight, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool transposed, IntArrayRef output_padding, int64_t groups); static inline std::tuple convolution_backward_overrideable(const Tensor & grad_output, const Tensor & input, const Tensor & weight, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool transposed, IntArrayRef output_padding, int64_t groups, std::array output_mask); static inline Tensor _convolution(const Tensor & input, const Tensor & weight, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool transposed, IntArrayRef output_padding, int64_t groups, bool benchmark, bool deterministic, bool cudnn_enabled); static inline Tensor _convolution_nogroup(const Tensor & input, const Tensor & weight, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool transposed, IntArrayRef output_padding); static inline std::tuple _convolution_double_backward(const Tensor & ggI, const Tensor & ggW, const Tensor & ggb, const Tensor & gO, const Tensor & weight, const Tensor & self, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool transposed, IntArrayRef output_padding, int64_t groups, bool 
benchmark, bool deterministic, bool cudnn_enabled, std::array output_mask); static inline Tensor conv1d(const Tensor & input, const Tensor & weight, const Tensor & bias={}, IntArrayRef stride=1, IntArrayRef padding=0, IntArrayRef dilation=1, int64_t groups=1); static inline Tensor conv2d(const Tensor & input, const Tensor & weight, const Tensor & bias={}, IntArrayRef stride=1, IntArrayRef padding=0, IntArrayRef dilation=1, int64_t groups=1); static inline Tensor conv3d(const Tensor & input, const Tensor & weight, const Tensor & bias={}, IntArrayRef stride=1, IntArrayRef padding=0, IntArrayRef dilation=1, int64_t groups=1); static inline Tensor conv_tbc(const Tensor & self, const Tensor & weight, const Tensor & bias, int64_t pad=0); static inline std::tuple conv_tbc_backward(const Tensor & self, const Tensor & input, const Tensor & weight, const Tensor & bias, int64_t pad); static inline Tensor conv_transpose1d(const Tensor & input, const Tensor & weight, const Tensor & bias={}, IntArrayRef stride=1, IntArrayRef padding=0, IntArrayRef output_padding=0, int64_t groups=1, IntArrayRef dilation=1); static inline Tensor conv_transpose2d(const Tensor & input, const Tensor & weight, const Tensor & bias={}, IntArrayRef stride=1, IntArrayRef padding=0, IntArrayRef output_padding=0, int64_t groups=1, IntArrayRef dilation=1); static inline Tensor conv_transpose3d(const Tensor & input, const Tensor & weight, const Tensor & bias={}, IntArrayRef stride=1, IntArrayRef padding=0, IntArrayRef output_padding=0, int64_t groups=1, IntArrayRef dilation=1); static inline Tensor _copy_from(const Tensor & self, const Tensor & dst, bool non_blocking=false); static inline Tensor cos(const Tensor & self); static inline Tensor & cos_(Tensor & self); static inline Tensor & cos_out(Tensor & out, const Tensor & self); static inline Tensor cosh(const Tensor & self); static inline Tensor & cosh_(Tensor & self); static inline Tensor & cosh_out(Tensor & out, const Tensor & self); static inline Tensor cosine_embedding_loss(const Tensor & input1, const Tensor & input2, const Tensor & target, double margin=0.0, int64_t reduction=Reduction::Mean); static inline Tensor cudnn_affine_grid_generator(const Tensor & theta, int64_t N, int64_t C, int64_t H, int64_t W); static inline Tensor cudnn_affine_grid_generator_backward(const Tensor & grad, int64_t N, int64_t C, int64_t H, int64_t W); static inline std::tuple cudnn_batch_norm(const Tensor & input, const Tensor & weight, const Tensor & bias, const Tensor & running_mean, const Tensor & running_var, bool training, double exponential_average_factor, double epsilon); static inline std::tuple cudnn_batch_norm_backward(const Tensor & input, const Tensor & grad_output, const Tensor & weight, const Tensor & running_mean, const Tensor & running_var, const Tensor & save_mean, const Tensor & save_var, double epsilon); static inline Tensor cudnn_convolution(const Tensor & self, const Tensor & weight, const Tensor & bias, IntArrayRef padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic); static inline Tensor cudnn_convolution_backward_input(IntArrayRef self_size, const Tensor & grad_output, const Tensor & weight, IntArrayRef padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic); static inline std::tuple cudnn_convolution_backward(const Tensor & self, const Tensor & grad_output, const Tensor & weight, IntArrayRef padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups, bool benchmark, bool 
deterministic, std::array output_mask); static inline Tensor cudnn_convolution_backward_bias(const Tensor & grad_output); static inline Tensor cudnn_convolution_backward_weight(IntArrayRef weight_size, const Tensor & grad_output, const Tensor & self, IntArrayRef padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic); static inline Tensor cudnn_convolution_transpose(const Tensor & self, const Tensor & weight, const Tensor & bias, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic); static inline std::tuple cudnn_convolution_transpose_backward(const Tensor & self, const Tensor & grad_output, const Tensor & weight, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic, std::array output_mask); static inline Tensor cudnn_convolution_transpose_backward_bias(const Tensor & grad_output); static inline Tensor cudnn_convolution_transpose_backward_input(const Tensor & grad_output, const Tensor & weight, IntArrayRef padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic); static inline Tensor cudnn_convolution_transpose_backward_weight(IntArrayRef weight_size, const Tensor & grad_output, const Tensor & self, IntArrayRef padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic); static inline Tensor cudnn_grid_sampler(const Tensor & self, const Tensor & grid); static inline std::tuple cudnn_grid_sampler_backward(const Tensor & self, const Tensor & grid, const Tensor & grad_output); static inline Tensor cumsum(const Tensor & self, int64_t dim, c10::optional dtype=c10::nullopt); static inline Tensor & cumsum_out(Tensor & out, const Tensor & self, int64_t dim, c10::optional dtype=c10::nullopt); #ifdef BUILD_NAMEDTENSOR static inline Tensor cumsum(const Tensor & self, Dimname dim, c10::optional dtype=c10::nullopt); #endif #ifdef BUILD_NAMEDTENSOR static inline Tensor & cumsum_out(Tensor & out, const Tensor & self, Dimname dim, c10::optional dtype=c10::nullopt); #endif static inline Tensor cumprod(const Tensor & self, int64_t dim, c10::optional dtype=c10::nullopt); static inline Tensor & cumprod_out(Tensor & out, const Tensor & self, int64_t dim, c10::optional dtype=c10::nullopt); #ifdef BUILD_NAMEDTENSOR static inline Tensor cumprod(const Tensor & self, Dimname dim, c10::optional dtype=c10::nullopt); #endif #ifdef BUILD_NAMEDTENSOR static inline Tensor & cumprod_out(Tensor & out, const Tensor & self, Dimname dim, c10::optional dtype=c10::nullopt); #endif static inline Tensor ctc_loss(const Tensor & log_probs, const Tensor & targets, IntArrayRef input_lengths, IntArrayRef target_lengths, int64_t blank=0, int64_t reduction=Reduction::Mean, bool zero_infinity=false); static inline Tensor ctc_loss(const Tensor & log_probs, const Tensor & targets, const Tensor & input_lengths, const Tensor & target_lengths, int64_t blank=0, int64_t reduction=Reduction::Mean, bool zero_infinity=false); static inline std::tuple _ctc_loss(const Tensor & log_probs, const Tensor & targets, IntArrayRef input_lengths, IntArrayRef target_lengths, int64_t blank=0, bool zero_infinity=false); static inline Tensor _ctc_loss_backward(const Tensor & grad, const Tensor & log_probs, const Tensor & targets, IntArrayRef input_lengths, IntArrayRef target_lengths, const Tensor & neg_log_likelihood, const Tensor & log_alpha, int64_t 
blank, bool zero_infinity=false); static inline Tensor det(const Tensor & self); static inline Tensor diag_embed(const Tensor & self, int64_t offset=0, int64_t dim1=-2, int64_t dim2=-1); static inline Tensor diagflat(const Tensor & self, int64_t offset=0); static inline Tensor diagonal(const Tensor & self, int64_t offset=0, int64_t dim1=0, int64_t dim2=1); static inline Tensor div(const Tensor & self, const Tensor & other); static inline Tensor & div_out(Tensor & out, const Tensor & self, const Tensor & other); static inline Tensor div(const Tensor & self, Scalar other); static inline Tensor dot(const Tensor & self, const Tensor & tensor); static inline Tensor & dot_out(Tensor & out, const Tensor & self, const Tensor & tensor); static inline Tensor einsum(std::string equation, TensorList tensors); static inline Tensor embedding(const Tensor & weight, const Tensor & indices, int64_t padding_idx=-1, bool scale_grad_by_freq=false, bool sparse=false); static inline Tensor embedding_backward(const Tensor & grad, const Tensor & indices, int64_t num_weights, int64_t padding_idx, bool scale_grad_by_freq, bool sparse); static inline Tensor embedding_dense_backward(const Tensor & grad_output, const Tensor & indices, int64_t num_weights, int64_t padding_idx, bool scale_grad_by_freq); static inline Tensor & embedding_renorm_(Tensor & self, const Tensor & indices, double max_norm, double norm_type); static inline Tensor embedding_sparse_backward(const Tensor & grad, const Tensor & indices, int64_t num_weights, int64_t padding_idx, bool scale_grad_by_freq); static inline std::tuple embedding_bag(const Tensor & weight, const Tensor & indices, const Tensor & offsets, bool scale_grad_by_freq=false, int64_t mode=0, bool sparse=false, const Tensor & per_sample_weights={}); static inline std::tuple _embedding_bag(const Tensor & weight, const Tensor & indices, const Tensor & offsets, bool scale_grad_by_freq=false, int64_t mode=0, bool sparse=false, const Tensor & per_sample_weights={}); static inline Tensor _embedding_bag_backward(const Tensor & grad, const Tensor & indices, const Tensor & offsets, const Tensor & offset2bag, const Tensor & bag_size, const Tensor & maximum_indices, int64_t num_weights, bool scale_grad_by_freq, int64_t mode, bool sparse, const Tensor & per_sample_weights); static inline Tensor _embedding_bag_sparse_backward(const Tensor & grad, const Tensor & indices, const Tensor & offsets, const Tensor & offset2bag, const Tensor & bag_size, int64_t num_weights, bool scale_grad_by_freq, int64_t mode, const Tensor & per_sample_weights); static inline Tensor _embedding_bag_dense_backward(const Tensor & grad, const Tensor & indices, const Tensor & offsets, const Tensor & offset2bag, const Tensor & bag_size, const Tensor & maximum_indices, int64_t num_weights, bool scale_grad_by_freq, int64_t mode, const Tensor & per_sample_weights); static inline Tensor _embedding_bag_per_sample_weights_backward(const Tensor & grad, const Tensor & weight, const Tensor & indices, const Tensor & offsets, const Tensor & offset2bag, int64_t mode); #ifdef BUILD_NAMEDTENSOR static inline Tensor empty(IntArrayRef size, c10::optional names, const TensorOptions & options={}, c10::optional memory_format=c10::nullopt); #endif static inline Tensor empty(IntArrayRef size, const TensorOptions & options={}, c10::optional memory_format=c10::nullopt); static inline Tensor _empty_affine_quantized(IntArrayRef size, const TensorOptions & options={}, double scale=1, int64_t zero_point=0, c10::optional 
memory_format=MemoryFormat::Contiguous); static inline Tensor _empty_per_channel_affine_quantized(IntArrayRef size, const Tensor & scales, const Tensor & zero_points, int64_t axis, const TensorOptions & options={}, c10::optional memory_format=MemoryFormat::Contiguous); static inline Tensor & empty_out(Tensor & out, IntArrayRef size, c10::optional memory_format=c10::nullopt); static inline Tensor empty_like(const Tensor & self); static inline Tensor empty_like(const Tensor & self, const TensorOptions & options, c10::optional memory_format=MemoryFormat::Contiguous); static inline Tensor empty_strided(IntArrayRef size, IntArrayRef stride, const TensorOptions & options={}); static inline Tensor erf(const Tensor & self); static inline Tensor & erf_(Tensor & self); static inline Tensor & erf_out(Tensor & out, const Tensor & self); static inline Tensor erfc(const Tensor & self); static inline Tensor & erfc_(Tensor & self); static inline Tensor & erfc_out(Tensor & out, const Tensor & self); static inline Tensor exp(const Tensor & self); static inline Tensor & exp_(Tensor & self); static inline Tensor & exp_out(Tensor & out, const Tensor & self); static inline Tensor expm1(const Tensor & self); static inline Tensor & expm1_(Tensor & self); static inline Tensor & expm1_out(Tensor & out, const Tensor & self); static inline Tensor eye(int64_t n, const TensorOptions & options={}); static inline Tensor eye(int64_t n, int64_t m, const TensorOptions & options={}); static inline Tensor & eye_out(Tensor & out, int64_t n); static inline Tensor & eye_out(Tensor & out, int64_t n, int64_t m); static inline Tensor flatten(const Tensor & self, int64_t start_dim=0, int64_t end_dim=-1); #ifdef BUILD_NAMEDTENSOR static inline Tensor flatten(const Tensor & self, int64_t start_dim, int64_t end_dim, Dimname out_dim); #endif #ifdef BUILD_NAMEDTENSOR static inline Tensor flatten(const Tensor & self, Dimname start_dim, Dimname end_dim, Dimname out_dim); #endif #ifdef BUILD_NAMEDTENSOR static inline Tensor flatten(const Tensor & self, DimnameList dims, Dimname out_dim); #endif static inline Tensor & fill_(Tensor & self, Scalar value); static inline Tensor & fill_(Tensor & self, const Tensor & value); static inline Tensor floor(const Tensor & self); static inline Tensor & floor_(Tensor & self); static inline Tensor & floor_out(Tensor & out, const Tensor & self); static inline Tensor frac(const Tensor & self); static inline Tensor & frac_(Tensor & self); static inline Tensor & frac_out(Tensor & out, const Tensor & self); #ifdef BUILD_NAMEDTENSOR static inline Tensor full(IntArrayRef size, Scalar fill_value, c10::optional names, const TensorOptions & options={}); #endif static inline Tensor full(IntArrayRef size, Scalar fill_value, const TensorOptions & options={}); static inline Tensor & full_out(Tensor & out, IntArrayRef size, Scalar fill_value); static inline Tensor full_like(const Tensor & self, Scalar fill_value); static inline Tensor full_like(const Tensor & self, Scalar fill_value, const TensorOptions & options); static inline Tensor from_file(std::string filename, c10::optional shared=c10::nullopt, c10::optional size=0, const TensorOptions & options={}); static inline Tensor grid_sampler(const Tensor & input, const Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners); static inline Tensor grid_sampler_2d(const Tensor & input, const Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners); static inline std::tuple grid_sampler_2d_backward(const Tensor & 
grad_output, const Tensor & input, const Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners); static inline Tensor grid_sampler_3d(const Tensor & input, const Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners); static inline std::tuple grid_sampler_3d_backward(const Tensor & grad_output, const Tensor & input, const Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners); static inline Tensor hann_window(int64_t window_length, const TensorOptions & options={}); static inline Tensor hann_window(int64_t window_length, bool periodic, const TensorOptions & options={}); static inline Tensor hamming_window(int64_t window_length, const TensorOptions & options={}); static inline Tensor hamming_window(int64_t window_length, bool periodic, const TensorOptions & options={}); static inline Tensor hamming_window(int64_t window_length, bool periodic, double alpha, const TensorOptions & options={}); static inline Tensor hamming_window(int64_t window_length, bool periodic, double alpha, double beta, const TensorOptions & options={}); static inline Tensor hinge_embedding_loss(const Tensor & self, const Tensor & target, double margin=1.0, int64_t reduction=Reduction::Mean); static inline Tensor ger(const Tensor & self, const Tensor & vec2); static inline Tensor & ger_out(Tensor & out, const Tensor & self, const Tensor & vec2); static inline Tensor group_norm(const Tensor & input, int64_t num_groups, const Tensor & weight={}, const Tensor & bias={}, double eps=1e-05, bool cudnn_enabled=true); static inline Tensor fft(const Tensor & self, int64_t signal_ndim, bool normalized=false); static inline Tensor ifft(const Tensor & self, int64_t signal_ndim, bool normalized=false); static inline Tensor rfft(const Tensor & self, int64_t signal_ndim, bool normalized=false, bool onesided=true); static inline Tensor irfft(const Tensor & self, int64_t signal_ndim, bool normalized=false, bool onesided=true, IntArrayRef signal_sizes={}); static inline Tensor _fft_with_size(const Tensor & self, int64_t signal_ndim, bool complex_input, bool complex_output, bool inverse, IntArrayRef checked_signal_sizes, bool normalized, bool onesided, IntArrayRef output_sizes); static inline int64_t _cufft_get_plan_cache_size(int64_t device_index); static inline int64_t _cufft_get_plan_cache_max_size(int64_t device_index); static inline void _cufft_set_plan_cache_max_size(int64_t device_index, int64_t max_size); static inline void _cufft_clear_plan_cache(int64_t device_index); static inline Tensor index(const Tensor & self, TensorList indices); static inline Tensor index_copy(const Tensor & self, int64_t dim, const Tensor & index, const Tensor & source); #ifdef BUILD_NAMEDTENSOR static inline Tensor index_copy(const Tensor & self, Dimname dim, const Tensor & index, const Tensor & source); #endif static inline Tensor & index_put_(Tensor & self, TensorList indices, const Tensor & values, bool accumulate=false); static inline Tensor index_put(const Tensor & self, TensorList indices, const Tensor & values, bool accumulate=false); static inline Tensor & _index_put_impl_(Tensor & self, TensorList indices, const Tensor & values, bool accumulate=false, bool unsafe=false); static inline Tensor instance_norm(const Tensor & input, const Tensor & weight, const Tensor & bias, const Tensor & running_mean, const Tensor & running_var, bool use_input_stats, double momentum, double eps, bool cudnn_enabled); static inline Tensor inverse(const Tensor & self); static 
inline Tensor & inverse_out(Tensor & out, const Tensor & self); static inline Tensor _inverse_helper(const Tensor & self); static inline Tensor isclose(const Tensor & self, const Tensor & other, double rtol=1e-05, double atol=1e-08, bool equal_nan=false); static inline Tensor isnan(const Tensor & self); static inline bool is_distributed(const Tensor & self); static inline bool is_floating_point(const Tensor & self); static inline bool is_complex(const Tensor & self); static inline bool is_nonzero(const Tensor & self); static inline bool is_same_size(const Tensor & self, const Tensor & other); static inline bool is_signed(const Tensor & self); static inline Tensor kl_div(const Tensor & self, const Tensor & target, int64_t reduction=Reduction::Mean); static inline Tensor kl_div_backward(const Tensor & grad_output, const Tensor & self, const Tensor & target, int64_t reduction=Reduction::Mean); static inline std::tuple kthvalue(const Tensor & self, int64_t k, int64_t dim=-1, bool keepdim=false); static inline std::tuple kthvalue_out(Tensor & values, Tensor & indices, const Tensor & self, int64_t k, int64_t dim=-1, bool keepdim=false); #ifdef BUILD_NAMEDTENSOR static inline std::tuple kthvalue(const Tensor & self, int64_t k, Dimname dim, bool keepdim=false); #endif #ifdef BUILD_NAMEDTENSOR static inline std::tuple kthvalue_out(Tensor & values, Tensor & indices, const Tensor & self, int64_t k, Dimname dim, bool keepdim=false); #endif static inline Tensor layer_norm(const Tensor & input, IntArrayRef normalized_shape, const Tensor & weight={}, const Tensor & bias={}, double eps=1e-05, bool cudnn_enable=true); static inline std::tuple native_layer_norm(const Tensor & input, const Tensor & weight, const Tensor & bias, int64_t M, int64_t N, double eps); static inline std::tuple native_layer_norm_backward(const Tensor & grad_out, const Tensor & input, const Tensor & mean, const Tensor & rstd, const Tensor & weight, int64_t M, int64_t N, std::array output_mask); static inline std::tuple native_layer_norm_double_backward(const Tensor & ggI, const Tensor & ggW, const Tensor & ggb, const Tensor & gO, const Tensor & input, const Tensor & mean, const Tensor & rstd, const Tensor & weight, int64_t M, int64_t N, std::array output_mask); static inline Tensor linear(const Tensor & input, const Tensor & weight, const Tensor & bias={}); static inline Tensor mkldnn_linear(const Tensor & input, const Tensor & weight, const Tensor & bias={}); static inline Tensor fbgemm_linear_int8_weight_fp32_activation(const Tensor & input, const Tensor & weight, const Tensor & packed, const Tensor & col_offsets, Scalar weight_scale, Scalar weight_zero_point, const Tensor & bias); static inline Tensor fbgemm_linear_int8_weight(const Tensor & input, const Tensor & weight, const Tensor & packed, const Tensor & col_offsets, Scalar weight_scale, Scalar weight_zero_point, const Tensor & bias); static inline std::tuple fbgemm_linear_quantize_weight(const Tensor & input); static inline Tensor fbgemm_pack_gemm_matrix_fp16(const Tensor & input); static inline Tensor fbgemm_linear_fp16_weight_fp32_activation(const Tensor & input, const Tensor & packed_weight, const Tensor & bias); static inline Tensor fbgemm_linear_fp16_weight(const Tensor & input, const Tensor & packed_weight, const Tensor & bias); static inline Tensor fbgemm_pack_quantized_matrix(const Tensor & input); static inline Tensor fbgemm_pack_quantized_matrix(const Tensor & input, int64_t K, int64_t N); static inline Tensor linspace(Scalar start, Scalar end, int64_t steps=100, 
const TensorOptions & options={}); static inline Tensor & linspace_out(Tensor & out, Scalar start, Scalar end, int64_t steps=100); static inline Tensor log(const Tensor & self); static inline Tensor & log_(Tensor & self); static inline Tensor & log_out(Tensor & out, const Tensor & self); static inline Tensor log10(const Tensor & self); static inline Tensor & log10_(Tensor & self); static inline Tensor & log10_out(Tensor & out, const Tensor & self); static inline Tensor log1p(const Tensor & self); static inline Tensor & log1p_(Tensor & self); static inline Tensor & log1p_out(Tensor & out, const Tensor & self); static inline Tensor log2(const Tensor & self); static inline Tensor & log2_(Tensor & self); static inline Tensor & log2_out(Tensor & out, const Tensor & self); static inline Tensor logdet(const Tensor & self); static inline Tensor logspace(Scalar start, Scalar end, int64_t steps=100, double base=10.0, const TensorOptions & options={}); static inline Tensor & logspace_out(Tensor & out, Scalar start, Scalar end, int64_t steps=100, double base=10.0); static inline Tensor log_softmax(const Tensor & self, int64_t dim, c10::optional dtype=c10::nullopt); #ifdef BUILD_NAMEDTENSOR static inline Tensor log_softmax(const Tensor & self, Dimname dim, c10::optional dtype=c10::nullopt); #endif static inline Tensor _log_softmax(const Tensor & self, int64_t dim, bool half_to_float); static inline Tensor _log_softmax_backward_data(const Tensor & grad_output, const Tensor & output, int64_t dim, const Tensor & self); static inline Tensor logsumexp(const Tensor & self, IntArrayRef dim, bool keepdim=false); static inline Tensor & logsumexp_out(Tensor & out, const Tensor & self, IntArrayRef dim, bool keepdim=false); #ifdef BUILD_NAMEDTENSOR static inline Tensor logsumexp(const Tensor & self, DimnameList dim, bool keepdim=false); #endif #ifdef BUILD_NAMEDTENSOR static inline Tensor & logsumexp_out(Tensor & out, const Tensor & self, DimnameList dim, bool keepdim=false); #endif static inline Tensor margin_ranking_loss(const Tensor & input1, const Tensor & input2, const Tensor & target, double margin=0.0, int64_t reduction=Reduction::Mean); static inline Tensor matmul(const Tensor & self, const Tensor & other); static inline Tensor & matmul_out(Tensor & out, const Tensor & self, const Tensor & other); static inline Tensor matrix_rank(const Tensor & self, double tol, bool symmetric=false); static inline Tensor matrix_rank(const Tensor & self, bool symmetric=false); static inline Tensor matrix_power(const Tensor & self, int64_t n); static inline std::tuple max(const Tensor & self, int64_t dim, bool keepdim=false); static inline std::tuple max_out(Tensor & max, Tensor & max_values, const Tensor & self, int64_t dim, bool keepdim=false); static inline Tensor max_values(const Tensor & self, IntArrayRef dim, bool keepdim=false); #ifdef BUILD_NAMEDTENSOR static inline std::tuple max(const Tensor & self, Dimname dim, bool keepdim=false); #endif #ifdef BUILD_NAMEDTENSOR static inline std::tuple max_out(Tensor & max, Tensor & max_values, const Tensor & self, Dimname dim, bool keepdim=false); #endif #ifdef BUILD_NAMEDTENSOR static inline Tensor max_values(const Tensor & self, DimnameList dim, bool keepdim=false); #endif static inline std::tuple max_pool1d_with_indices(const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride={}, IntArrayRef padding=0, IntArrayRef dilation=1, bool ceil_mode=false); static inline Tensor max_pool1d(const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride={}, IntArrayRef 
padding=0, IntArrayRef dilation=1, bool ceil_mode=false); static inline Tensor max_pool2d(const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride={}, IntArrayRef padding=0, IntArrayRef dilation=1, bool ceil_mode=false); static inline Tensor mkldnn_max_pool2d(const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride={}, IntArrayRef padding=0, IntArrayRef dilation=1, bool ceil_mode=false); static inline Tensor quantized_max_pool2d(const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride={}, IntArrayRef padding=0, IntArrayRef dilation=1, bool ceil_mode=false); static inline Tensor max_pool3d(const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride={}, IntArrayRef padding=0, IntArrayRef dilation=1, bool ceil_mode=false); static inline Tensor mean(const Tensor & self, c10::optional dtype=c10::nullopt); static inline Tensor mean(const Tensor & self, IntArrayRef dim, bool keepdim=false, c10::optional dtype=c10::nullopt); static inline Tensor & mean_out(Tensor & out, const Tensor & self, IntArrayRef dim, bool keepdim=false, c10::optional dtype=c10::nullopt); #ifdef BUILD_NAMEDTENSOR static inline Tensor mean(const Tensor & self, DimnameList dim, bool keepdim=false, c10::optional dtype=c10::nullopt); #endif #ifdef BUILD_NAMEDTENSOR static inline Tensor & mean_out(Tensor & out, const Tensor & self, DimnameList dim, bool keepdim=false, c10::optional dtype=c10::nullopt); #endif static inline std::tuple median(const Tensor & self, int64_t dim, bool keepdim=false); static inline std::tuple median_out(Tensor & values, Tensor & indices, const Tensor & self, int64_t dim, bool keepdim=false); #ifdef BUILD_NAMEDTENSOR static inline std::tuple median(const Tensor & self, Dimname dim, bool keepdim=false); #endif #ifdef BUILD_NAMEDTENSOR static inline std::tuple median_out(Tensor & values, Tensor & indices, const Tensor & self, Dimname dim, bool keepdim=false); #endif static inline std::tuple min(const Tensor & self, int64_t dim, bool keepdim=false); static inline std::tuple min_out(Tensor & min, Tensor & min_indices, const Tensor & self, int64_t dim, bool keepdim=false); static inline Tensor min_values(const Tensor & self, IntArrayRef dim, bool keepdim=false); #ifdef BUILD_NAMEDTENSOR static inline std::tuple min(const Tensor & self, Dimname dim, bool keepdim=false); #endif #ifdef BUILD_NAMEDTENSOR static inline std::tuple min_out(Tensor & min, Tensor & min_indices, const Tensor & self, Dimname dim, bool keepdim=false); #endif #ifdef BUILD_NAMEDTENSOR static inline Tensor min_values(const Tensor & self, DimnameList dim, bool keepdim=false); #endif static inline Tensor mkldnn_convolution(const Tensor & self, const Tensor & weight, const Tensor & bias, IntArrayRef padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups); static inline Tensor mkldnn_convolution_backward_input(IntArrayRef self_size, const Tensor & grad_output, const Tensor & weight, IntArrayRef padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups, bool bias_defined); static inline std::tuple mkldnn_convolution_backward_weights(IntArrayRef weight_size, const Tensor & grad_output, const Tensor & self, IntArrayRef padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups, bool bias_defined); static inline std::tuple mkldnn_convolution_backward(const Tensor & self, const Tensor & grad_output, const Tensor & weight, IntArrayRef padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups, std::array output_mask); static inline std::tuple miopen_batch_norm(const Tensor & input, const 
Tensor & weight, const Tensor & bias, const Tensor & running_mean, const Tensor & running_var, bool training, double exponential_average_factor, double epsilon); static inline std::tuple miopen_batch_norm_backward(const Tensor & input, const Tensor & grad_output, const Tensor & weight, const Tensor & running_mean, const Tensor & running_var, const Tensor & save_mean, const Tensor & save_var, double epsilon); static inline Tensor miopen_convolution(const Tensor & self, const Tensor & weight, const Tensor & bias, IntArrayRef padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic); static inline Tensor miopen_convolution_backward_input(IntArrayRef self_size, const Tensor & grad_output, const Tensor & weight, IntArrayRef padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic); static inline std::tuple miopen_convolution_backward(const Tensor & self, const Tensor & grad_output, const Tensor & weight, IntArrayRef padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic, std::array output_mask); static inline Tensor miopen_convolution_backward_bias(const Tensor & grad_output); static inline Tensor miopen_convolution_backward_weight(IntArrayRef weight_size, const Tensor & grad_output, const Tensor & self, IntArrayRef padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic); static inline Tensor miopen_convolution_transpose(const Tensor & self, const Tensor & weight, const Tensor & bias, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic); static inline std::tuple miopen_convolution_transpose_backward(const Tensor & self, const Tensor & grad_output, const Tensor & weight, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic, std::array output_mask); static inline Tensor miopen_convolution_transpose_backward_input(const Tensor & grad_output, const Tensor & weight, IntArrayRef padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic); static inline Tensor miopen_convolution_transpose_backward_weight(IntArrayRef weight_size, const Tensor & grad_output, const Tensor & self, IntArrayRef padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic); static inline Tensor miopen_depthwise_convolution(const Tensor & self, const Tensor & weight, const Tensor & bias, IntArrayRef padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic); static inline Tensor miopen_depthwise_convolution_backward_input(IntArrayRef self_size, const Tensor & grad_output, const Tensor & weight, IntArrayRef padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic); static inline std::tuple miopen_depthwise_convolution_backward(const Tensor & self, const Tensor & grad_output, const Tensor & weight, IntArrayRef padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic, std::array output_mask); static inline Tensor miopen_depthwise_convolution_backward_weight(IntArrayRef weight_size, const Tensor & grad_output, const Tensor & self, IntArrayRef padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic); static inline 
std::tuple miopen_rnn(const Tensor & input, TensorList weight, int64_t weight_stride0, const Tensor & hx, const Tensor & cx, int64_t mode, int64_t hidden_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, IntArrayRef batch_sizes, const Tensor & dropout_state); static inline std::tuple> miopen_rnn_backward(const Tensor & input, TensorList weight, int64_t weight_stride0, const Tensor & weight_buf, const Tensor & hx, const Tensor & cx, const Tensor & output, const Tensor & grad_output, const Tensor & grad_hy, const Tensor & grad_cy, int64_t mode, int64_t hidden_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, IntArrayRef batch_sizes, const Tensor & dropout_state, const Tensor & reserve, std::array output_mask); static inline Tensor mm(const Tensor & self, const Tensor & mat2); static inline Tensor & mm_out(Tensor & out, const Tensor & self, const Tensor & mat2); static inline Tensor _sparse_mm(const Tensor & sparse, const Tensor & dense); static inline std::tuple mode(const Tensor & self, int64_t dim=-1, bool keepdim=false); static inline std::tuple mode_out(Tensor & values, Tensor & indices, const Tensor & self, int64_t dim=-1, bool keepdim=false); #ifdef BUILD_NAMEDTENSOR static inline std::tuple mode(const Tensor & self, Dimname dim, bool keepdim=false); #endif #ifdef BUILD_NAMEDTENSOR static inline std::tuple mode_out(Tensor & values, Tensor & indices, const Tensor & self, Dimname dim, bool keepdim=false); #endif static inline Tensor mul(const Tensor & self, const Tensor & other); static inline Tensor & mul_out(Tensor & out, const Tensor & self, const Tensor & other); static inline Tensor mul(const Tensor & self, Scalar other); static inline Tensor mv(const Tensor & self, const Tensor & vec); static inline Tensor & mv_out(Tensor & out, const Tensor & self, const Tensor & vec); static inline Tensor mvlgamma(const Tensor & self, int64_t p); static inline Tensor narrow(const Tensor & self, int64_t dim, int64_t start, int64_t length); static inline std::tuple native_batch_norm(const Tensor & input, const Tensor & weight, const Tensor & bias, const Tensor & running_mean, const Tensor & running_var, bool training, double momentum, double eps); static inline std::tuple batch_norm_stats(const Tensor & input, double eps); static inline Tensor batch_norm_elemt(const Tensor & input, const Tensor & weight, const Tensor & bias, const Tensor & mean, const Tensor & invstd, double eps); static inline std::tuple batch_norm_gather_stats(const Tensor & input, const Tensor & mean, const Tensor & invstd, const Tensor & running_mean, const Tensor & running_var, double momentum, double eps, int64_t count); static inline std::tuple batch_norm_gather_stats_with_counts(const Tensor & input, const Tensor & mean, const Tensor & invstd, const Tensor & running_mean, const Tensor & running_var, double momentum, double eps, IntArrayRef counts); static inline std::tuple native_batch_norm_backward(const Tensor & grad_out, const Tensor & input, const Tensor & weight, const Tensor & running_mean, const Tensor & running_var, const Tensor & save_mean, const Tensor & save_invstd, bool train, double eps, std::array output_mask); static inline std::tuple batch_norm_backward_reduce(const Tensor & grad_out, const Tensor & input, const Tensor & mean, const Tensor & invstd, const Tensor & weight, bool input_g, bool weight_g, bool bias_g); static inline Tensor batch_norm_backward_elemt(const Tensor & grad_out, const Tensor & input, const Tensor & 
mean, const Tensor & invstd, const Tensor & weight, const Tensor & mean_dy, const Tensor & mean_dy_xmu); static inline std::tuple batch_norm_update_stats(const Tensor & input, const Tensor & running_mean, const Tensor & running_var, double momentum); static inline bool _nnpack_available(); static inline Tensor _nnpack_spatial_convolution(const Tensor & input, const Tensor & weight, const Tensor & bias, IntArrayRef padding); static inline std::tuple _nnpack_spatial_convolution_backward(const Tensor & input, const Tensor & grad_output, const Tensor & weight, IntArrayRef padding, std::array output_mask); static inline Tensor _nnpack_spatial_convolution_backward_input(const Tensor & input, const Tensor & grad_output, const Tensor & weight, IntArrayRef padding); static inline Tensor _nnpack_spatial_convolution_backward_weight(const Tensor & input, IntArrayRef weightsize, const Tensor & grad_output, IntArrayRef padding); #ifdef BUILD_NAMEDTENSOR static inline Tensor ones(IntArrayRef size, c10::optional names, const TensorOptions & options={}); #endif static inline Tensor ones(IntArrayRef size, const TensorOptions & options={}); static inline Tensor & ones_out(Tensor & out, IntArrayRef size); static inline Tensor ones_like(const Tensor & self); static inline Tensor ones_like(const Tensor & self, const TensorOptions & options); static inline Tensor pairwise_distance(const Tensor & x1, const Tensor & x2, double p=2, double eps=1e-06, bool keepdim=false); static inline Tensor cdist(const Tensor & x1, const Tensor & x2, double p=2); static inline Tensor _cdist_backward(const Tensor & grad, const Tensor & x1, const Tensor & x2, double p, const Tensor & cdist); static inline Tensor pdist(const Tensor & self, double p=2); static inline Tensor _pdist_forward(const Tensor & self, double p=2); static inline Tensor _pdist_backward(const Tensor & grad, const Tensor & self, double p, const Tensor & pdist); static inline Tensor cosine_similarity(const Tensor & x1, const Tensor & x2, int64_t dim=1, double eps=1e-08); static inline Tensor pixel_shuffle(const Tensor & self, int64_t upscale_factor); static inline Tensor pinverse(const Tensor & self, double rcond=1e-15); static inline Tensor poisson_nll_loss(const Tensor & input, const Tensor & target, bool log_input, bool full, double eps, int64_t reduction); static inline Tensor scalar_tensor(Scalar s, const TensorOptions & options={}); #ifdef BUILD_NAMEDTENSOR static inline Tensor rand(IntArrayRef size, c10::optional names, const TensorOptions & options={}); #endif #ifdef BUILD_NAMEDTENSOR static inline Tensor rand(IntArrayRef size, Generator * generator, c10::optional names, const TensorOptions & options={}); #endif static inline Tensor rand(IntArrayRef size, const TensorOptions & options={}); static inline Tensor rand(IntArrayRef size, Generator * generator, const TensorOptions & options={}); static inline Tensor & rand_out(Tensor & out, IntArrayRef size); static inline Tensor & rand_out(Tensor & out, IntArrayRef size, Generator * generator); static inline Tensor rand_like(const Tensor & self); static inline Tensor rand_like(const Tensor & self, const TensorOptions & options); static inline Tensor randint(int64_t high, IntArrayRef size, const TensorOptions & options={}); static inline Tensor randint(int64_t high, IntArrayRef size, Generator * generator, const TensorOptions & options={}); static inline Tensor randint(int64_t low, int64_t high, IntArrayRef size, const TensorOptions & options={}); static inline Tensor randint(int64_t low, int64_t high, 
IntArrayRef size, Generator * generator, const TensorOptions & options={}); static inline Tensor & randint_out(Tensor & out, int64_t high, IntArrayRef size); static inline Tensor & randint_out(Tensor & out, int64_t high, IntArrayRef size, Generator * generator); static inline Tensor & randint_out(Tensor & out, int64_t low, int64_t high, IntArrayRef size); static inline Tensor & randint_out(Tensor & out, int64_t low, int64_t high, IntArrayRef size, Generator * generator); static inline Tensor randint_like(const Tensor & self, int64_t high); static inline Tensor randint_like(const Tensor & self, int64_t low, int64_t high); static inline Tensor randint_like(const Tensor & self, int64_t high, const TensorOptions & options); static inline Tensor randint_like(const Tensor & self, int64_t low, int64_t high, const TensorOptions & options); static inline Tensor randn(IntArrayRef size, const TensorOptions & options={}); static inline Tensor randn(IntArrayRef size, Generator * generator, const TensorOptions & options={}); #ifdef BUILD_NAMEDTENSOR static inline Tensor randn(IntArrayRef size, c10::optional names, const TensorOptions & options={}); #endif #ifdef BUILD_NAMEDTENSOR static inline Tensor randn(IntArrayRef size, Generator * generator, c10::optional names, const TensorOptions & options={}); #endif static inline Tensor & randn_out(Tensor & out, IntArrayRef size); static inline Tensor & randn_out(Tensor & out, IntArrayRef size, Generator * generator); static inline Tensor randn_like(const Tensor & self); static inline Tensor randn_like(const Tensor & self, const TensorOptions & options); static inline Tensor randperm(int64_t n, const TensorOptions & options={}); static inline Tensor randperm(int64_t n, Generator * generator, const TensorOptions & options={}); static inline Tensor & randperm_out(Tensor & out, int64_t n); static inline Tensor & randperm_out(Tensor & out, int64_t n, Generator * generator); static inline Tensor range(Scalar start, Scalar end, Scalar step=1, const TensorOptions & options={}); static inline Tensor range(Scalar start, Scalar end, const TensorOptions & options={}); static inline Tensor & range_out(Tensor & out, Scalar start, Scalar end, Scalar step=1); static inline Tensor reciprocal(const Tensor & self); static inline Tensor & reciprocal_(Tensor & self); static inline Tensor & reciprocal_out(Tensor & out, const Tensor & self); static inline Tensor neg(const Tensor & self); static inline Tensor & neg_(Tensor & self); static inline Tensor & neg_out(Tensor & out, const Tensor & self); static inline Tensor repeat_interleave(const Tensor & repeats); static inline Tensor repeat_interleave(const Tensor & self, const Tensor & repeats, c10::optional dim=c10::nullopt); static inline Tensor repeat_interleave(const Tensor & self, int64_t repeats, c10::optional dim=c10::nullopt); static inline Tensor reshape(const Tensor & self, IntArrayRef shape); static inline Tensor _mkldnn_reshape(const Tensor & self, IntArrayRef shape); static inline Tensor round(const Tensor & self); static inline Tensor & round_(Tensor & self); static inline Tensor & round_out(Tensor & out, const Tensor & self); static inline Tensor rrelu(const Tensor & self, Scalar lower=0.125, Scalar upper=0.333333333333, bool training=false, Generator * generator=nullptr); static inline Tensor & rrelu_(Tensor & self, Scalar lower=0.125, Scalar upper=0.333333333333, bool training=false, Generator * generator=nullptr); static inline Tensor relu(const Tensor & self); static inline Tensor & relu_(Tensor & self); static 
inline Tensor prelu(const Tensor & self, const Tensor & weight); static inline std::tuple prelu_backward(const Tensor & grad_output, const Tensor & self, const Tensor & weight); static inline Tensor gelu(const Tensor & self); static inline Tensor gelu_backward(const Tensor & grad, const Tensor & self); static inline Tensor hardshrink(const Tensor & self, Scalar lambd=0.5); static inline Tensor hardshrink_backward(const Tensor & grad_out, const Tensor & self, Scalar lambd); static inline Tensor rsqrt(const Tensor & self); static inline Tensor & rsqrt_(Tensor & self); static inline Tensor & rsqrt_out(Tensor & out, const Tensor & self); #ifdef BUILD_NAMEDTENSOR static inline Tensor select(const Tensor & self, Dimname dim, int64_t index); #endif static inline Tensor select(const Tensor & self, int64_t dim, int64_t index); static inline Tensor selu(const Tensor & self); static inline Tensor & selu_(Tensor & self); static inline Tensor celu(const Tensor & self, Scalar alpha=1.0); static inline Tensor & celu_(Tensor & self, Scalar alpha=1.0); static inline Tensor sigmoid(const Tensor & self); static inline Tensor & sigmoid_(Tensor & self); static inline Tensor & sigmoid_out(Tensor & out, const Tensor & self); static inline Tensor sin(const Tensor & self); static inline Tensor & sin_(Tensor & self); static inline Tensor & sin_out(Tensor & out, const Tensor & self); static inline Tensor sinh(const Tensor & self); static inline Tensor & sinh_(Tensor & self); static inline Tensor & sinh_out(Tensor & out, const Tensor & self); static inline Tensor detach(const Tensor & self); static inline Tensor & detach_(Tensor & self); static inline int64_t size(const Tensor & self, int64_t dim); #ifdef BUILD_NAMEDTENSOR static inline int64_t size(const Tensor & self, Dimname dim); #endif static inline Tensor slice(const Tensor & self, int64_t dim=0, int64_t start=0, int64_t end=9223372036854775807, int64_t step=1); static inline std::tuple slogdet(const Tensor & self); static inline Tensor smm(const Tensor & self, const Tensor & mat2); static inline Tensor softmax(const Tensor & self, int64_t dim, c10::optional dtype=c10::nullopt); #ifdef BUILD_NAMEDTENSOR static inline Tensor softmax(const Tensor & self, Dimname dim, c10::optional dtype=c10::nullopt); #endif static inline Tensor _softmax(const Tensor & self, int64_t dim, bool half_to_float); static inline Tensor _softmax_backward_data(const Tensor & grad_output, const Tensor & output, int64_t dim, const Tensor & self); static inline std::vector split(const Tensor & self, int64_t split_size, int64_t dim=0); static inline std::vector split_with_sizes(const Tensor & self, IntArrayRef split_sizes, int64_t dim=0); static inline Tensor squeeze(const Tensor & self); static inline Tensor squeeze(const Tensor & self, int64_t dim); #ifdef BUILD_NAMEDTENSOR static inline Tensor squeeze(const Tensor & self, Dimname dim); #endif static inline Tensor sspaddmm(const Tensor & self, const Tensor & mat1, const Tensor & mat2, Scalar beta=1, Scalar alpha=1); static inline Tensor & sspaddmm_out(Tensor & out, const Tensor & self, const Tensor & mat1, const Tensor & mat2, Scalar beta=1, Scalar alpha=1); static inline Tensor stack(TensorList tensors, int64_t dim=0); static inline Tensor & stack_out(Tensor & out, TensorList tensors, int64_t dim=0); static inline Tensor stft(const Tensor & self, int64_t n_fft, c10::optional hop_length=c10::nullopt, c10::optional win_length=c10::nullopt, const Tensor & window={}, bool normalized=false, bool onesided=true); static inline int64_t 
stride(const Tensor & self, int64_t dim); #ifdef BUILD_NAMEDTENSOR static inline int64_t stride(const Tensor & self, Dimname dim); #endif static inline Tensor sum(const Tensor & self, c10::optional<ScalarType> dtype=c10::nullopt); static inline Tensor sum(const Tensor & self, IntArrayRef dim, bool keepdim=false, c10::optional<ScalarType> dtype=c10::nullopt); #ifdef BUILD_NAMEDTENSOR static inline Tensor sum(const Tensor & self, DimnameList dim, bool keepdim=false, c10::optional<ScalarType> dtype=c10::nullopt); #endif static inline Tensor & sum_out(Tensor & out, const Tensor & self, IntArrayRef dim, bool keepdim=false, c10::optional<ScalarType> dtype=c10::nullopt); #ifdef BUILD_NAMEDTENSOR static inline Tensor & sum_out(Tensor & out, const Tensor & self, DimnameList dim, bool keepdim=false, c10::optional<ScalarType> dtype=c10::nullopt); #endif static inline Tensor sqrt(const Tensor & self); static inline Tensor & sqrt_(Tensor & self); static inline Tensor & sqrt_out(Tensor & out, const Tensor & self); static inline Tensor std(const Tensor & self, bool unbiased=true); static inline Tensor std(const Tensor & self, IntArrayRef dim, bool unbiased=true, bool keepdim=false); static inline std::tuple<Tensor,Tensor> std_mean(const Tensor & self, bool unbiased=true); static inline std::tuple<Tensor,Tensor> std_mean(const Tensor & self, IntArrayRef dim, bool unbiased=true, bool keepdim=false); #ifdef BUILD_NAMEDTENSOR static inline std::tuple<Tensor,Tensor> std_mean(const Tensor & self, DimnameList dim, bool unbiased=true, bool keepdim=false); #endif static inline Tensor & std_out(Tensor & out, const Tensor & self, IntArrayRef dim, bool unbiased=true, bool keepdim=false); #ifdef BUILD_NAMEDTENSOR static inline Tensor std(const Tensor & self, DimnameList dim, bool unbiased=true, bool keepdim=false); #endif #ifdef BUILD_NAMEDTENSOR static inline Tensor & std_out(Tensor & out, const Tensor & self, DimnameList dim, bool unbiased=true, bool keepdim=false); #endif static inline Tensor prod(const Tensor & self, c10::optional<ScalarType> dtype=c10::nullopt); static inline Tensor prod(const Tensor & self, int64_t dim, bool keepdim=false, c10::optional<ScalarType> dtype=c10::nullopt); static inline Tensor & prod_out(Tensor & out, const Tensor & self, int64_t dim, bool keepdim=false, c10::optional<ScalarType> dtype=c10::nullopt); #ifdef BUILD_NAMEDTENSOR static inline Tensor prod(const Tensor & self, Dimname dim, bool keepdim=false, c10::optional<ScalarType> dtype=c10::nullopt); #endif #ifdef BUILD_NAMEDTENSOR static inline Tensor & prod_out(Tensor & out, const Tensor & self, Dimname dim, bool keepdim=false, c10::optional<ScalarType> dtype=c10::nullopt); #endif static inline Tensor t(const Tensor & self); static inline Tensor tan(const Tensor & self); static inline Tensor & tan_(Tensor & self); static inline Tensor & tan_out(Tensor & out, const Tensor & self); static inline Tensor tanh(const Tensor & self); static inline Tensor & tanh_(Tensor & self); static inline Tensor & tanh_out(Tensor & out, const Tensor & self); static inline Tensor tensordot(const Tensor & self, const Tensor & other, IntArrayRef dims_self, IntArrayRef dims_other); static inline Tensor threshold(const Tensor & self, Scalar threshold, Scalar value); static inline Tensor & threshold_(Tensor & self, Scalar threshold, Scalar value); static inline Tensor & threshold_out(Tensor & out, const Tensor & self, Scalar threshold, Scalar value); static inline Tensor threshold_backward(const Tensor & grad_output, const Tensor & self, Scalar threshold); static inline Tensor transpose(const Tensor & self, int64_t dim0, int64_t dim1); #ifdef BUILD_NAMEDTENSOR static inline Tensor transpose(const Tensor & self, Dimname
dim0, Dimname dim1); #endif static inline Tensor _mkldnn_transpose(const Tensor & self, int64_t dim0, int64_t dim1); static inline Tensor & _mkldnn_transpose_(Tensor & self, int64_t dim0, int64_t dim1); static inline Tensor one_hot(const Tensor & self, int64_t num_classes=-1); static inline Tensor flip(const Tensor & self, IntArrayRef dims); static inline Tensor roll(const Tensor & self, IntArrayRef shifts, IntArrayRef dims={}); static inline Tensor rot90(const Tensor & self, int64_t k=1, IntArrayRef dims={0,1}); static inline Tensor trapz(const Tensor & y, const Tensor & x, int64_t dim=-1); static inline Tensor trapz(const Tensor & y, double dx=1, int64_t dim=-1); static inline Tensor _trilinear(const Tensor & i1, const Tensor & i2, const Tensor & i3, IntArrayRef expand1, IntArrayRef expand2, IntArrayRef expand3, IntArrayRef sumdim, int64_t unroll_dim=1); static inline Tensor triplet_margin_loss(const Tensor & anchor, const Tensor & positive, const Tensor & negative, double margin=1.0, double p=2, double eps=1e-06, bool swap=false, int64_t reduction=Reduction::Mean); static inline Tensor trunc(const Tensor & self); static inline Tensor & trunc_(Tensor & self); static inline Tensor & trunc_out(Tensor & out, const Tensor & self); static inline bool _has_compatible_shallow_copy_type(const Tensor & self, const Tensor & from); static inline std::tuple<Tensor,Tensor> _unique(const Tensor & self, bool sorted=true, bool return_inverse=false); static inline std::tuple<Tensor,Tensor,Tensor> unique_dim(const Tensor & self, int64_t dim, bool sorted=true, bool return_inverse=false, bool return_counts=false); static inline std::tuple<Tensor,Tensor,Tensor> unique_consecutive(const Tensor & self, bool return_inverse=false, bool return_counts=false, c10::optional<int64_t> dim=c10::nullopt); static inline std::tuple<Tensor,Tensor,Tensor> unique_dim_consecutive(const Tensor & self, int64_t dim, bool return_inverse=false, bool return_counts=false); static inline std::tuple<Tensor,Tensor,Tensor> _unique2(const Tensor & self, bool sorted=true, bool return_inverse=false, bool return_counts=false); static inline Tensor _unsafe_view(const Tensor & self, IntArrayRef size); static inline Tensor unsqueeze(const Tensor & self, int64_t dim); static inline Tensor var(const Tensor & self, bool unbiased=true); static inline Tensor var(const Tensor & self, IntArrayRef dim, bool unbiased=true, bool keepdim=false); static inline Tensor & var_out(Tensor & out, const Tensor & self, IntArrayRef dim, bool unbiased=true, bool keepdim=false); #ifdef BUILD_NAMEDTENSOR static inline Tensor var(const Tensor & self, DimnameList dim, bool unbiased=true, bool keepdim=false); #endif #ifdef BUILD_NAMEDTENSOR static inline Tensor & var_out(Tensor & out, const Tensor & self, DimnameList dim, bool unbiased=true, bool keepdim=false); #endif static inline std::tuple<Tensor,Tensor> var_mean(const Tensor & self, bool unbiased=true); static inline std::tuple<Tensor,Tensor> var_mean(const Tensor & self, IntArrayRef dim, bool unbiased=true, bool keepdim=false); #ifdef BUILD_NAMEDTENSOR static inline std::tuple<Tensor,Tensor> var_mean(const Tensor & self, DimnameList dim, bool unbiased=true, bool keepdim=false); #endif static inline Tensor where(const Tensor & condition, const Tensor & self, const Tensor & other); static inline std::vector<Tensor> where(const Tensor & condition); static inline Tensor _s_where(const Tensor & condition, const Tensor & self, const Tensor & other); static inline Tensor norm_except_dim(const Tensor & v, int64_t pow=2, int64_t dim=0); static inline Tensor _weight_norm(const Tensor & v, const Tensor & g, int64_t dim=0); static inline std::tuple<Tensor,Tensor> _weight_norm_cuda_interface(const
Tensor & v, const Tensor & g, int64_t dim=0); static inline std::tuple _weight_norm_cuda_interface_backward(const Tensor & grad_w, const Tensor & saved_v, const Tensor & saved_g, const Tensor & saved_norms, int64_t dim); static inline std::tuple _weight_norm_differentiable_backward(const Tensor & grad_w, const Tensor & saved_v, const Tensor & saved_g, const Tensor & saved_norms, int64_t dim); #ifdef BUILD_NAMEDTENSOR static inline Tensor zeros(IntArrayRef size, c10::optional names, const TensorOptions & options={}); #endif static inline Tensor zeros(IntArrayRef size, const TensorOptions & options={}); static inline Tensor & zeros_out(Tensor & out, IntArrayRef size); static inline Tensor zeros_like(const Tensor & self); static inline Tensor zeros_like(const Tensor & self, const TensorOptions & options); static inline Tensor _standard_gamma_grad(const Tensor & self, const Tensor & output); static inline Tensor _standard_gamma(const Tensor & self, Generator * generator=nullptr); static inline Tensor _dirichlet_grad(const Tensor & x, const Tensor & alpha, const Tensor & total); static inline Tensor _sample_dirichlet(const Tensor & self, Generator * generator=nullptr); static inline Tensor poisson(const Tensor & self, Generator * generator=nullptr); static inline Tensor native_norm(const Tensor & self, Scalar p=2); static inline Tensor _sparse_sum(const Tensor & self); static inline Tensor _sparse_sum(const Tensor & self, ScalarType dtype); static inline Tensor _sparse_sum(const Tensor & self, IntArrayRef dim); static inline Tensor _sparse_sum(const Tensor & self, IntArrayRef dim, ScalarType dtype); static inline Tensor _sparse_sum_backward(const Tensor & grad, const Tensor & self, IntArrayRef dim); static inline Tensor norm(const Tensor & self, c10::optional p, ScalarType dtype); static inline Tensor norm(const Tensor & self, Scalar p=2); static inline Tensor norm(const Tensor & self, c10::optional p, IntArrayRef dim, bool keepdim, ScalarType dtype); static inline Tensor norm(const Tensor & self, c10::optional p, IntArrayRef dim, bool keepdim=false); static inline Tensor & norm_out(Tensor & out, const Tensor & self, c10::optional p, IntArrayRef dim, bool keepdim, ScalarType dtype); static inline Tensor & norm_out(Tensor & out, const Tensor & self, c10::optional p, IntArrayRef dim, bool keepdim=false); #ifdef BUILD_NAMEDTENSOR static inline Tensor norm(const Tensor & self, c10::optional p, DimnameList dim, bool keepdim, ScalarType dtype); #endif #ifdef BUILD_NAMEDTENSOR static inline Tensor norm(const Tensor & self, c10::optional p, DimnameList dim, bool keepdim=false); #endif #ifdef BUILD_NAMEDTENSOR static inline Tensor & norm_out(Tensor & out, const Tensor & self, c10::optional p, DimnameList dim, bool keepdim, ScalarType dtype); #endif #ifdef BUILD_NAMEDTENSOR static inline Tensor & norm_out(Tensor & out, const Tensor & self, c10::optional p, DimnameList dim, bool keepdim=false); #endif static inline Tensor frobenius_norm(const Tensor & self); static inline Tensor frobenius_norm(const Tensor & self, IntArrayRef dim, bool keepdim=false); static inline Tensor & frobenius_norm_out(Tensor & out, const Tensor & self, IntArrayRef dim, bool keepdim=false); static inline Tensor nuclear_norm(const Tensor & self, bool keepdim=false); static inline Tensor & nuclear_norm_out(Tensor & out, const Tensor & self, bool keepdim=false); static inline Tensor nuclear_norm(const Tensor & self, IntArrayRef dim, bool keepdim=false); static inline Tensor & nuclear_norm_out(Tensor & out, const Tensor & self, 
IntArrayRef dim, bool keepdim=false); static inline Tensor clone(const Tensor & self); static inline Tensor & resize_as_(Tensor & self, const Tensor & the_template); static inline Tensor & pow_out(Tensor & out, const Tensor & self, Scalar exponent); static inline Tensor pow(const Tensor & self, Scalar exponent); static inline Tensor & zero_(Tensor & self); static inline Tensor & sub_out(Tensor & out, const Tensor & self, const Tensor & other, Scalar alpha=1); static inline Tensor sub(const Tensor & self, const Tensor & other, Scalar alpha=1); static inline Tensor sub(const Tensor & self, Scalar other, Scalar alpha=1); static inline Tensor rsub(const Tensor & self, const Tensor & other, Scalar alpha=1); static inline Tensor rsub(const Tensor & self, Scalar other, Scalar alpha=1); static inline Tensor _sparse_addmm(const Tensor & self, const Tensor & sparse, const Tensor & dense, Scalar beta=1, Scalar alpha=1); static inline Tensor & addmm_out(Tensor & out, const Tensor & self, const Tensor & mat1, const Tensor & mat2, Scalar beta=1, Scalar alpha=1); static inline Tensor addmm(const Tensor & self, const Tensor & mat1, const Tensor & mat2, Scalar beta=1, Scalar alpha=1); static inline Tensor sparse_coo_tensor(IntArrayRef size, const TensorOptions & options); static inline Tensor sparse_coo_tensor(const Tensor & indices, const Tensor & values, const TensorOptions & options={}); static inline Tensor sparse_coo_tensor(const Tensor & indices, const Tensor & values, IntArrayRef size, const TensorOptions & options={}); static inline Tensor _sparse_coo_tensor_unsafe(const Tensor & indices, const Tensor & values, IntArrayRef size, const TensorOptions & options={}); static inline Tensor _sparse_coo_tensor_with_dims(int64_t sparse_dim, int64_t dense_dim, IntArrayRef size, const TensorOptions & options); static inline Tensor _sparse_coo_tensor_with_dims_and_tensors(int64_t sparse_dim, int64_t dense_dim, IntArrayRef size, const Tensor & indices, const Tensor & values, const TensorOptions & options); static inline Tensor to_dense_backward(const Tensor & grad, const Tensor & input); static inline Tensor & hspmm_out(Tensor & out, const Tensor & mat1, const Tensor & mat2); static inline Tensor hspmm(const Tensor & mat1, const Tensor & mat2); static inline Tensor & copy_sparse_to_sparse_(Tensor & self, const Tensor & src, bool non_blocking=false); static inline int64_t numel(const Tensor & self); static inline std::vector unbind(const Tensor & self, int64_t dim=0); #ifdef BUILD_NAMEDTENSOR static inline std::vector unbind(const Tensor & self, Dimname dim); #endif static inline Tensor mkldnn_reorder_conv2d_weight(const Tensor & self, IntArrayRef padding=0, IntArrayRef stride=1, IntArrayRef dilation=1, int64_t groups=1); static inline Tensor to_mkldnn_backward(const Tensor & grad, const Tensor & input); static inline Tensor quantize_per_tensor(const Tensor & self, double scale, int64_t zero_point, ScalarType dtype); static inline Tensor quantize_per_channel(const Tensor & self, const Tensor & scales, const Tensor & zero_points, int64_t axis, ScalarType dtype); static inline Tensor dequantize(const Tensor & self); static inline double q_scale(const Tensor & self); static inline int64_t q_zero_point(const Tensor & self); static inline Tensor q_per_channel_scales(const Tensor & self); static inline Tensor q_per_channel_zero_points(const Tensor & self); static inline int64_t q_per_channel_axis(const Tensor & self); static inline Tensor int_repr(const Tensor & self); static inline Tensor 
_make_per_tensor_quantized_tensor(const Tensor & self, double scale, int64_t zero_point); static inline Tensor _make_per_channel_quantized_tensor(const Tensor & self, const Tensor & scale, const Tensor & zero_point, int64_t axis); static inline Tensor fake_quantize_per_tensor_affine(const Tensor & self, double scale, int64_t zero_point, int64_t quant_min, int64_t quant_max); static inline Tensor fake_quantize_per_tensor_affine_backward(const Tensor & grad, const Tensor & self, double scale, int64_t zero_point, int64_t quant_min, int64_t quant_max); static inline Tensor fake_quantize_per_channel_affine(const Tensor & self, const Tensor & scale, const Tensor & zero_point, int64_t axis, int64_t quant_min, int64_t quant_max); static inline Tensor fake_quantize_per_channel_affine_backward(const Tensor & grad, const Tensor & self, const Tensor & scale, const Tensor & zero_point, int64_t axis, int64_t quant_min, int64_t quant_max); static inline std::vector<Tensor> meshgrid(TensorList tensors); static inline Tensor cartesian_prod(TensorList tensors); static inline Tensor combinations(const Tensor & self, int64_t r=2, bool with_replacement=false); static inline ScalarType result_type(const Tensor & tensor, const Tensor & other); static inline ScalarType result_type(const Tensor & tensor, Scalar other); static inline ScalarType result_type(Scalar scalar, const Tensor & tensor); static inline ScalarType result_type(Scalar scalar1, Scalar scalar2); static inline bool can_cast(ScalarType from, ScalarType to); static inline ScalarType promote_types(ScalarType type1, ScalarType type2); static inline Scalar _local_scalar_dense(const Tensor & self); static inline std::tuple<Tensor,Tensor,Tensor> _thnn_fused_lstm_cell(const Tensor & input_gates, const Tensor & hidden_gates, const Tensor & cx, const Tensor & input_bias={}, const Tensor & hidden_bias={}); static inline std::tuple<Tensor,Tensor,Tensor,Tensor,Tensor> _thnn_fused_lstm_cell_backward(const Tensor & grad_hy, const Tensor & grad_cy, const Tensor & cx, const Tensor & cy, const Tensor & workspace, bool has_bias); static inline std::tuple<Tensor,Tensor,Tensor,Tensor,Tensor> _thnn_differentiable_lstm_cell_backward(const Tensor & grad_hy, const Tensor & grad_cy, const Tensor & input_gates, const Tensor & hidden_gates, const Tensor & input_bias, const Tensor & hidden_bias, const Tensor & cx, const Tensor & cy); static inline std::tuple<Tensor,Tensor> _thnn_fused_gru_cell(const Tensor & input_gates, const Tensor & hidden_gates, const Tensor & hx, const Tensor & input_bias={}, const Tensor & hidden_bias={}); static inline std::tuple<Tensor,Tensor,Tensor,Tensor,Tensor> _thnn_fused_gru_cell_backward(const Tensor & grad_hy, const Tensor & workspace, bool has_bias); static inline std::tuple<Tensor,Tensor,Tensor,Tensor,Tensor> _thnn_differentiable_gru_cell_backward(const Tensor & grad_hy, const Tensor & input_gates, const Tensor & hidden_gates, const Tensor & hx, const Tensor & input_bias, const Tensor & hidden_bias); static inline std::tuple<Tensor,Tensor,Tensor> lstm(const Tensor & input, TensorList hx, TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first); static inline std::tuple<Tensor,Tensor,Tensor> lstm(const Tensor & data, const Tensor & batch_sizes, TensorList hx, TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional); static inline std::tuple<Tensor,Tensor> gru(const Tensor & input, const Tensor & hx, TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first); static inline std::tuple<Tensor,Tensor> gru(const Tensor & data, const Tensor & batch_sizes, const Tensor & hx, TensorList params, bool has_biases, int64_t num_layers, double
dropout, bool train, bool bidirectional); static inline std::tuple rnn_tanh(const Tensor & input, const Tensor & hx, TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first); static inline std::tuple rnn_tanh(const Tensor & data, const Tensor & batch_sizes, const Tensor & hx, TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional); static inline std::tuple rnn_relu(const Tensor & input, const Tensor & hx, TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first); static inline std::tuple rnn_relu(const Tensor & data, const Tensor & batch_sizes, const Tensor & hx, TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional); static inline std::tuple lstm_cell(const Tensor & input, TensorList hx, const Tensor & w_ih, const Tensor & w_hh, const Tensor & b_ih={}, const Tensor & b_hh={}); static inline Tensor gru_cell(const Tensor & input, const Tensor & hx, const Tensor & w_ih, const Tensor & w_hh, const Tensor & b_ih={}, const Tensor & b_hh={}); static inline Tensor rnn_tanh_cell(const Tensor & input, const Tensor & hx, const Tensor & w_ih, const Tensor & w_hh, const Tensor & b_ih={}, const Tensor & b_hh={}); static inline Tensor rnn_relu_cell(const Tensor & input, const Tensor & hx, const Tensor & w_ih, const Tensor & w_hh, const Tensor & b_ih={}, const Tensor & b_hh={}); static inline std::tuple quantized_lstm(const Tensor & input, TensorList hx, TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first, c10::optional dtype=c10::nullopt, bool use_dynamic=false); static inline std::tuple quantized_gru(const Tensor & input, const Tensor & hx, TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first); static inline std::tuple quantized_gru(const Tensor & data, const Tensor & batch_sizes, const Tensor & hx, TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional); static inline std::tuple quantized_lstm_cell(const Tensor & input, TensorList hx, const Tensor & w_ih, const Tensor & w_hh, const Tensor & b_ih, const Tensor & b_hh, const Tensor & packed_ih, const Tensor & packed_hh, const Tensor & col_offsets_ih, const Tensor & col_offsets_hh, Scalar scale_ih, Scalar scale_hh, Scalar zero_point_ih, Scalar zero_point_hh); static inline Tensor quantized_gru_cell(const Tensor & input, const Tensor & hx, const Tensor & w_ih, const Tensor & w_hh, const Tensor & b_ih, const Tensor & b_hh, const Tensor & packed_ih, const Tensor & packed_hh, const Tensor & col_offsets_ih, const Tensor & col_offsets_hh, Scalar scale_ih, Scalar scale_hh, Scalar zero_point_ih, Scalar zero_point_hh); static inline Tensor quantized_rnn_relu_cell(const Tensor & input, const Tensor & hx, const Tensor & w_ih, const Tensor & w_hh, const Tensor & b_ih, const Tensor & b_hh, const Tensor & packed_ih, const Tensor & packed_hh, const Tensor & col_offsets_ih, const Tensor & col_offsets_hh, Scalar scale_ih, Scalar scale_hh, Scalar zero_point_ih, Scalar zero_point_hh); static inline Tensor quantized_rnn_tanh_cell(const Tensor & input, const Tensor & hx, const Tensor & w_ih, const Tensor & w_hh, const Tensor & b_ih, const Tensor & b_hh, const Tensor & packed_ih, const Tensor & packed_hh, const Tensor & col_offsets_ih, const Tensor & col_offsets_hh, Scalar 
scale_ih, Scalar scale_hh, Scalar zero_point_ih, Scalar zero_point_hh); static inline std::tuple _pack_padded_sequence(const Tensor & input, const Tensor & lengths, bool batch_first); static inline Tensor _pack_padded_sequence_backward(const Tensor & grad, IntArrayRef input_size, const Tensor & batch_sizes, bool batch_first); static inline std::tuple _pad_packed_sequence(const Tensor & data, const Tensor & batch_sizes, bool batch_first, Scalar padding_value, int64_t total_length); static inline Tensor masked_fill(const Tensor & self, const Tensor & mask, Scalar value); static inline Tensor masked_fill(const Tensor & self, const Tensor & mask, const Tensor & value); static inline Tensor masked_scatter(const Tensor & self, const Tensor & mask, const Tensor & source); static inline Tensor index_add(const Tensor & self, int64_t dim, const Tensor & index, const Tensor & source); #ifdef BUILD_NAMEDTENSOR static inline Tensor index_add(const Tensor & self, Dimname dim, const Tensor & index, const Tensor & source); #endif static inline Tensor index_fill(const Tensor & self, int64_t dim, const Tensor & index, Scalar value); static inline Tensor index_fill(const Tensor & self, int64_t dim, const Tensor & index, const Tensor & value); #ifdef BUILD_NAMEDTENSOR static inline Tensor index_fill(const Tensor & self, Dimname dim, const Tensor & index, Scalar value); #endif #ifdef BUILD_NAMEDTENSOR static inline Tensor index_fill(const Tensor & self, Dimname dim, const Tensor & index, const Tensor & value); #endif static inline Tensor scatter(const Tensor & self, int64_t dim, const Tensor & index, const Tensor & src); static inline Tensor scatter(const Tensor & self, int64_t dim, const Tensor & index, Scalar value); #ifdef BUILD_NAMEDTENSOR static inline Tensor scatter(const Tensor & self, Dimname dim, const Tensor & index, const Tensor & src); #endif #ifdef BUILD_NAMEDTENSOR static inline Tensor scatter(const Tensor & self, Dimname dim, const Tensor & index, Scalar value); #endif static inline Tensor scatter_add(const Tensor & self, int64_t dim, const Tensor & index, const Tensor & src); #ifdef BUILD_NAMEDTENSOR static inline Tensor scatter_add(const Tensor & self, Dimname dim, const Tensor & index, const Tensor & src); #endif static inline Tensor __and__(const Tensor & self, Scalar other); static inline Tensor __and__(const Tensor & self, const Tensor & other); static inline Tensor __or__(const Tensor & self, Scalar other); static inline Tensor __or__(const Tensor & self, const Tensor & other); static inline Tensor __xor__(const Tensor & self, Scalar other); static inline Tensor __xor__(const Tensor & self, const Tensor & other); static inline Tensor __lshift__(const Tensor & self, Scalar other); static inline Tensor __lshift__(const Tensor & self, const Tensor & other); static inline Tensor __rshift__(const Tensor & self, Scalar other); static inline Tensor __rshift__(const Tensor & self, const Tensor & other); static inline Tensor & addbmm_out(Tensor & out, const Tensor & self, const Tensor & batch1, const Tensor & batch2, Scalar beta=1, Scalar alpha=1); static inline Tensor addbmm(const Tensor & self, const Tensor & batch1, const Tensor & batch2, Scalar beta=1, Scalar alpha=1); static inline Tensor & diag_out(Tensor & out, const Tensor & self, int64_t diagonal=0); static inline Tensor diag(const Tensor & self, int64_t diagonal=0); static inline Tensor & cross_out(Tensor & out, const Tensor & self, const Tensor & other, c10::optional dim=c10::nullopt); static inline Tensor cross(const Tensor & self, 
const Tensor & other, c10::optional dim=c10::nullopt); static inline Tensor & triu_out(Tensor & out, const Tensor & self, int64_t diagonal=0); static inline Tensor triu(const Tensor & self, int64_t diagonal=0); static inline Tensor & tril_out(Tensor & out, const Tensor & self, int64_t diagonal=0); static inline Tensor tril(const Tensor & self, int64_t diagonal=0); static inline Tensor tril_indices(int64_t row, int64_t col, int64_t offset=0, const TensorOptions & options=at::kLong); static inline Tensor triu_indices(int64_t row, int64_t col, int64_t offset=0, const TensorOptions & options=at::kLong); static inline Tensor trace(const Tensor & self); static inline Tensor & ne_out(Tensor & out, const Tensor & self, Scalar other); static inline Tensor ne(const Tensor & self, Scalar other); static inline Tensor & ne_out(Tensor & out, const Tensor & self, const Tensor & other); static inline Tensor ne(const Tensor & self, const Tensor & other); static inline Tensor & eq_out(Tensor & out, const Tensor & self, Scalar other); static inline Tensor eq(const Tensor & self, Scalar other); static inline Tensor & eq_out(Tensor & out, const Tensor & self, const Tensor & other); static inline Tensor eq(const Tensor & self, const Tensor & other); static inline Tensor & ge_out(Tensor & out, const Tensor & self, Scalar other); static inline Tensor ge(const Tensor & self, Scalar other); static inline Tensor & ge_out(Tensor & out, const Tensor & self, const Tensor & other); static inline Tensor ge(const Tensor & self, const Tensor & other); static inline Tensor & le_out(Tensor & out, const Tensor & self, Scalar other); static inline Tensor le(const Tensor & self, Scalar other); static inline Tensor & le_out(Tensor & out, const Tensor & self, const Tensor & other); static inline Tensor le(const Tensor & self, const Tensor & other); static inline Tensor & gt_out(Tensor & out, const Tensor & self, Scalar other); static inline Tensor gt(const Tensor & self, Scalar other); static inline Tensor & gt_out(Tensor & out, const Tensor & self, const Tensor & other); static inline Tensor gt(const Tensor & self, const Tensor & other); static inline Tensor & lt_out(Tensor & out, const Tensor & self, Scalar other); static inline Tensor lt(const Tensor & self, Scalar other); static inline Tensor & lt_out(Tensor & out, const Tensor & self, const Tensor & other); static inline Tensor lt(const Tensor & self, const Tensor & other); static inline Tensor & take_out(Tensor & out, const Tensor & self, const Tensor & index); static inline Tensor take(const Tensor & self, const Tensor & index); static inline Tensor & index_select_out(Tensor & out, const Tensor & self, int64_t dim, const Tensor & index); static inline Tensor index_select(const Tensor & self, int64_t dim, const Tensor & index); #ifdef BUILD_NAMEDTENSOR static inline Tensor & index_select_out(Tensor & out, const Tensor & self, Dimname dim, const Tensor & index); #endif #ifdef BUILD_NAMEDTENSOR static inline Tensor index_select(const Tensor & self, Dimname dim, const Tensor & index); #endif static inline Tensor & masked_select_out(Tensor & out, const Tensor & self, const Tensor & mask); static inline Tensor masked_select(const Tensor & self, const Tensor & mask); static inline Tensor & nonzero_out(Tensor & out, const Tensor & self); static inline Tensor nonzero(const Tensor & self); static inline std::vector nonzero_numpy(const Tensor & self); static inline Tensor & gather_out(Tensor & out, const Tensor & self, int64_t dim, const Tensor & index, bool sparse_grad=false); 
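// Illustrative usage sketch, not emitted by gen.py: composes a few of the comparison
// and selection functions declared above. The function name, shapes, and values below
// are arbitrary example choices, assuming the standard ATen call semantics.
static inline void example_select_usage_sketch() {
  Tensor x = randn({3, 4});                        // random 3x4 tensor
  Tensor mask = gt(x, 0);                          // elementwise x > 0
  Tensor positives = masked_select(x, mask);       // 1-D tensor of the entries where mask is true
  Tensor idx = arange(0, 3, 2, at::kLong);         // integer index tensor {0, 2}
  Tensor rows = index_select(x, /*dim=*/0, idx);   // rows 0 and 2 of x
}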
static inline Tensor gather(const Tensor & self, int64_t dim, const Tensor & index, bool sparse_grad=false); #ifdef BUILD_NAMEDTENSOR static inline Tensor & gather_out(Tensor & out, const Tensor & self, Dimname dim, const Tensor & index, bool sparse_grad=false); #endif #ifdef BUILD_NAMEDTENSOR static inline Tensor gather(const Tensor & self, Dimname dim, const Tensor & index, bool sparse_grad=false); #endif static inline Tensor _gather_sparse_backward(const Tensor & self, int64_t dim, const Tensor & index, const Tensor & grad); static inline Tensor & addcmul_out(Tensor & out, const Tensor & self, const Tensor & tensor1, const Tensor & tensor2, Scalar value=1); static inline Tensor addcmul(const Tensor & self, const Tensor & tensor1, const Tensor & tensor2, Scalar value=1); static inline Tensor & addcdiv_out(Tensor & out, const Tensor & self, const Tensor & tensor1, const Tensor & tensor2, Scalar value=1); static inline Tensor addcdiv(const Tensor & self, const Tensor & tensor1, const Tensor & tensor2, Scalar value=1); static inline std::tuple<Tensor &,Tensor &> lstsq_out(Tensor & X, Tensor & qr, const Tensor & self, const Tensor & A); static inline std::tuple<Tensor,Tensor> lstsq(const Tensor & self, const Tensor & A); static inline std::tuple<Tensor &,Tensor &> triangular_solve_out(Tensor & X, Tensor & M, const Tensor & self, const Tensor & A, bool upper=true, bool transpose=false, bool unitriangular=false); static inline std::tuple<Tensor,Tensor> triangular_solve(const Tensor & self, const Tensor & A, bool upper=true, bool transpose=false, bool unitriangular=false); static inline std::tuple<Tensor,Tensor> _triangular_solve_helper(const Tensor & self, const Tensor & A, bool upper, bool transpose, bool unitriangular); static inline std::tuple<Tensor &,Tensor &> symeig_out(Tensor & e, Tensor & V, const Tensor & self, bool eigenvectors=false, bool upper=true); static inline std::tuple<Tensor,Tensor> symeig(const Tensor & self, bool eigenvectors=false, bool upper=true); static inline std::tuple<Tensor,Tensor> _symeig_helper(const Tensor & self, bool eigenvectors, bool upper); static inline std::tuple<Tensor &,Tensor &> eig_out(Tensor & e, Tensor & v, const Tensor & self, bool eigenvectors=false); static inline std::tuple<Tensor,Tensor> eig(const Tensor & self, bool eigenvectors=false); static inline std::tuple<Tensor &,Tensor &,Tensor &> svd_out(Tensor & U, Tensor & S, Tensor & V, const Tensor & self, bool some=true, bool compute_uv=true); static inline std::tuple<Tensor,Tensor,Tensor> svd(const Tensor & self, bool some=true, bool compute_uv=true); static inline std::tuple<Tensor,Tensor,Tensor> _svd_helper(const Tensor & self, bool some, bool compute_uv); static inline Tensor & cholesky_out(Tensor & out, const Tensor & self, bool upper=false); static inline Tensor cholesky(const Tensor & self, bool upper=false); static inline Tensor _cholesky_helper(const Tensor & self, bool upper); static inline Tensor & cholesky_solve_out(Tensor & out, const Tensor & self, const Tensor & input2, bool upper=false); static inline Tensor cholesky_solve(const Tensor & self, const Tensor & input2, bool upper=false); static inline Tensor _cholesky_solve_helper(const Tensor & self, const Tensor & A, bool upper); static inline std::tuple<Tensor,Tensor> solve(const Tensor & self, const Tensor & A); static inline std::tuple<Tensor &,Tensor &> solve_out(Tensor & solution, Tensor & lu, const Tensor & self, const Tensor & A); static inline std::tuple<Tensor,Tensor> _solve_helper(const Tensor & self, const Tensor & A); static inline Tensor & cholesky_inverse_out(Tensor & out, const Tensor & self, bool upper=false); static inline Tensor cholesky_inverse(const Tensor & self, bool upper=false); static inline std::tuple<Tensor &,Tensor &> qr_out(Tensor & Q, Tensor & R, const Tensor & self, bool some=true); static inline
std::tuple qr(const Tensor & self, bool some=true); static inline std::tuple _qr_helper(const Tensor & self, bool some); static inline std::tuple geqrf_out(Tensor & a, Tensor & tau, const Tensor & self); static inline std::tuple geqrf(const Tensor & self); static inline Tensor & orgqr_out(Tensor & out, const Tensor & self, const Tensor & input2); static inline Tensor orgqr(const Tensor & self, const Tensor & input2); static inline Tensor & ormqr_out(Tensor & out, const Tensor & self, const Tensor & input2, const Tensor & input3, bool left=true, bool transpose=false); static inline Tensor ormqr(const Tensor & self, const Tensor & input2, const Tensor & input3, bool left=true, bool transpose=false); static inline std::tuple _lu_with_info(const Tensor & self, bool pivot=true, bool check_errors=true); static inline Tensor & lu_solve_out(Tensor & out, const Tensor & self, const Tensor & LU_data, const Tensor & LU_pivots); static inline Tensor lu_solve(const Tensor & self, const Tensor & LU_data, const Tensor & LU_pivots); static inline Tensor _lu_solve_helper(const Tensor & self, const Tensor & LU_data, const Tensor & LU_pivots); static inline Tensor & multinomial_out(Tensor & out, const Tensor & self, int64_t num_samples, bool replacement=false, Generator * generator=nullptr); static inline Tensor multinomial(const Tensor & self, int64_t num_samples, bool replacement=false, Generator * generator=nullptr); static inline std::tuple _multinomial_alias_setup(const Tensor & probs); static inline Tensor _multinomial_alias_draw(const Tensor & J, const Tensor & q, int64_t num_samples, Generator * generator=nullptr); static inline Tensor & lgamma_out(Tensor & out, const Tensor & self); static inline Tensor lgamma(const Tensor & self); static inline Tensor & digamma_out(Tensor & out, const Tensor & self); static inline Tensor digamma(const Tensor & self); static inline Tensor & polygamma_out(Tensor & out, int64_t n, const Tensor & self); static inline Tensor polygamma(int64_t n, const Tensor & self); static inline Tensor erfinv(const Tensor & self); static inline Tensor & erfinv_out(Tensor & out, const Tensor & self); static inline Tensor sign(const Tensor & self); static inline Tensor & sign_out(Tensor & out, const Tensor & self); static inline Tensor dist(const Tensor & self, const Tensor & other, Scalar p=2); static inline Tensor & atan2_out(Tensor & out, const Tensor & self, const Tensor & other); static inline Tensor atan2(const Tensor & self, const Tensor & other); static inline Tensor & lerp_out(Tensor & out, const Tensor & self, const Tensor & end, Scalar weight); static inline Tensor & lerp_out(Tensor & out, const Tensor & self, const Tensor & end, const Tensor & weight); static inline Tensor lerp(const Tensor & self, const Tensor & end, Scalar weight); static inline Tensor lerp(const Tensor & self, const Tensor & end, const Tensor & weight); static inline Tensor & histc_out(Tensor & out, const Tensor & self, int64_t bins=100, Scalar min=0, Scalar max=0); static inline Tensor histc(const Tensor & self, int64_t bins=100, Scalar min=0, Scalar max=0); static inline Tensor & fmod_out(Tensor & out, const Tensor & self, Scalar other); static inline Tensor fmod(const Tensor & self, Scalar other); static inline Tensor & fmod_out(Tensor & out, const Tensor & self, const Tensor & other); static inline Tensor fmod(const Tensor & self, const Tensor & other); static inline Tensor & remainder_out(Tensor & out, const Tensor & self, Scalar other); static inline Tensor remainder(const Tensor & self, Scalar 
other); static inline Tensor & remainder_out(Tensor & out, const Tensor & self, const Tensor & other); static inline Tensor remainder(const Tensor & self, const Tensor & other); static inline Tensor & min_out(Tensor & out, const Tensor & self, const Tensor & other); static inline Tensor min(const Tensor & self, const Tensor & other); static inline Tensor min(const Tensor & self); static inline Tensor & max_out(Tensor & out, const Tensor & self, const Tensor & other); static inline Tensor max(const Tensor & self, const Tensor & other); static inline Tensor max(const Tensor & self); static inline Tensor median(const Tensor & self); static inline std::tuple sort_out(Tensor & values, Tensor & indices, const Tensor & self, int64_t dim=-1, bool descending=false); static inline std::tuple sort(const Tensor & self, int64_t dim=-1, bool descending=false); #ifdef BUILD_NAMEDTENSOR static inline std::tuple sort_out(Tensor & values, Tensor & indices, const Tensor & self, Dimname dim, bool descending=false); #endif #ifdef BUILD_NAMEDTENSOR static inline std::tuple sort(const Tensor & self, Dimname dim, bool descending=false); #endif static inline Tensor argsort(const Tensor & self, int64_t dim=-1, bool descending=false); #ifdef BUILD_NAMEDTENSOR static inline Tensor argsort(const Tensor & self, Dimname dim, bool descending=false); #endif static inline std::tuple topk_out(Tensor & values, Tensor & indices, const Tensor & self, int64_t k, int64_t dim=-1, bool largest=true, bool sorted=true); static inline std::tuple topk(const Tensor & self, int64_t k, int64_t dim=-1, bool largest=true, bool sorted=true); static inline Tensor all(const Tensor & self); static inline Tensor any(const Tensor & self); static inline Tensor & renorm_out(Tensor & out, const Tensor & self, Scalar p, int64_t dim, Scalar maxnorm); static inline Tensor renorm(const Tensor & self, Scalar p, int64_t dim, Scalar maxnorm); static inline bool equal(const Tensor & self, const Tensor & other); static inline Tensor & pow_out(Tensor & out, const Tensor & self, const Tensor & exponent); static inline Tensor pow(const Tensor & self, const Tensor & exponent); static inline Tensor & pow_out(Tensor & out, Scalar self, const Tensor & exponent); static inline Tensor pow(Scalar self, const Tensor & exponent); static inline Tensor & normal_out(Tensor & out, const Tensor & mean, double std=1, Generator * generator=nullptr); static inline Tensor normal(const Tensor & mean, double std=1, Generator * generator=nullptr); static inline Tensor & normal_out(Tensor & out, double mean, const Tensor & std, Generator * generator=nullptr); static inline Tensor normal(double mean, const Tensor & std, Generator * generator=nullptr); static inline Tensor & normal_out(Tensor & out, const Tensor & mean, const Tensor & std, Generator * generator=nullptr); static inline Tensor normal(const Tensor & mean, const Tensor & std, Generator * generator=nullptr); static inline Tensor normal(double mean, double std, IntArrayRef size, Generator * generator=nullptr, const TensorOptions & options={}); static inline Tensor & normal_out(Tensor & out, double mean, double std, IntArrayRef size, Generator * generator=nullptr); static inline Tensor alias(const Tensor & self); static inline Tensor _addr(const Tensor & self, const Tensor & vec1, const Tensor & vec2, Scalar beta=1, Scalar alpha=1); static inline Tensor & _addr_(Tensor & self, const Tensor & vec1, const Tensor & vec2, Scalar beta=1, Scalar alpha=1); static inline Tensor & _addr_out(Tensor & out, const Tensor & self, const 
Tensor & vec1, const Tensor & vec2, Scalar beta=1, Scalar alpha=1); static inline Tensor & _index_copy_(Tensor & self, int64_t dim, const Tensor & index, const Tensor & source); static inline Tensor _cumsum(const Tensor & self, int64_t dim); static inline Tensor & _cumsum_out(Tensor & out, const Tensor & self, int64_t dim); static inline Tensor _cumprod(const Tensor & self, int64_t dim); static inline Tensor & _cumprod_out(Tensor & out, const Tensor & self, int64_t dim); static inline Tensor _var(const Tensor & self, bool unbiased=true); static inline Tensor _std(const Tensor & self, bool unbiased=true); static inline Tensor _cat(TensorList tensors, int64_t dim=0); static inline Tensor & _cat_out(Tensor & out, TensorList tensors, int64_t dim=0); static inline std::tuple _mode(const Tensor & self, int64_t dim=-1, bool keepdim=false); static inline std::tuple _mode_out(Tensor & values, Tensor & indices, const Tensor & self, int64_t dim=-1, bool keepdim=false); static inline std::tuple _max(const Tensor & self, int64_t dim, bool keepdim=false); static inline std::tuple _max_out(Tensor & max, Tensor & max_indices, const Tensor & self, int64_t dim, bool keepdim=false); static inline std::tuple _min(const Tensor & self, int64_t dim, bool keepdim=false); static inline std::tuple _min_out(Tensor & min, Tensor & min_indices, const Tensor & self, int64_t dim, bool keepdim=false); static inline Tensor & binary_cross_entropy_out(Tensor & out, const Tensor & self, const Tensor & target, const Tensor & weight={}, int64_t reduction=Reduction::Mean); static inline Tensor binary_cross_entropy(const Tensor & self, const Tensor & target, const Tensor & weight={}, int64_t reduction=Reduction::Mean); static inline Tensor & binary_cross_entropy_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, const Tensor & target, const Tensor & weight={}, int64_t reduction=Reduction::Mean); static inline Tensor binary_cross_entropy_backward(const Tensor & grad_output, const Tensor & self, const Tensor & target, const Tensor & weight={}, int64_t reduction=Reduction::Mean); static inline Tensor & mse_loss_out(Tensor & out, const Tensor & self, const Tensor & target, int64_t reduction=Reduction::Mean); static inline Tensor mse_loss(const Tensor & self, const Tensor & target, int64_t reduction=Reduction::Mean); static inline Tensor & mse_loss_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, const Tensor & target, int64_t reduction); static inline Tensor mse_loss_backward(const Tensor & grad_output, const Tensor & self, const Tensor & target, int64_t reduction); static inline Tensor & l1_loss_out(Tensor & out, const Tensor & self, const Tensor & target, int64_t reduction=Reduction::Mean); static inline Tensor l1_loss(const Tensor & self, const Tensor & target, int64_t reduction=Reduction::Mean); static inline Tensor & l1_loss_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, const Tensor & target, int64_t reduction); static inline Tensor l1_loss_backward(const Tensor & grad_output, const Tensor & self, const Tensor & target, int64_t reduction); static inline Tensor & multi_margin_loss_out(Tensor & out, const Tensor & self, const Tensor & target, Scalar p=1, Scalar margin=1, const Tensor & weight={}, int64_t reduction=Reduction::Mean); static inline Tensor multi_margin_loss(const Tensor & self, const Tensor & target, Scalar p=1, Scalar margin=1, const Tensor & weight={}, int64_t reduction=Reduction::Mean); static inline Tensor & 
multi_margin_loss_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, const Tensor & target, Scalar p, Scalar margin, const Tensor & weight={}, int64_t reduction=Reduction::Mean); static inline Tensor multi_margin_loss_backward(const Tensor & grad_output, const Tensor & self, const Tensor & target, Scalar p, Scalar margin, const Tensor & weight={}, int64_t reduction=Reduction::Mean); static inline Tensor & multilabel_margin_loss_out(Tensor & out, const Tensor & self, const Tensor & target, int64_t reduction=Reduction::Mean); static inline Tensor multilabel_margin_loss(const Tensor & self, const Tensor & target, int64_t reduction=Reduction::Mean); static inline std::tuple multilabel_margin_loss_forward_out(Tensor & output, Tensor & is_target, const Tensor & self, const Tensor & target, int64_t reduction); static inline std::tuple multilabel_margin_loss_forward(const Tensor & self, const Tensor & target, int64_t reduction); static inline Tensor & multilabel_margin_loss_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, const Tensor & target, int64_t reduction, const Tensor & is_target); static inline Tensor multilabel_margin_loss_backward(const Tensor & grad_output, const Tensor & self, const Tensor & target, int64_t reduction, const Tensor & is_target); static inline Tensor & nll_loss_out(Tensor & out, const Tensor & self, const Tensor & target, const Tensor & weight={}, int64_t reduction=Reduction::Mean, int64_t ignore_index=-100); static inline Tensor nll_loss(const Tensor & self, const Tensor & target, const Tensor & weight={}, int64_t reduction=Reduction::Mean, int64_t ignore_index=-100); static inline std::tuple nll_loss_forward_out(Tensor & output, Tensor & total_weight, const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction, int64_t ignore_index); static inline std::tuple nll_loss_forward(const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction, int64_t ignore_index); static inline Tensor & nll_loss_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction, int64_t ignore_index, const Tensor & total_weight); static inline Tensor nll_loss_backward(const Tensor & grad_output, const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction, int64_t ignore_index, const Tensor & total_weight); static inline Tensor & nll_loss2d_out(Tensor & out, const Tensor & self, const Tensor & target, const Tensor & weight={}, int64_t reduction=Reduction::Mean, int64_t ignore_index=-100); static inline Tensor nll_loss2d(const Tensor & self, const Tensor & target, const Tensor & weight={}, int64_t reduction=Reduction::Mean, int64_t ignore_index=-100); static inline std::tuple nll_loss2d_forward_out(Tensor & output, Tensor & total_weight, const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction, int64_t ignore_index); static inline std::tuple nll_loss2d_forward(const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction, int64_t ignore_index); static inline Tensor & nll_loss2d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction, int64_t ignore_index, const Tensor & total_weight); static inline Tensor nll_loss2d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction, 
int64_t ignore_index, const Tensor & total_weight); static inline Tensor & smooth_l1_loss_out(Tensor & out, const Tensor & self, const Tensor & target, int64_t reduction=Reduction::Mean); static inline Tensor smooth_l1_loss(const Tensor & self, const Tensor & target, int64_t reduction=Reduction::Mean); static inline Tensor & smooth_l1_loss_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, const Tensor & target, int64_t reduction); static inline Tensor smooth_l1_loss_backward(const Tensor & grad_output, const Tensor & self, const Tensor & target, int64_t reduction); static inline Tensor & soft_margin_loss_out(Tensor & out, const Tensor & self, const Tensor & target, int64_t reduction=Reduction::Mean); static inline Tensor soft_margin_loss(const Tensor & self, const Tensor & target, int64_t reduction=Reduction::Mean); static inline Tensor & soft_margin_loss_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, const Tensor & target, int64_t reduction); static inline Tensor soft_margin_loss_backward(const Tensor & grad_output, const Tensor & self, const Tensor & target, int64_t reduction); static inline Tensor & elu_out(Tensor & out, const Tensor & self, Scalar alpha=1, Scalar scale=1, Scalar input_scale=1); static inline Tensor elu(const Tensor & self, Scalar alpha=1, Scalar scale=1, Scalar input_scale=1); static inline Tensor & elu_backward_out(Tensor & grad_input, const Tensor & grad_output, Scalar alpha, Scalar scale, Scalar input_scale, const Tensor & output); static inline Tensor elu_backward(const Tensor & grad_output, Scalar alpha, Scalar scale, Scalar input_scale, const Tensor & output); static inline Tensor & elu_(Tensor & self, Scalar alpha=1, Scalar scale=1, Scalar input_scale=1); static inline Tensor & glu_out(Tensor & out, const Tensor & self, int64_t dim=-1); static inline Tensor glu(const Tensor & self, int64_t dim=-1); static inline Tensor & glu_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, int64_t dim); static inline Tensor glu_backward(const Tensor & grad_output, const Tensor & self, int64_t dim); static inline Tensor & hardtanh_out(Tensor & out, const Tensor & self, Scalar min_val=-1, Scalar max_val=1); static inline Tensor hardtanh(const Tensor & self, Scalar min_val=-1, Scalar max_val=1); static inline Tensor & hardtanh_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, Scalar min_val, Scalar max_val); static inline Tensor hardtanh_backward(const Tensor & grad_output, const Tensor & self, Scalar min_val, Scalar max_val); static inline Tensor & hardtanh_(Tensor & self, Scalar min_val=-1, Scalar max_val=1); static inline Tensor & leaky_relu_out(Tensor & out, const Tensor & self, Scalar negative_slope=0.01); static inline Tensor leaky_relu(const Tensor & self, Scalar negative_slope=0.01); static inline Tensor & leaky_relu_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, Scalar negative_slope); static inline Tensor leaky_relu_backward(const Tensor & grad_output, const Tensor & self, Scalar negative_slope); static inline Tensor & leaky_relu_(Tensor & self, Scalar negative_slope=0.01); static inline Tensor & log_sigmoid_out(Tensor & out, const Tensor & self); static inline Tensor log_sigmoid(const Tensor & self); static inline std::tuple log_sigmoid_forward_out(Tensor & output, Tensor & buffer, const Tensor & self); static inline std::tuple log_sigmoid_forward(const Tensor & self); static inline Tensor & 
log_sigmoid_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, const Tensor & buffer); static inline Tensor log_sigmoid_backward(const Tensor & grad_output, const Tensor & self, const Tensor & buffer); static inline Tensor & rrelu_with_noise_out(Tensor & out, const Tensor & self, const Tensor & noise, Scalar lower=0.125, Scalar upper=0.333333333333, bool training=false, Generator * generator=nullptr); static inline Tensor rrelu_with_noise(const Tensor & self, const Tensor & noise, Scalar lower=0.125, Scalar upper=0.333333333333, bool training=false, Generator * generator=nullptr); static inline Tensor & rrelu_with_noise_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, const Tensor & noise, Scalar lower, Scalar upper, bool training); static inline Tensor rrelu_with_noise_backward(const Tensor & grad_output, const Tensor & self, const Tensor & noise, Scalar lower, Scalar upper, bool training); static inline Tensor & rrelu_with_noise_(Tensor & self, const Tensor & noise, Scalar lower=0.125, Scalar upper=0.333333333333, bool training=false, Generator * generator=nullptr); static inline Tensor & softplus_out(Tensor & out, const Tensor & self, Scalar beta=1, Scalar threshold=20); static inline Tensor softplus(const Tensor & self, Scalar beta=1, Scalar threshold=20); static inline Tensor & softplus_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, Scalar beta, Scalar threshold, const Tensor & output); static inline Tensor softplus_backward(const Tensor & grad_output, const Tensor & self, Scalar beta, Scalar threshold, const Tensor & output); static inline Tensor & softshrink_out(Tensor & out, const Tensor & self, Scalar lambd=0.5); static inline Tensor softshrink(const Tensor & self, Scalar lambd=0.5); static inline Tensor & softshrink_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, Scalar lambd); static inline Tensor softshrink_backward(const Tensor & grad_output, const Tensor & self, Scalar lambd); static inline Tensor & adaptive_avg_pool2d_out(Tensor & out, const Tensor & self, IntArrayRef output_size); static inline Tensor adaptive_avg_pool2d(const Tensor & self, IntArrayRef output_size); static inline Tensor mkldnn_adaptive_avg_pool2d(const Tensor & self, IntArrayRef output_size); static inline Tensor _adaptive_avg_pool2d(const Tensor & self, IntArrayRef output_size); static inline Tensor _adaptive_avg_pool2d_backward(const Tensor & grad_output, const Tensor & self); static inline Tensor & adaptive_avg_pool3d_out(Tensor & out, const Tensor & self, IntArrayRef output_size); static inline Tensor adaptive_avg_pool3d(const Tensor & self, IntArrayRef output_size); static inline Tensor & adaptive_avg_pool3d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self); static inline Tensor adaptive_avg_pool3d_backward(const Tensor & grad_output, const Tensor & self); static inline std::tuple adaptive_max_pool2d_out(Tensor & out, Tensor & indices, const Tensor & self, IntArrayRef output_size); static inline std::tuple adaptive_max_pool2d(const Tensor & self, IntArrayRef output_size); static inline Tensor & adaptive_max_pool2d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, const Tensor & indices); static inline Tensor adaptive_max_pool2d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & indices); static inline std::tuple adaptive_max_pool3d_out(Tensor & out, Tensor & indices, const Tensor 
& self, IntArrayRef output_size); static inline std::tuple<Tensor,Tensor> adaptive_max_pool3d(const Tensor & self, IntArrayRef output_size); static inline Tensor & adaptive_max_pool3d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, const Tensor & indices); static inline Tensor adaptive_max_pool3d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & indices); static inline Tensor & avg_pool2d_out(Tensor & out, const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride={}, IntArrayRef padding=0, bool ceil_mode=false, bool count_include_pad=true, c10::optional<int64_t> divisor_override=c10::nullopt); static inline Tensor avg_pool2d(const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride={}, IntArrayRef padding=0, bool ceil_mode=false, bool count_include_pad=true, c10::optional<int64_t> divisor_override=c10::nullopt); static inline Tensor & avg_pool2d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional<int64_t> divisor_override); static inline Tensor avg_pool2d_backward(const Tensor & grad_output, const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional<int64_t> divisor_override); static inline Tensor & avg_pool3d_out(Tensor & out, const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride={}, IntArrayRef padding=0, bool ceil_mode=false, bool count_include_pad=true, c10::optional<int64_t> divisor_override=c10::nullopt); static inline Tensor avg_pool3d(const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride={}, IntArrayRef padding=0, bool ceil_mode=false, bool count_include_pad=true, c10::optional<int64_t> divisor_override=c10::nullopt); static inline Tensor & avg_pool3d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional<int64_t> divisor_override); static inline Tensor avg_pool3d_backward(const Tensor & grad_output, const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional<int64_t> divisor_override); static inline std::tuple<Tensor &,Tensor &> fractional_max_pool2d_out(Tensor & output, Tensor & indices, const Tensor & self, IntArrayRef kernel_size, IntArrayRef output_size, const Tensor & random_samples); static inline std::tuple<Tensor,Tensor> fractional_max_pool2d(const Tensor & self, IntArrayRef kernel_size, IntArrayRef output_size, const Tensor & random_samples); static inline Tensor & fractional_max_pool2d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, IntArrayRef kernel_size, IntArrayRef output_size, const Tensor & indices); static inline Tensor fractional_max_pool2d_backward(const Tensor & grad_output, const Tensor & self, IntArrayRef kernel_size, IntArrayRef output_size, const Tensor & indices); static inline std::tuple<Tensor &,Tensor &> fractional_max_pool3d_out(Tensor & output, Tensor & indices, const Tensor & self, IntArrayRef kernel_size, IntArrayRef output_size, const Tensor & random_samples); static inline std::tuple<Tensor,Tensor> fractional_max_pool3d(const Tensor & self, IntArrayRef kernel_size, IntArrayRef output_size, const Tensor & random_samples); static inline Tensor & fractional_max_pool3d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, IntArrayRef kernel_size, IntArrayRef output_size, const Tensor & indices);
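// Illustrative usage sketch, not emitted by gen.py: minimal calls to the average
// pooling functions declared above on an NCHW input. Names and shapes below are
// example choices only.
static inline void example_pooling_usage_sketch() {
  Tensor img = randn({1, 3, 32, 32});                        // batch of one 3-channel 32x32 image
  Tensor halved = avg_pool2d(img, /*kernel_size=*/{2, 2});   // 2x2 average pooling -> 1x3x16x16
  Tensor pooled_to_1x1 = adaptive_avg_pool2d(img, {1, 1});   // global average pooling -> 1x3x1x1
}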
static inline Tensor fractional_max_pool3d_backward(const Tensor & grad_output, const Tensor & self, IntArrayRef kernel_size, IntArrayRef output_size, const Tensor & indices); static inline std::tuple max_pool2d_with_indices_out(Tensor & out, Tensor & indices, const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride={}, IntArrayRef padding=0, IntArrayRef dilation=1, bool ceil_mode=false); static inline std::tuple max_pool2d_with_indices(const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride={}, IntArrayRef padding=0, IntArrayRef dilation=1, bool ceil_mode=false); static inline Tensor & max_pool2d_with_indices_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode, const Tensor & indices); static inline Tensor max_pool2d_with_indices_backward(const Tensor & grad_output, const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode, const Tensor & indices); static inline std::tuple max_pool3d_with_indices_out(Tensor & out, Tensor & indices, const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride={}, IntArrayRef padding=0, IntArrayRef dilation=1, bool ceil_mode=false); static inline std::tuple max_pool3d_with_indices(const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride={}, IntArrayRef padding=0, IntArrayRef dilation=1, bool ceil_mode=false); static inline Tensor & max_pool3d_with_indices_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode, const Tensor & indices); static inline Tensor max_pool3d_with_indices_backward(const Tensor & grad_output, const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode, const Tensor & indices); static inline Tensor & max_unpool2d_out(Tensor & out, const Tensor & self, const Tensor & indices, IntArrayRef output_size); static inline Tensor max_unpool2d(const Tensor & self, const Tensor & indices, IntArrayRef output_size); static inline Tensor & max_unpool2d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, const Tensor & indices, IntArrayRef output_size); static inline Tensor max_unpool2d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & indices, IntArrayRef output_size); static inline Tensor & max_unpool3d_out(Tensor & out, const Tensor & self, const Tensor & indices, IntArrayRef output_size, IntArrayRef stride, IntArrayRef padding); static inline Tensor max_unpool3d(const Tensor & self, const Tensor & indices, IntArrayRef output_size, IntArrayRef stride, IntArrayRef padding); static inline Tensor & max_unpool3d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, const Tensor & indices, IntArrayRef output_size, IntArrayRef stride, IntArrayRef padding); static inline Tensor max_unpool3d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & indices, IntArrayRef output_size, IntArrayRef stride, IntArrayRef padding); static inline Tensor & reflection_pad1d_out(Tensor & out, const Tensor & self, IntArrayRef padding); static inline Tensor reflection_pad1d(const Tensor & self, IntArrayRef padding); static inline Tensor & reflection_pad1d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, IntArrayRef padding); 
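// Illustrative usage sketch, not emitted by gen.py: max_pool2d_with_indices returns
// both the pooled values and the argmax indices, which can then be fed back into
// max_unpool2d. The function name and shapes below are example choices only.
static inline void example_unpool_usage_sketch() {
  Tensor img = randn({1, 1, 4, 4});
  std::tuple<Tensor,Tensor> pooled = max_pool2d_with_indices(img, /*kernel_size=*/{2, 2});
  Tensor values = std::get<0>(pooled);    // 1x1x2x2 pooled maxima
  Tensor indices = std::get<1>(pooled);   // argmax locations within each window
  Tensor restored = max_unpool2d(values, indices, /*output_size=*/{4, 4});
}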
static inline Tensor reflection_pad1d_backward(const Tensor & grad_output, const Tensor & self, IntArrayRef padding); static inline Tensor & reflection_pad2d_out(Tensor & out, const Tensor & self, IntArrayRef padding); static inline Tensor reflection_pad2d(const Tensor & self, IntArrayRef padding); static inline Tensor & reflection_pad2d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, IntArrayRef padding); static inline Tensor reflection_pad2d_backward(const Tensor & grad_output, const Tensor & self, IntArrayRef padding); static inline Tensor & replication_pad1d_out(Tensor & out, const Tensor & self, IntArrayRef padding); static inline Tensor replication_pad1d(const Tensor & self, IntArrayRef padding); static inline Tensor & replication_pad1d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, IntArrayRef padding); static inline Tensor replication_pad1d_backward(const Tensor & grad_output, const Tensor & self, IntArrayRef padding); static inline Tensor & replication_pad2d_out(Tensor & out, const Tensor & self, IntArrayRef padding); static inline Tensor replication_pad2d(const Tensor & self, IntArrayRef padding); static inline Tensor & replication_pad2d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, IntArrayRef padding); static inline Tensor replication_pad2d_backward(const Tensor & grad_output, const Tensor & self, IntArrayRef padding); static inline Tensor & replication_pad3d_out(Tensor & out, const Tensor & self, IntArrayRef padding); static inline Tensor replication_pad3d(const Tensor & self, IntArrayRef padding); static inline Tensor & replication_pad3d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, IntArrayRef padding); static inline Tensor replication_pad3d_backward(const Tensor & grad_output, const Tensor & self, IntArrayRef padding); static inline Tensor & upsample_linear1d_out(Tensor & out, const Tensor & self, IntArrayRef output_size, bool align_corners); static inline Tensor upsample_linear1d(const Tensor & self, IntArrayRef output_size, bool align_corners); static inline Tensor & upsample_linear1d_backward_out(Tensor & grad_input, const Tensor & grad_output, IntArrayRef output_size, IntArrayRef input_size, bool align_corners); static inline Tensor upsample_linear1d_backward(const Tensor & grad_output, IntArrayRef output_size, IntArrayRef input_size, bool align_corners); static inline Tensor & upsample_bilinear2d_out(Tensor & out, const Tensor & self, IntArrayRef output_size, bool align_corners); static inline Tensor upsample_bilinear2d(const Tensor & self, IntArrayRef output_size, bool align_corners); static inline Tensor & upsample_bilinear2d_backward_out(Tensor & grad_input, const Tensor & grad_output, IntArrayRef output_size, IntArrayRef input_size, bool align_corners); static inline Tensor upsample_bilinear2d_backward(const Tensor & grad_output, IntArrayRef output_size, IntArrayRef input_size, bool align_corners); static inline Tensor & upsample_bicubic2d_out(Tensor & out, const Tensor & self, IntArrayRef output_size, bool align_corners); static inline Tensor upsample_bicubic2d(const Tensor & self, IntArrayRef output_size, bool align_corners); static inline Tensor & upsample_bicubic2d_backward_out(Tensor & grad_input, const Tensor & grad_output, IntArrayRef output_size, IntArrayRef input_size, bool align_corners); static inline Tensor upsample_bicubic2d_backward(const Tensor & grad_output, IntArrayRef output_size, IntArrayRef input_size, 
bool align_corners); static inline Tensor & upsample_trilinear3d_out(Tensor & out, const Tensor & self, IntArrayRef output_size, bool align_corners); static inline Tensor upsample_trilinear3d(const Tensor & self, IntArrayRef output_size, bool align_corners); static inline Tensor & upsample_trilinear3d_backward_out(Tensor & grad_input, const Tensor & grad_output, IntArrayRef output_size, IntArrayRef input_size, bool align_corners); static inline Tensor upsample_trilinear3d_backward(const Tensor & grad_output, IntArrayRef output_size, IntArrayRef input_size, bool align_corners); static inline Tensor & upsample_nearest1d_out(Tensor & out, const Tensor & self, IntArrayRef output_size); static inline Tensor upsample_nearest1d(const Tensor & self, IntArrayRef output_size); static inline Tensor & upsample_nearest1d_backward_out(Tensor & grad_input, const Tensor & grad_output, IntArrayRef output_size, IntArrayRef input_size); static inline Tensor upsample_nearest1d_backward(const Tensor & grad_output, IntArrayRef output_size, IntArrayRef input_size); static inline Tensor & upsample_nearest2d_out(Tensor & out, const Tensor & self, IntArrayRef output_size); static inline Tensor upsample_nearest2d(const Tensor & self, IntArrayRef output_size); static inline Tensor & upsample_nearest2d_backward_out(Tensor & grad_input, const Tensor & grad_output, IntArrayRef output_size, IntArrayRef input_size); static inline Tensor upsample_nearest2d_backward(const Tensor & grad_output, IntArrayRef output_size, IntArrayRef input_size); static inline Tensor & upsample_nearest3d_out(Tensor & out, const Tensor & self, IntArrayRef output_size); static inline Tensor upsample_nearest3d(const Tensor & self, IntArrayRef output_size); static inline Tensor & upsample_nearest3d_backward_out(Tensor & grad_input, const Tensor & grad_output, IntArrayRef output_size, IntArrayRef input_size); static inline Tensor upsample_nearest3d_backward(const Tensor & grad_output, IntArrayRef output_size, IntArrayRef input_size); static inline Tensor & sigmoid_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & output); static inline Tensor sigmoid_backward(const Tensor & grad_output, const Tensor & output); static inline Tensor & tanh_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & output); static inline Tensor tanh_backward(const Tensor & grad_output, const Tensor & output); static inline Tensor & slow_conv_transpose2d_out(Tensor & out, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias={}, IntArrayRef stride=1, IntArrayRef padding=0, IntArrayRef output_padding=0, IntArrayRef dilation=1); static inline Tensor slow_conv_transpose2d(const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias={}, IntArrayRef stride=1, IntArrayRef padding=0, IntArrayRef output_padding=0, IntArrayRef dilation=1); static inline std::tuple slow_conv_transpose2d_backward_out(Tensor & grad_input, Tensor & grad_weight, Tensor & grad_bias, const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation, const Tensor & columns, const Tensor & ones); static inline std::tuple slow_conv_transpose2d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation, const Tensor & columns, const Tensor & ones, 
std::array output_mask); static inline Tensor & slow_conv_transpose3d_out(Tensor & out, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias={}, IntArrayRef stride=1, IntArrayRef padding=0, IntArrayRef output_padding=0, IntArrayRef dilation=1); static inline Tensor slow_conv_transpose3d(const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias={}, IntArrayRef stride=1, IntArrayRef padding=0, IntArrayRef output_padding=0, IntArrayRef dilation=1); static inline std::tuple slow_conv_transpose3d_backward_out(Tensor & grad_input, Tensor & grad_weight, Tensor & grad_bias, const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation, const Tensor & finput, const Tensor & fgrad_input); static inline std::tuple slow_conv_transpose3d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation, const Tensor & finput, const Tensor & fgrad_input, std::array output_mask); static inline Tensor & thnn_conv2d_out(Tensor & out, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias={}, IntArrayRef stride=1, IntArrayRef padding=0); static inline Tensor thnn_conv2d(const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias={}, IntArrayRef stride=1, IntArrayRef padding=0); static inline std::tuple thnn_conv2d_forward_out(Tensor & output, Tensor & finput, Tensor & fgrad_input, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding); static inline std::tuple thnn_conv2d_forward(const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding); static inline std::tuple thnn_conv2d_backward_out(Tensor & grad_input, Tensor & grad_weight, Tensor & grad_bias, const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, const Tensor & finput, const Tensor & fgrad_input); static inline std::tuple thnn_conv2d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, const Tensor & finput, const Tensor & fgrad_input, std::array output_mask); static inline Tensor & thnn_conv_depthwise2d_out(Tensor & out, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias={}, IntArrayRef stride=1, IntArrayRef padding=0, IntArrayRef dilation=1); static inline Tensor thnn_conv_depthwise2d(const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias={}, IntArrayRef stride=1, IntArrayRef padding=0, IntArrayRef dilation=1); static inline Tensor & thnn_conv_depthwise2d_forward_out(Tensor & out, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation); static inline Tensor thnn_conv_depthwise2d_forward(const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation); static inline std::tuple thnn_conv_depthwise2d_backward_out(Tensor & grad_input, Tensor & grad_weight, const Tensor & grad_output, const Tensor & self, 
const Tensor & weight, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation); static inline std::tuple thnn_conv_depthwise2d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, std::array output_mask); static inline Tensor & thnn_conv3d_out(Tensor & out, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias={}, IntArrayRef stride=1, IntArrayRef padding=0); static inline Tensor thnn_conv3d(const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias={}, IntArrayRef stride=1, IntArrayRef padding=0); static inline std::tuple thnn_conv3d_forward_out(Tensor & output, Tensor & finput, Tensor & fgrad_input, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding); static inline std::tuple thnn_conv3d_forward(const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding); static inline std::tuple thnn_conv3d_backward_out(Tensor & grad_input, Tensor & grad_weight, Tensor & grad_bias, const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, const Tensor & finput, const Tensor & fgrad_input); static inline std::tuple thnn_conv3d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, const Tensor & finput, const Tensor & fgrad_input, std::array output_mask); static inline Tensor slow_conv_dilated2d(const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias={}, IntArrayRef stride=1, IntArrayRef padding=0, IntArrayRef dilation=1); static inline std::tuple slow_conv_dilated2d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, std::array output_mask); static inline Tensor slow_conv_dilated3d(const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias={}, IntArrayRef stride=1, IntArrayRef padding=0, IntArrayRef dilation=1); static inline std::tuple slow_conv_dilated3d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, std::array output_mask); static inline Tensor & col2im_out(Tensor & out, const Tensor & self, IntArrayRef output_size, IntArrayRef kernel_size, IntArrayRef dilation, IntArrayRef padding, IntArrayRef stride); static inline Tensor col2im(const Tensor & self, IntArrayRef output_size, IntArrayRef kernel_size, IntArrayRef dilation, IntArrayRef padding, IntArrayRef stride); static inline Tensor & col2im_backward_out(Tensor & grad_input, const Tensor & grad_output, IntArrayRef kernel_size, IntArrayRef dilation, IntArrayRef padding, IntArrayRef stride); static inline Tensor col2im_backward(const Tensor & grad_output, IntArrayRef kernel_size, IntArrayRef dilation, IntArrayRef padding, IntArrayRef stride); static inline Tensor & im2col_out(Tensor & out, const Tensor & self, IntArrayRef kernel_size, IntArrayRef dilation, IntArrayRef padding, IntArrayRef stride); static inline Tensor im2col(const Tensor & self, IntArrayRef kernel_size, IntArrayRef dilation, IntArrayRef padding, IntArrayRef 
stride);
static inline Tensor & im2col_backward_out(Tensor & grad_input, const Tensor & grad_output, IntArrayRef input_size, IntArrayRef kernel_size, IntArrayRef dilation, IntArrayRef padding, IntArrayRef stride);
static inline Tensor im2col_backward(const Tensor & grad_output, IntArrayRef input_size, IntArrayRef kernel_size, IntArrayRef dilation, IntArrayRef padding, IntArrayRef stride);
inline Tensor from_blob(
    void* data,
    IntArrayRef sizes,
    IntArrayRef strides,
    const std::function<void(void*)>& deleter,
    const TensorOptions& options = {}) {
  auto device = globalContext().getDeviceFromPtr(data, options.device().type());
  if (options.device().has_index()) {
    TORCH_CHECK(
        options.device() == device,
        "Specified device ", options.device(),
        " does not match device of data ", device);
  }
  auto storage = Storage(
      options.dtype(),
      detail::computeStorageSize(sizes, strides),
      InefficientStdFunctionContext::makeDataPtr(data, deleter, device),
      /*allocator=*/nullptr,
      /*resizable=*/false);
  return empty({0}, options).set_(storage, 0, sizes, strides);
}
inline Tensor from_blob(
    void* data,
    IntArrayRef sizes,
    const std::function<void(void*)>& deleter,
    const TensorOptions& options = {}) {
  return from_blob(data, sizes, detail::defaultStrides(sizes), deleter, options);
}
inline Tensor from_blob(
    void* data,
    IntArrayRef sizes,
    IntArrayRef strides,
    const TensorOptions& options = {}) {
  return from_blob(data, sizes, strides, [](void*) {}, options);
}
inline Tensor from_blob(
    void* data,
    IntArrayRef sizes,
    const TensorOptions& options = {}) {
  return from_blob(data, sizes, detail::defaultStrides(sizes), [](void*) {}, options);
}
// function definitions are all static inline because
// they are one-line statically dispatched functions that
// invoke the actual dynamic dispatch on the correct argument
static inline Tensor _cast_Byte(const Tensor & self, bool non_blocking) {
#ifdef USE_STATIC_DISPATCH
    return TypeDefault::_cast_Byte(self, non_blocking);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton()
        .findSchema({"aten::_cast_Byte", ""}).value();
    return c10::Dispatcher::singleton().callUnboxed<Tensor, const Tensor &, bool>(
        op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self, non_blocking);
#endif
}
static inline Tensor _cast_Char(const Tensor & self, bool non_blocking) {
#ifdef USE_STATIC_DISPATCH
    return TypeDefault::_cast_Char(self, non_blocking);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton()
        .findSchema({"aten::_cast_Char", ""}).value();
    return c10::Dispatcher::singleton().callUnboxed<Tensor, const Tensor &, bool>(
        op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self, non_blocking);
#endif
}
static inline Tensor _cast_Double(const Tensor & self, bool non_blocking) {
#ifdef USE_STATIC_DISPATCH
    return TypeDefault::_cast_Double(self, non_blocking);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton()
        .findSchema({"aten::_cast_Double", ""}).value();
    return c10::Dispatcher::singleton().callUnboxed<Tensor, const Tensor &, bool>(
        op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self, non_blocking);
#endif
}
static inline Tensor _cast_Float(const Tensor & self, bool non_blocking) {
#ifdef USE_STATIC_DISPATCH
    return TypeDefault::_cast_Float(self, non_blocking);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton()
        .findSchema({"aten::_cast_Float", ""}).value();
    return c10::Dispatcher::singleton().callUnboxed<Tensor, const Tensor &, bool>(
        op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self, non_blocking);
#endif
}
static inline Tensor _cast_Int(const Tensor & self, bool non_blocking) { #ifdef
USE_STATIC_DISPATCH return TypeDefault::_cast_Int(self, non_blocking); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::_cast_Int", ""}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self, non_blocking); #endif } static inline Tensor _cast_Long(const Tensor & self, bool non_blocking) { #ifdef USE_STATIC_DISPATCH return TypeDefault::_cast_Long(self, non_blocking); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::_cast_Long", ""}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self, non_blocking); #endif } static inline Tensor _cast_Short(const Tensor & self, bool non_blocking) { #ifdef USE_STATIC_DISPATCH return TypeDefault::_cast_Short(self, non_blocking); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::_cast_Short", ""}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self, non_blocking); #endif } static inline Tensor _cast_Half(const Tensor & self, bool non_blocking) { #ifdef USE_STATIC_DISPATCH return TypeDefault::_cast_Half(self, non_blocking); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::_cast_Half", ""}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self, non_blocking); #endif } #ifdef BUILD_NAMEDTENSOR static inline std::vector align_tensors(TensorList tensors) { #ifdef USE_STATIC_DISPATCH return TypeDefault::align_tensors(tensors); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::align_tensors", ""}).value(); return c10::Dispatcher::singleton().callUnboxedOnly, TensorList>( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(tensors)), tensors); #endif } #endif static inline std::tuple _cudnn_ctc_loss(const Tensor & log_probs, const Tensor & targets, IntArrayRef input_lengths, IntArrayRef target_lengths, int64_t blank, bool deterministic, bool zero_infinity) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(log_probs, targets)))) { default: AT_ERROR("_cudnn_ctc_loss not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(log_probs, targets))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::_cudnn_ctc_loss", ""}).value(); return c10::Dispatcher::singleton().callUnboxedOnly, const Tensor &, const Tensor &, IntArrayRef, IntArrayRef, int64_t, bool, bool>( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(log_probs, targets)), log_probs, targets, input_lengths, target_lengths, blank, deterministic, zero_infinity); #endif } static inline Tensor _cudnn_rnn_flatten_weight(TensorList weight_arr, int64_t weight_stride0, int64_t input_size, int64_t mode, int64_t hidden_size, int64_t num_layers, bool batch_first, bool bidirectional) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(weight_arr)))) { default: AT_ERROR("_cudnn_rnn_flatten_weight not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(weight_arr))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() 
.findSchema({"aten::_cudnn_rnn_flatten_weight", ""}).value(); return c10::Dispatcher::singleton().callUnboxedOnly( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(weight_arr)), weight_arr, weight_stride0, input_size, mode, hidden_size, num_layers, batch_first, bidirectional); #endif } static inline std::tuple _cudnn_rnn(const Tensor & input, TensorList weight, int64_t weight_stride0, const Tensor & weight_buf, const Tensor & hx, const Tensor & cx, int64_t mode, int64_t hidden_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, IntArrayRef batch_sizes, const Tensor & dropout_state) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(input, weight, weight_buf, hx, cx, dropout_state)))) { default: AT_ERROR("_cudnn_rnn not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(input, weight, weight_buf, hx, cx, dropout_state))); } #else static auto table = globalATenDispatch().getOpTable("aten::_cudnn_rnn(Tensor input, Tensor[] weight, int weight_stride0, Tensor? weight_buf, Tensor hx, Tensor? cx, int mode, int hidden_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, int[] batch_sizes, Tensor? dropout_state) -> (Tensor, Tensor, Tensor, Tensor, Tensor)"); return table->callUnboxed, const Tensor &, TensorList, int64_t, const Tensor &, const Tensor &, const Tensor &, int64_t, int64_t, int64_t, bool, double, bool, bool, IntArrayRef, const Tensor &>(input, weight, weight_stride0, weight_buf, hx, cx, mode, hidden_size, num_layers, batch_first, dropout, train, bidirectional, batch_sizes, dropout_state); #endif } static inline std::tuple> _cudnn_rnn_backward(const Tensor & input, TensorList weight, int64_t weight_stride0, const Tensor & weight_buf, const Tensor & hx, const Tensor & cx, const Tensor & output, const Tensor & grad_output, const Tensor & grad_hy, const Tensor & grad_cy, int64_t mode, int64_t hidden_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, IntArrayRef batch_sizes, const Tensor & dropout_state, const Tensor & reserve, std::array output_mask) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(input, weight, weight_buf, hx, cx, output, grad_output, grad_hy, grad_cy, dropout_state, reserve)))) { default: AT_ERROR("_cudnn_rnn_backward not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(input, weight, weight_buf, hx, cx, output, grad_output, grad_hy, grad_cy, dropout_state, reserve))); } #else static auto table = globalATenDispatch().getOpTable("aten::_cudnn_rnn_backward(Tensor input, Tensor[] weight, int weight_stride0, Tensor weight_buf, Tensor hx, Tensor? cx, Tensor output, Tensor? grad_output, Tensor? grad_hy, Tensor? grad_cy, int mode, int hidden_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, int[] batch_sizes, Tensor? 
dropout_state, Tensor reserve, bool[4] output_mask) -> (Tensor, Tensor, Tensor, Tensor[])"); return table->callUnboxed>, const Tensor &, TensorList, int64_t, const Tensor &, const Tensor &, const Tensor &, const Tensor &, const Tensor &, const Tensor &, const Tensor &, int64_t, int64_t, int64_t, bool, double, bool, bool, IntArrayRef, const Tensor &, const Tensor &, std::array>(input, weight, weight_stride0, weight_buf, hx, cx, output, grad_output, grad_hy, grad_cy, mode, hidden_size, num_layers, batch_first, dropout, train, bidirectional, batch_sizes, dropout_state, reserve, output_mask); #endif } static inline Tensor _cudnn_init_dropout_state(double dropout, bool train, int64_t dropout_seed, const TensorOptions & options) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(options)))) { default: AT_ERROR("_cudnn_init_dropout_state not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(options))); } #else globalLegacyTypeDispatch().initForTensorTypeSet(at::detail::multi_dispatch_tensor_type_set(options)); static auto table = globalATenDispatch().getOpTable("aten::_cudnn_init_dropout_state(float dropout, bool train, int dropout_seed, *, ScalarType dtype, Layout layout, Device device, bool pin_memory=False) -> Tensor"); return table->callUnboxed(dropout, train, dropout_seed, options); #endif } static inline int64_t _debug_has_internal_overlap(const Tensor & self) { #ifdef USE_STATIC_DISPATCH return TypeDefault::_debug_has_internal_overlap(self); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::_debug_has_internal_overlap", ""}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self); #endif } static inline std::tuple _fused_dropout(const Tensor & self, double p, Generator * generator) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)))) { default: AT_ERROR("_fused_dropout not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(self))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::_fused_dropout", ""}).value(); return c10::Dispatcher::singleton().callUnboxedOnly, const Tensor &, double, Generator *>( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self, p, generator); #endif } static inline Tensor _masked_scale(const Tensor & self, const Tensor & mask, double scale) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self, mask)))) { default: AT_ERROR("_masked_scale not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(self, mask))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::_masked_scale", ""}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self, mask)), self, mask, scale); #endif } static inline std::tuple _sobol_engine_draw(const Tensor & quasi, int64_t n, const Tensor & sobolstate, int64_t dimension, int64_t num_generated, c10::optional dtype) { #ifdef USE_STATIC_DISPATCH return TypeDefault::_sobol_engine_draw(quasi, n, sobolstate, dimension, num_generated, dtype); #else static auto table = globalATenDispatch().getOpTable("aten::_sobol_engine_draw(Tensor quasi, int n, Tensor sobolstate, int 
dimension, int num_generated, ScalarType? dtype) -> (Tensor, Tensor)"); return table->callUnboxed, const Tensor &, int64_t, const Tensor &, int64_t, int64_t, c10::optional>(quasi, n, sobolstate, dimension, num_generated, dtype); #endif } static inline Tensor & _sobol_engine_ff_(Tensor & self, int64_t n, const Tensor & sobolstate, int64_t dimension, int64_t num_generated) { #ifdef USE_STATIC_DISPATCH return TypeDefault::_sobol_engine_ff_(self, n, sobolstate, dimension, num_generated); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::_sobol_engine_ff_", ""}).value(); return c10::Dispatcher::singleton().callUnboxedOnly( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self, sobolstate)), self, n, sobolstate, dimension, num_generated); #endif } static inline Tensor & _sobol_engine_scramble_(Tensor & self, const Tensor & ltm, int64_t dimension) { #ifdef USE_STATIC_DISPATCH return TypeDefault::_sobol_engine_scramble_(self, ltm, dimension); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::_sobol_engine_scramble_", ""}).value(); return c10::Dispatcher::singleton().callUnboxedOnly( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self, ltm)), self, ltm, dimension); #endif } static inline Tensor & _sobol_engine_initialize_state_(Tensor & self, int64_t dimension) { #ifdef USE_STATIC_DISPATCH return TypeDefault::_sobol_engine_initialize_state_(self, dimension); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::_sobol_engine_initialize_state_", ""}).value(); return c10::Dispatcher::singleton().callUnboxedOnly( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self, dimension); #endif } static inline Tensor _reshape_from_tensor(const Tensor & self, const Tensor & shape) { #ifdef USE_STATIC_DISPATCH return TypeDefault::_reshape_from_tensor(self, shape); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::_reshape_from_tensor", ""}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self, shape)), self, shape); #endif } static inline Tensor _shape_as_tensor(const Tensor & self) { #ifdef USE_STATIC_DISPATCH return TypeDefault::_shape_as_tensor(self); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::_shape_as_tensor", ""}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self); #endif } static inline Tensor dropout(const Tensor & input, double p, bool train) { #ifdef USE_STATIC_DISPATCH return TypeDefault::dropout(input, p, train); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::dropout", ""}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(input)), input, p, train); #endif } static inline Tensor & dropout_(Tensor & self, double p, bool train) { #ifdef USE_STATIC_DISPATCH return TypeDefault::dropout_(self, p, train); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::dropout_", ""}).value(); return c10::Dispatcher::singleton().callUnboxedOnly( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self, p, train); #endif } static inline Tensor feature_dropout(const Tensor & input, double p, bool train) { #ifdef USE_STATIC_DISPATCH 
return TypeDefault::feature_dropout(input, p, train); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::feature_dropout", ""}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(input)), input, p, train); #endif } static inline Tensor & feature_dropout_(Tensor & self, double p, bool train) { #ifdef USE_STATIC_DISPATCH return TypeDefault::feature_dropout_(self, p, train); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::feature_dropout_", ""}).value(); return c10::Dispatcher::singleton().callUnboxedOnly( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self, p, train); #endif } static inline Tensor alpha_dropout(const Tensor & input, double p, bool train) { #ifdef USE_STATIC_DISPATCH return TypeDefault::alpha_dropout(input, p, train); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::alpha_dropout", ""}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(input)), input, p, train); #endif } static inline Tensor & alpha_dropout_(Tensor & self, double p, bool train) { #ifdef USE_STATIC_DISPATCH return TypeDefault::alpha_dropout_(self, p, train); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::alpha_dropout_", ""}).value(); return c10::Dispatcher::singleton().callUnboxedOnly( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self, p, train); #endif } static inline Tensor feature_alpha_dropout(const Tensor & input, double p, bool train) { #ifdef USE_STATIC_DISPATCH return TypeDefault::feature_alpha_dropout(input, p, train); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::feature_alpha_dropout", ""}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(input)), input, p, train); #endif } static inline Tensor & feature_alpha_dropout_(Tensor & self, double p, bool train) { #ifdef USE_STATIC_DISPATCH return TypeDefault::feature_alpha_dropout_(self, p, train); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::feature_alpha_dropout_", ""}).value(); return c10::Dispatcher::singleton().callUnboxedOnly( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self, p, train); #endif } static inline Tensor abs(const Tensor & self) { #ifdef USE_STATIC_DISPATCH return TypeDefault::abs(self); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::abs", ""}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self); #endif } static inline Tensor & abs_(Tensor & self) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)))) { case Backend::CPU: return CPUType::abs_(self); break; default: AT_ERROR("abs_ not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(self))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::abs_", ""}).value(); return c10::Dispatcher::singleton().callUnboxedOnly( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self); #endif } static inline Tensor & abs_out(Tensor & out, const Tensor & self) { #ifdef 
USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(out, self)))) { case Backend::CPU: return CPUType::abs_out(out, self); break; default: AT_ERROR("abs_out not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(out, self))); } #else static auto table = globalATenDispatch().getOpTable("aten::abs.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)"); return table->callUnboxed(out, self); #endif } static inline Tensor acos(const Tensor & self) { #ifdef USE_STATIC_DISPATCH return TypeDefault::acos(self); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::acos", ""}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self); #endif } static inline Tensor & acos_(Tensor & self) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)))) { case Backend::CPU: return CPUType::acos_(self); break; default: AT_ERROR("acos_ not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(self))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::acos_", ""}).value(); return c10::Dispatcher::singleton().callUnboxedOnly( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self); #endif } static inline Tensor & acos_out(Tensor & out, const Tensor & self) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(out, self)))) { case Backend::CPU: return CPUType::acos_out(out, self); break; default: AT_ERROR("acos_out not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(out, self))); } #else static auto table = globalATenDispatch().getOpTable("aten::acos.out(Tensor self, *, Tensor(a!) 
out) -> Tensor(a!)"); return table->callUnboxed(out, self); #endif } static inline Tensor avg_pool1d(const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, bool ceil_mode, bool count_include_pad) { #ifdef USE_STATIC_DISPATCH return TypeDefault::avg_pool1d(self, kernel_size, stride, padding, ceil_mode, count_include_pad); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::avg_pool1d", ""}).value(); return c10::Dispatcher::singleton().callUnboxedOnly( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self, kernel_size, stride, padding, ceil_mode, count_include_pad); #endif } static inline Tensor adaptive_avg_pool1d(const Tensor & self, IntArrayRef output_size) { #ifdef USE_STATIC_DISPATCH return TypeDefault::adaptive_avg_pool1d(self, output_size); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::adaptive_avg_pool1d", ""}).value(); return c10::Dispatcher::singleton().callUnboxedOnly( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self, output_size); #endif } static inline std::tuple adaptive_max_pool1d(const Tensor & self, IntArrayRef output_size) { #ifdef USE_STATIC_DISPATCH return TypeDefault::adaptive_max_pool1d(self, output_size); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::adaptive_max_pool1d", ""}).value(); return c10::Dispatcher::singleton().callUnboxedOnly, const Tensor &, IntArrayRef>( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self, output_size); #endif } static inline Tensor add(const Tensor & self, const Tensor & other, Scalar alpha) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self, other)))) { case Backend::CPU: return CPUType::add(self, other, alpha); break; case Backend::SparseCPU: return SparseCPUType::add(self, other, alpha); break; default: AT_ERROR("add not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(self, other))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::add", "Tensor"}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self, other)), self, other, alpha); #endif } static inline Tensor & add_out(Tensor & out, const Tensor & self, const Tensor & other, Scalar alpha) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(out, self, other)))) { case Backend::CPU: return CPUType::add_out(out, self, other, alpha); break; case Backend::SparseCPU: return SparseCPUType::add_out(out, self, other, alpha); break; default: AT_ERROR("add_out not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(out, self, other))); } #else static auto table = globalATenDispatch().getOpTable("aten::add.out(Tensor self, Tensor other, *, Scalar alpha=1, Tensor(a!) 
out) -> Tensor(a!)"); return table->callUnboxed(out, self, other, alpha); #endif } static inline Tensor add(const Tensor & self, Scalar other, Scalar alpha) { #ifdef USE_STATIC_DISPATCH return TypeDefault::add(self, other, alpha); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::add", "Scalar"}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self, other, alpha); #endif } static inline Tensor addmv(const Tensor & self, const Tensor & mat, const Tensor & vec, Scalar beta, Scalar alpha) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self, mat, vec)))) { case Backend::CPU: return CPUType::addmv(self, mat, vec, beta, alpha); break; default: AT_ERROR("addmv not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(self, mat, vec))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::addmv", ""}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self, mat, vec)), self, mat, vec, beta, alpha); #endif } static inline Tensor & addmv_(Tensor & self, const Tensor & mat, const Tensor & vec, Scalar beta, Scalar alpha) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self, mat, vec)))) { case Backend::CPU: return CPUType::addmv_(self, mat, vec, beta, alpha); break; default: AT_ERROR("addmv_ not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(self, mat, vec))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::addmv_", ""}).value(); return c10::Dispatcher::singleton().callUnboxedOnly( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self, mat, vec)), self, mat, vec, beta, alpha); #endif } static inline Tensor & addmv_out(Tensor & out, const Tensor & self, const Tensor & mat, const Tensor & vec, Scalar beta, Scalar alpha) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(out, self, mat, vec)))) { case Backend::CPU: return CPUType::addmv_out(out, self, mat, vec, beta, alpha); break; default: AT_ERROR("addmv_out not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(out, self, mat, vec))); } #else static auto table = globalATenDispatch().getOpTable("aten::addmv.out(Tensor self, Tensor mat, Tensor vec, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) 
out) -> Tensor(a!)"); return table->callUnboxed(out, self, mat, vec, beta, alpha); #endif } static inline Tensor addr(const Tensor & self, const Tensor & vec1, const Tensor & vec2, Scalar beta, Scalar alpha) { #ifdef USE_STATIC_DISPATCH return TypeDefault::addr(self, vec1, vec2, beta, alpha); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::addr", ""}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self, vec1, vec2)), self, vec1, vec2, beta, alpha); #endif } static inline Tensor & addr_out(Tensor & out, const Tensor & self, const Tensor & vec1, const Tensor & vec2, Scalar beta, Scalar alpha) { #ifdef USE_STATIC_DISPATCH return TypeDefault::addr_out(out, self, vec1, vec2, beta, alpha); #else static auto table = globalATenDispatch().getOpTable("aten::addr.out(Tensor self, Tensor vec1, Tensor vec2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)"); return table->callUnboxed(out, self, vec1, vec2, beta, alpha); #endif } static inline Tensor affine_grid_generator(const Tensor & theta, IntArrayRef size, bool align_corners) { #ifdef USE_STATIC_DISPATCH return TypeDefault::affine_grid_generator(theta, size, align_corners); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::affine_grid_generator", ""}).value(); return c10::Dispatcher::singleton().callUnboxedOnly( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(theta)), theta, size, align_corners); #endif } static inline Tensor affine_grid_generator_backward(const Tensor & grad, IntArrayRef size, bool align_corners) { #ifdef USE_STATIC_DISPATCH return TypeDefault::affine_grid_generator_backward(grad, size, align_corners); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::affine_grid_generator_backward", ""}).value(); return c10::Dispatcher::singleton().callUnboxedOnly( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(grad)), grad, size, align_corners); #endif } static inline Tensor all(const Tensor & self, int64_t dim, bool keepdim) { #ifdef USE_STATIC_DISPATCH return TypeDefault::all(self, dim, keepdim); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::all", "dim"}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self, dim, keepdim); #endif } static inline Tensor & all_out(Tensor & out, const Tensor & self, int64_t dim, bool keepdim) { #ifdef USE_STATIC_DISPATCH return TypeDefault::all_out(out, self, dim, keepdim); #else static auto table = globalATenDispatch().getOpTable("aten::all.out(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) 
out) -> Tensor(a!)"); return table->callUnboxed(out, self, dim, keepdim); #endif } #ifdef BUILD_NAMEDTENSOR static inline Tensor all(const Tensor & self, Dimname dim, bool keepdim) { #ifdef USE_STATIC_DISPATCH return TypeDefault::all(self, dim, keepdim); #else static auto table = globalATenDispatch().getOpTable("aten::all.dimname(Tensor self, Dimname dim, bool keepdim=False) -> Tensor"); return table->callUnboxed(self, dim, keepdim); #endif } #endif #ifdef BUILD_NAMEDTENSOR static inline Tensor & all_out(Tensor & out, const Tensor & self, Dimname dim, bool keepdim) { #ifdef USE_STATIC_DISPATCH return TypeDefault::all_out(out, self, dim, keepdim); #else static auto table = globalATenDispatch().getOpTable("aten::all.dimname_out(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)"); return table->callUnboxed(out, self, dim, keepdim); #endif } #endif static inline bool allclose(const Tensor & self, const Tensor & other, double rtol, double atol, bool equal_nan) { #ifdef USE_STATIC_DISPATCH return TypeDefault::allclose(self, other, rtol, atol, equal_nan); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::allclose", ""}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self, other)), self, other, rtol, atol, equal_nan); #endif } static inline Tensor any(const Tensor & self, int64_t dim, bool keepdim) { #ifdef USE_STATIC_DISPATCH return TypeDefault::any(self, dim, keepdim); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::any", "dim"}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self, dim, keepdim); #endif } static inline Tensor & any_out(Tensor & out, const Tensor & self, int64_t dim, bool keepdim) { #ifdef USE_STATIC_DISPATCH return TypeDefault::any_out(out, self, dim, keepdim); #else static auto table = globalATenDispatch().getOpTable("aten::any.out(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)"); return table->callUnboxed(out, self, dim, keepdim); #endif } #ifdef BUILD_NAMEDTENSOR static inline Tensor any(const Tensor & self, Dimname dim, bool keepdim) { #ifdef USE_STATIC_DISPATCH return TypeDefault::any(self, dim, keepdim); #else static auto table = globalATenDispatch().getOpTable("aten::any.dimname(Tensor self, Dimname dim, bool keepdim=False) -> Tensor"); return table->callUnboxed(self, dim, keepdim); #endif } #endif #ifdef BUILD_NAMEDTENSOR static inline Tensor & any_out(Tensor & out, const Tensor & self, Dimname dim, bool keepdim) { #ifdef USE_STATIC_DISPATCH return TypeDefault::any_out(out, self, dim, keepdim); #else static auto table = globalATenDispatch().getOpTable("aten::any.dimname_out(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)"); return table->callUnboxed(out, self, dim, keepdim); #endif } #endif static inline Tensor arange(Scalar end, const TensorOptions & options) { #ifdef USE_STATIC_DISPATCH return TypeDefault::arange(end, options); #else globalLegacyTypeDispatch().initForTensorTypeSet(at::detail::multi_dispatch_tensor_type_set(options)); static auto table = globalATenDispatch().getOpTable("aten::arange(Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor"); return table->callUnboxed(end, options); #endif } static inline Tensor arange(Scalar start, Scalar end, const TensorOptions & options) { #ifdef USE_STATIC_DISPATCH return TypeDefault::arange(start, end, options); #else globalLegacyTypeDispatch().initForTensorTypeSet(at::detail::multi_dispatch_tensor_type_set(options)); static auto table = globalATenDispatch().getOpTable("aten::arange.start(Scalar start, Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor"); return table->callUnboxed(start, end, options); #endif } static inline Tensor arange(Scalar start, Scalar end, Scalar step, const TensorOptions & options) { #ifdef USE_STATIC_DISPATCH return TypeDefault::arange(start, end, step, options); #else globalLegacyTypeDispatch().initForTensorTypeSet(at::detail::multi_dispatch_tensor_type_set(options)); static auto table = globalATenDispatch().getOpTable("aten::arange.start_step(Scalar start, Scalar end, Scalar step, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor"); return table->callUnboxed(start, end, step, options); #endif } static inline Tensor & arange_out(Tensor & out, Scalar end) { #ifdef USE_STATIC_DISPATCH return TypeDefault::arange_out(out, end); #else static auto table = globalATenDispatch().getOpTable("aten::arange.out(Scalar end, *, Tensor(a!) out) -> Tensor(a!)"); return table->callUnboxed(out, end); #endif } static inline Tensor & arange_out(Tensor & out, Scalar start, Scalar end, Scalar step) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(out)))) { case Backend::CPU: return CPUType::arange_out(out, start, end, step); break; default: AT_ERROR("arange_out not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(out))); } #else static auto table = globalATenDispatch().getOpTable("aten::arange.start_out(Scalar start, Scalar end, Scalar step=1, *, Tensor(a!) 
out) -> Tensor(a!)"); return table->callUnboxed(out, start, end, step); #endif } static inline Tensor _dim_arange(const Tensor & like, int64_t dim) { #ifdef USE_STATIC_DISPATCH return TypeDefault::_dim_arange(like, dim); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::_dim_arange", ""}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(like)), like, dim); #endif } static inline Tensor argmax(const Tensor & self, c10::optional dim, bool keepdim) { #ifdef USE_STATIC_DISPATCH return TypeDefault::argmax(self, dim, keepdim); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::argmax", ""}).value(); return c10::Dispatcher::singleton().callUnboxed, bool>( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self, dim, keepdim); #endif } static inline Tensor argmin(const Tensor & self, c10::optional dim, bool keepdim) { #ifdef USE_STATIC_DISPATCH return TypeDefault::argmin(self, dim, keepdim); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::argmin", ""}).value(); return c10::Dispatcher::singleton().callUnboxed, bool>( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self, dim, keepdim); #endif } static inline Tensor as_strided(const Tensor & self, IntArrayRef size, IntArrayRef stride, c10::optional storage_offset) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)))) { case Backend::CPU: return CPUType::as_strided(self, size, stride, storage_offset); break; case Backend::QuantizedCPU: return QuantizedCPUType::as_strided(self, size, stride, storage_offset); break; default: AT_ERROR("as_strided not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(self))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::as_strided", ""}).value(); return c10::Dispatcher::singleton().callUnboxedOnly>( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self, size, stride, storage_offset); #endif } static inline Tensor & as_strided_(Tensor & self, IntArrayRef size, IntArrayRef stride, c10::optional storage_offset) { #ifdef USE_STATIC_DISPATCH return TypeDefault::as_strided_(self, size, stride, storage_offset); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::as_strided_", ""}).value(); return c10::Dispatcher::singleton().callUnboxedOnly>( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self, size, stride, storage_offset); #endif } static inline Tensor asin(const Tensor & self) { #ifdef USE_STATIC_DISPATCH return TypeDefault::asin(self); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::asin", ""}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self); #endif } static inline Tensor & asin_(Tensor & self) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)))) { case Backend::CPU: return CPUType::asin_(self); break; default: AT_ERROR("asin_ not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(self))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::asin_", ""}).value(); return 
c10::Dispatcher::singleton().callUnboxedOnly( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self); #endif } static inline Tensor & asin_out(Tensor & out, const Tensor & self) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(out, self)))) { case Backend::CPU: return CPUType::asin_out(out, self); break; default: AT_ERROR("asin_out not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(out, self))); } #else static auto table = globalATenDispatch().getOpTable("aten::asin.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)"); return table->callUnboxed(out, self); #endif } static inline Tensor atan(const Tensor & self) { #ifdef USE_STATIC_DISPATCH return TypeDefault::atan(self); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::atan", ""}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self); #endif } static inline Tensor & atan_(Tensor & self) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)))) { case Backend::CPU: return CPUType::atan_(self); break; default: AT_ERROR("atan_ not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(self))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::atan_", ""}).value(); return c10::Dispatcher::singleton().callUnboxedOnly( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self); #endif } static inline Tensor & atan_out(Tensor & out, const Tensor & self) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(out, self)))) { case Backend::CPU: return CPUType::atan_out(out, self); break; default: AT_ERROR("atan_out not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(out, self))); } #else static auto table = globalATenDispatch().getOpTable("aten::atan.out(Tensor self, *, Tensor(a!) 
out) -> Tensor(a!)"); return table->callUnboxed(out, self); #endif } static inline Tensor baddbmm(const Tensor & self, const Tensor & batch1, const Tensor & batch2, Scalar beta, Scalar alpha) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self, batch1, batch2)))) { case Backend::CPU: return CPUType::baddbmm(self, batch1, batch2, beta, alpha); break; default: AT_ERROR("baddbmm not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(self, batch1, batch2))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::baddbmm", ""}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self, batch1, batch2)), self, batch1, batch2, beta, alpha); #endif } static inline Tensor & _baddbmm_mkl_(Tensor & self, const Tensor & batch1, const Tensor & batch2, Scalar beta, Scalar alpha) { #ifdef USE_STATIC_DISPATCH return TypeDefault::_baddbmm_mkl_(self, batch1, batch2, beta, alpha); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::_baddbmm_mkl_", ""}).value(); return c10::Dispatcher::singleton().callUnboxedOnly( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self, batch1, batch2)), self, batch1, batch2, beta, alpha); #endif } static inline Tensor & baddbmm_out(Tensor & out, const Tensor & self, const Tensor & batch1, const Tensor & batch2, Scalar beta, Scalar alpha) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(out, self, batch1, batch2)))) { case Backend::CPU: return CPUType::baddbmm_out(out, self, batch1, batch2, beta, alpha); break; default: AT_ERROR("baddbmm_out not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(out, self, batch1, batch2))); } #else static auto table = globalATenDispatch().getOpTable("aten::baddbmm.out(Tensor self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)"); return table->callUnboxed(out, self, batch1, batch2, beta, alpha); #endif } static inline Tensor bartlett_window(int64_t window_length, const TensorOptions & options) { #ifdef USE_STATIC_DISPATCH return TypeDefault::bartlett_window(window_length, options); #else globalLegacyTypeDispatch().initForTensorTypeSet(at::detail::multi_dispatch_tensor_type_set(options)); static auto table = globalATenDispatch().getOpTable("aten::bartlett_window(int window_length, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor"); return table->callUnboxed(window_length, options); #endif } static inline Tensor bartlett_window(int64_t window_length, bool periodic, const TensorOptions & options) { #ifdef USE_STATIC_DISPATCH return TypeDefault::bartlett_window(window_length, periodic, options); #else globalLegacyTypeDispatch().initForTensorTypeSet(at::detail::multi_dispatch_tensor_type_set(options)); static auto table = globalATenDispatch().getOpTable("aten::bartlett_window.periodic(int window_length, bool periodic, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor"); return table->callUnboxed(window_length, periodic, options); #endif } static inline Tensor batch_norm(const Tensor & input, const Tensor & weight, const Tensor & bias, const Tensor & running_mean, const Tensor & running_var, bool training, double momentum, double eps, bool cudnn_enabled) { #ifdef USE_STATIC_DISPATCH return TypeDefault::batch_norm(input, weight, bias, running_mean, running_var, training, momentum, eps, cudnn_enabled); #else static auto table = globalATenDispatch().getOpTable("aten::batch_norm(Tensor input, Tensor? weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float momentum, float eps, bool cudnn_enabled) -> Tensor"); return table->callUnboxed(input, weight, bias, running_mean, running_var, training, momentum, eps, cudnn_enabled); #endif } static inline std::tuple _batch_norm_impl_index(const Tensor & input, const Tensor & weight, const Tensor & bias, const Tensor & running_mean, const Tensor & running_var, bool training, double momentum, double eps, bool cudnn_enabled) { #ifdef USE_STATIC_DISPATCH return TypeDefault::_batch_norm_impl_index(input, weight, bias, running_mean, running_var, training, momentum, eps, cudnn_enabled); #else static auto table = globalATenDispatch().getOpTable("aten::_batch_norm_impl_index(Tensor input, Tensor? weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float momentum, float eps, bool cudnn_enabled) -> (Tensor, Tensor, Tensor, int)"); return table->callUnboxed, const Tensor &, const Tensor &, const Tensor &, const Tensor &, const Tensor &, bool, double, double, bool>(input, weight, bias, running_mean, running_var, training, momentum, eps, cudnn_enabled); #endif } static inline std::tuple _batch_norm_impl_index_backward(int64_t impl_index, const Tensor & input, const Tensor & grad_output, const Tensor & weight, const Tensor & running_mean, const Tensor & running_var, const Tensor & save_mean, const Tensor & save_var_transform, bool train, double eps, std::array output_mask) { #ifdef USE_STATIC_DISPATCH return TypeDefault::_batch_norm_impl_index_backward(impl_index, input, grad_output, weight, running_mean, running_var, save_mean, save_var_transform, train, eps, output_mask); #else static auto table = globalATenDispatch().getOpTable("aten::_batch_norm_impl_index_backward(int impl_index, Tensor input, Tensor grad_output, Tensor? weight, Tensor? running_mean, Tensor? running_var, Tensor? save_mean, Tensor? 
save_var_transform, bool train, float eps, bool[3] output_mask) -> (Tensor, Tensor, Tensor)"); return table->callUnboxed, int64_t, const Tensor &, const Tensor &, const Tensor &, const Tensor &, const Tensor &, const Tensor &, const Tensor &, bool, double, std::array>(impl_index, input, grad_output, weight, running_mean, running_var, save_mean, save_var_transform, train, eps, output_mask); #endif } static inline Tensor bernoulli(const Tensor & self, Generator * generator) { #ifdef USE_STATIC_DISPATCH return TypeDefault::bernoulli(self, generator); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::bernoulli", ""}).value(); return c10::Dispatcher::singleton().callUnboxedOnly( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self, generator); #endif } static inline Tensor & bernoulli_out(Tensor & out, const Tensor & self, Generator * generator) { #ifdef USE_STATIC_DISPATCH return TypeDefault::bernoulli_out(out, self, generator); #else static auto table = globalATenDispatch().getOpTable("aten::bernoulli.out(Tensor self, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)"); return table->callUnboxed(out, self, generator); #endif } static inline Tensor bernoulli(const Tensor & self, double p, Generator * generator) { #ifdef USE_STATIC_DISPATCH return TypeDefault::bernoulli(self, p, generator); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::bernoulli", "p"}).value(); return c10::Dispatcher::singleton().callUnboxedOnly( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self, p, generator); #endif } static inline Tensor bilinear(const Tensor & input1, const Tensor & input2, const Tensor & weight, const Tensor & bias) { #ifdef USE_STATIC_DISPATCH return TypeDefault::bilinear(input1, input2, weight, bias); #else static auto table = globalATenDispatch().getOpTable("aten::bilinear(Tensor input1, Tensor input2, Tensor weight, Tensor? bias) -> Tensor"); return table->callUnboxed(input1, input2, weight, bias); #endif } static inline Tensor binary_cross_entropy_with_logits(const Tensor & self, const Tensor & target, const Tensor & weight, const Tensor & pos_weight, int64_t reduction) { #ifdef USE_STATIC_DISPATCH return TypeDefault::binary_cross_entropy_with_logits(self, target, weight, pos_weight, reduction); #else static auto table = globalATenDispatch().getOpTable("aten::binary_cross_entropy_with_logits(Tensor self, Tensor target, Tensor? weight=None, Tensor? pos_weight=None, int reduction=Mean) -> Tensor"); return table->callUnboxed(self, target, weight, pos_weight, reduction); #endif } static inline Tensor binary_cross_entropy_with_logits_backward(const Tensor & grad_output, const Tensor & self, const Tensor & target, const Tensor & weight, const Tensor & pos_weight, int64_t reduction) { #ifdef USE_STATIC_DISPATCH return TypeDefault::binary_cross_entropy_with_logits_backward(grad_output, self, target, weight, pos_weight, reduction); #else static auto table = globalATenDispatch().getOpTable("aten::binary_cross_entropy_with_logits_backward(Tensor grad_output, Tensor self, Tensor target, Tensor? weight=None, Tensor? 
pos_weight=None, int reduction=Mean) -> Tensor"); return table->callUnboxed(grad_output, self, target, weight, pos_weight, reduction); #endif } static inline Tensor bincount(const Tensor & self, const Tensor & weights, int64_t minlength) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self, weights)))) { case Backend::CPU: return CPUType::bincount(self, weights, minlength); break; default: AT_ERROR("bincount not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(self, weights))); } #else static auto table = globalATenDispatch().getOpTable("aten::bincount(Tensor self, Tensor? weights=None, int minlength=0) -> Tensor"); return table->callUnboxed(self, weights, minlength); #endif } static inline Tensor bitwise_not(const Tensor & self) { #ifdef USE_STATIC_DISPATCH return TypeDefault::bitwise_not(self); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::bitwise_not", ""}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self); #endif } static inline Tensor & bitwise_not_out(Tensor & out, const Tensor & self) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(out, self)))) { case Backend::CPU: return CPUType::bitwise_not_out(out, self); break; default: AT_ERROR("bitwise_not_out not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(out, self))); } #else static auto table = globalATenDispatch().getOpTable("aten::bitwise_not.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)"); return table->callUnboxed(out, self); #endif } static inline Tensor logical_not(const Tensor & self) { #ifdef USE_STATIC_DISPATCH return TypeDefault::logical_not(self); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::logical_not", ""}).value(); return c10::Dispatcher::singleton().callUnboxedOnly( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self); #endif } static inline Tensor & logical_not_out(Tensor & out, const Tensor & self) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(out, self)))) { case Backend::CPU: return CPUType::logical_not_out(out, self); break; default: AT_ERROR("logical_not_out not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(out, self))); } #else static auto table = globalATenDispatch().getOpTable("aten::logical_not.out(Tensor self, *, Tensor(a!) 
out) -> Tensor(a!)"); return table->callUnboxed(out, self); #endif } static inline Tensor logical_xor(const Tensor & self, const Tensor & other) { #ifdef USE_STATIC_DISPATCH return TypeDefault::logical_xor(self, other); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::logical_xor", ""}).value(); return c10::Dispatcher::singleton().callUnboxedOnly( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self, other)), self, other); #endif } static inline Tensor & logical_xor_out(Tensor & out, const Tensor & self, const Tensor & other) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(out, self, other)))) { case Backend::CPU: return CPUType::logical_xor_out(out, self, other); break; default: AT_ERROR("logical_xor_out not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(out, self, other))); } #else static auto table = globalATenDispatch().getOpTable("aten::logical_xor.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)"); return table->callUnboxed(out, self, other); #endif } static inline Tensor blackman_window(int64_t window_length, const TensorOptions & options) { #ifdef USE_STATIC_DISPATCH return TypeDefault::blackman_window(window_length, options); #else globalLegacyTypeDispatch().initForTensorTypeSet(at::detail::multi_dispatch_tensor_type_set(options)); static auto table = globalATenDispatch().getOpTable("aten::blackman_window(int window_length, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor"); return table->callUnboxed(window_length, options); #endif } static inline Tensor blackman_window(int64_t window_length, bool periodic, const TensorOptions & options) { #ifdef USE_STATIC_DISPATCH return TypeDefault::blackman_window(window_length, periodic, options); #else globalLegacyTypeDispatch().initForTensorTypeSet(at::detail::multi_dispatch_tensor_type_set(options)); static auto table = globalATenDispatch().getOpTable("aten::blackman_window.periodic(int window_length, bool periodic, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor"); return table->callUnboxed(window_length, periodic, options); #endif } static inline Tensor bmm(const Tensor & self, const Tensor & mat2) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self, mat2)))) { case Backend::CPU: return CPUType::bmm(self, mat2); break; default: AT_ERROR("bmm not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(self, mat2))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::bmm", ""}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self, mat2)), self, mat2); #endif } static inline Tensor & bmm_out(Tensor & out, const Tensor & self, const Tensor & mat2) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(out, self, mat2)))) { case Backend::CPU: return CPUType::bmm_out(out, self, mat2); break; default: AT_ERROR("bmm_out not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(out, self, mat2))); } #else static auto table = globalATenDispatch().getOpTable("aten::bmm.out(Tensor self, Tensor mat2, *, Tensor(a!) 
out) -> Tensor(a!)"); return table->callUnboxed(out, self, mat2); #endif } static inline std::vector broadcast_tensors(TensorList tensors) { #ifdef USE_STATIC_DISPATCH return TypeDefault::broadcast_tensors(tensors); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::broadcast_tensors", ""}).value(); return c10::Dispatcher::singleton().callUnboxedOnly, TensorList>( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(tensors)), tensors); #endif } static inline Tensor cat(TensorList tensors, int64_t dim) { #ifdef USE_STATIC_DISPATCH return TypeDefault::cat(tensors, dim); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::cat", ""}).value(); return c10::Dispatcher::singleton().callUnboxedOnly( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(tensors)), tensors, dim); #endif } static inline Tensor & cat_out(Tensor & out, TensorList tensors, int64_t dim) { #ifdef USE_STATIC_DISPATCH return TypeDefault::cat_out(out, tensors, dim); #else static auto table = globalATenDispatch().getOpTable("aten::cat.out(Tensor[] tensors, int dim=0, *, Tensor(a!) out) -> Tensor(a!)"); return table->callUnboxed(out, tensors, dim); #endif } #ifdef BUILD_NAMEDTENSOR static inline Tensor cat(TensorList tensors, Dimname dim) { #ifdef USE_STATIC_DISPATCH return TypeDefault::cat(tensors, dim); #else static auto table = globalATenDispatch().getOpTable("aten::cat.names(Tensor[] tensors, Dimname dim) -> Tensor"); return table->callUnboxed(tensors, dim); #endif } #endif #ifdef BUILD_NAMEDTENSOR static inline Tensor & cat_out(Tensor & out, TensorList tensors, Dimname dim) { #ifdef USE_STATIC_DISPATCH return TypeDefault::cat_out(out, tensors, dim); #else static auto table = globalATenDispatch().getOpTable("aten::cat.names_out(Tensor[] tensors, Dimname dim, *, Tensor(a!) out) -> Tensor(a!)"); return table->callUnboxed(out, tensors, dim); #endif } #endif static inline Tensor ceil(const Tensor & self) { #ifdef USE_STATIC_DISPATCH return TypeDefault::ceil(self); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::ceil", ""}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self); #endif } static inline Tensor & ceil_(Tensor & self) { #ifdef USE_STATIC_DISPATCH return TypeDefault::ceil_(self); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::ceil_", ""}).value(); return c10::Dispatcher::singleton().callUnboxedOnly( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self); #endif } static inline Tensor & ceil_out(Tensor & out, const Tensor & self) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(out, self)))) { case Backend::CPU: return CPUType::ceil_out(out, self); break; default: AT_ERROR("ceil_out not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(out, self))); } #else static auto table = globalATenDispatch().getOpTable("aten::ceil.out(Tensor self, *, Tensor(a!) 
out) -> Tensor(a!)"); return table->callUnboxed(out, self); #endif } static inline Tensor chain_matmul(TensorList matrices) { #ifdef USE_STATIC_DISPATCH return TypeDefault::chain_matmul(matrices); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::chain_matmul", ""}).value(); return c10::Dispatcher::singleton().callUnboxedOnly( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(matrices)), matrices); #endif } static inline std::vector chunk(const Tensor & self, int64_t chunks, int64_t dim) { #ifdef USE_STATIC_DISPATCH return TypeDefault::chunk(self, chunks, dim); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::chunk", ""}).value(); return c10::Dispatcher::singleton().callUnboxedOnly, const Tensor &, int64_t, int64_t>( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self, chunks, dim); #endif } static inline Tensor clamp(const Tensor & self, c10::optional min, c10::optional max) { #ifdef USE_STATIC_DISPATCH return TypeDefault::clamp(self, min, max); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::clamp", ""}).value(); return c10::Dispatcher::singleton().callUnboxed, c10::optional>( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self, min, max); #endif } static inline Tensor & clamp_(Tensor & self, c10::optional min, c10::optional max) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)))) { case Backend::CPU: return CPUType::clamp_(self, min, max); break; default: AT_ERROR("clamp_ not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(self))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::clamp_", ""}).value(); return c10::Dispatcher::singleton().callUnboxedOnly, c10::optional>( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self, min, max); #endif } static inline Tensor & clamp_out(Tensor & out, const Tensor & self, c10::optional min, c10::optional max) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(out, self)))) { case Backend::CPU: return CPUType::clamp_out(out, self, min, max); break; default: AT_ERROR("clamp_out not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(out, self))); } #else static auto table = globalATenDispatch().getOpTable("aten::clamp.out(Tensor self, Scalar? min=None, Scalar? max=None, *, Tensor(a!) 
out) -> Tensor(a!)"); return table->callUnboxed, c10::optional>(out, self, min, max); #endif } static inline Tensor clamp_max(const Tensor & self, Scalar max) { #ifdef USE_STATIC_DISPATCH return TypeDefault::clamp_max(self, max); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::clamp_max", ""}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self, max); #endif } static inline Tensor & clamp_max_(Tensor & self, Scalar max) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)))) { case Backend::CPU: return CPUType::clamp_max_(self, max); break; default: AT_ERROR("clamp_max_ not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(self))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::clamp_max_", ""}).value(); return c10::Dispatcher::singleton().callUnboxedOnly( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self, max); #endif } static inline Tensor & clamp_max_out(Tensor & out, const Tensor & self, Scalar max) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(out, self)))) { case Backend::CPU: return CPUType::clamp_max_out(out, self, max); break; default: AT_ERROR("clamp_max_out not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(out, self))); } #else static auto table = globalATenDispatch().getOpTable("aten::clamp_max.out(Tensor self, Scalar max, *, Tensor(a!) out) -> Tensor(a!)"); return table->callUnboxed(out, self, max); #endif } static inline Tensor clamp_min(const Tensor & self, Scalar min) { #ifdef USE_STATIC_DISPATCH return TypeDefault::clamp_min(self, min); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::clamp_min", ""}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self, min); #endif } static inline Tensor & clamp_min_(Tensor & self, Scalar min) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)))) { case Backend::CPU: return CPUType::clamp_min_(self, min); break; default: AT_ERROR("clamp_min_ not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(self))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::clamp_min_", ""}).value(); return c10::Dispatcher::singleton().callUnboxedOnly( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self, min); #endif } static inline Tensor & clamp_min_out(Tensor & out, const Tensor & self, Scalar min) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(out, self)))) { case Backend::CPU: return CPUType::clamp_min_out(out, self, min); break; default: AT_ERROR("clamp_min_out not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(out, self))); } #else static auto table = globalATenDispatch().getOpTable("aten::clamp_min.out(Tensor self, Scalar min, *, Tensor(a!) 
out) -> Tensor(a!)"); return table->callUnboxed(out, self, min); #endif } static inline bool cudnn_is_acceptable(const Tensor & self) { #ifdef USE_STATIC_DISPATCH return TypeDefault::cudnn_is_acceptable(self); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::cudnn_is_acceptable", ""}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self); #endif } static inline Tensor constant_pad_nd(const Tensor & self, IntArrayRef pad, Scalar value) { #ifdef USE_STATIC_DISPATCH return TypeDefault::constant_pad_nd(self, pad, value); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::constant_pad_nd", ""}).value(); return c10::Dispatcher::singleton().callUnboxedOnly( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self, pad, value); #endif } static inline Tensor convolution(const Tensor & input, const Tensor & weight, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool transposed, IntArrayRef output_padding, int64_t groups) { #ifdef USE_STATIC_DISPATCH return TypeDefault::convolution(input, weight, bias, stride, padding, dilation, transposed, output_padding, groups); #else static auto table = globalATenDispatch().getOpTable("aten::convolution(Tensor input, Tensor weight, Tensor? bias, int[] stride, int[] padding, int[] dilation, bool transposed, int[] output_padding, int groups) -> Tensor"); return table->callUnboxed(input, weight, bias, stride, padding, dilation, transposed, output_padding, groups); #endif } static inline Tensor convolution_overrideable(const Tensor & input, const Tensor & weight, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool transposed, IntArrayRef output_padding, int64_t groups) { #ifdef USE_STATIC_DISPATCH return TypeDefault::convolution_overrideable(input, weight, bias, stride, padding, dilation, transposed, output_padding, groups); #else static auto table = globalATenDispatch().getOpTable("aten::convolution_overrideable(Tensor input, Tensor weight, Tensor? 
bias, int[] stride, int[] padding, int[] dilation, bool transposed, int[] output_padding, int groups) -> Tensor"); return table->callUnboxed(input, weight, bias, stride, padding, dilation, transposed, output_padding, groups); #endif } static inline std::tuple convolution_backward_overrideable(const Tensor & grad_output, const Tensor & input, const Tensor & weight, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool transposed, IntArrayRef output_padding, int64_t groups, std::array output_mask) { #ifdef USE_STATIC_DISPATCH return TypeDefault::convolution_backward_overrideable(grad_output, input, weight, stride, padding, dilation, transposed, output_padding, groups, output_mask); #else static auto table = globalATenDispatch().getOpTable("aten::convolution_backward_overrideable(Tensor grad_output, Tensor input, Tensor weight, int[] stride, int[] padding, int[] dilation, bool transposed, int[] output_padding, int groups, bool[3] output_mask) -> (Tensor grad_input, Tensor grad_weight, Tensor grad_bias)"); return table->callUnboxed, const Tensor &, const Tensor &, const Tensor &, IntArrayRef, IntArrayRef, IntArrayRef, bool, IntArrayRef, int64_t, std::array>(grad_output, input, weight, stride, padding, dilation, transposed, output_padding, groups, output_mask); #endif } static inline Tensor _convolution(const Tensor & input, const Tensor & weight, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool transposed, IntArrayRef output_padding, int64_t groups, bool benchmark, bool deterministic, bool cudnn_enabled) { #ifdef USE_STATIC_DISPATCH return TypeDefault::_convolution(input, weight, bias, stride, padding, dilation, transposed, output_padding, groups, benchmark, deterministic, cudnn_enabled); #else static auto table = globalATenDispatch().getOpTable("aten::_convolution(Tensor input, Tensor weight, Tensor? bias, int[] stride, int[] padding, int[] dilation, bool transposed, int[] output_padding, int groups, bool benchmark, bool deterministic, bool cudnn_enabled) -> Tensor"); return table->callUnboxed(input, weight, bias, stride, padding, dilation, transposed, output_padding, groups, benchmark, deterministic, cudnn_enabled); #endif } static inline Tensor _convolution_nogroup(const Tensor & input, const Tensor & weight, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool transposed, IntArrayRef output_padding) { #ifdef USE_STATIC_DISPATCH return TypeDefault::_convolution_nogroup(input, weight, bias, stride, padding, dilation, transposed, output_padding); #else static auto table = globalATenDispatch().getOpTable("aten::_convolution_nogroup(Tensor input, Tensor weight, Tensor? 
bias, int[] stride, int[] padding, int[] dilation, bool transposed, int[] output_padding) -> Tensor"); return table->callUnboxed(input, weight, bias, stride, padding, dilation, transposed, output_padding); #endif } static inline std::tuple _convolution_double_backward(const Tensor & ggI, const Tensor & ggW, const Tensor & ggb, const Tensor & gO, const Tensor & weight, const Tensor & self, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool transposed, IntArrayRef output_padding, int64_t groups, bool benchmark, bool deterministic, bool cudnn_enabled, std::array output_mask) { #ifdef USE_STATIC_DISPATCH return TypeDefault::_convolution_double_backward(ggI, ggW, ggb, gO, weight, self, stride, padding, dilation, transposed, output_padding, groups, benchmark, deterministic, cudnn_enabled, output_mask); #else static auto table = globalATenDispatch().getOpTable("aten::_convolution_double_backward(Tensor? ggI, Tensor? ggW, Tensor? ggb, Tensor gO, Tensor weight, Tensor self, int[] stride, int[] padding, int[] dilation, bool transposed, int[] output_padding, int groups, bool benchmark, bool deterministic, bool cudnn_enabled, bool[3] output_mask) -> (Tensor, Tensor, Tensor)"); return table->callUnboxed, const Tensor &, const Tensor &, const Tensor &, const Tensor &, const Tensor &, const Tensor &, IntArrayRef, IntArrayRef, IntArrayRef, bool, IntArrayRef, int64_t, bool, bool, bool, std::array>(ggI, ggW, ggb, gO, weight, self, stride, padding, dilation, transposed, output_padding, groups, benchmark, deterministic, cudnn_enabled, output_mask); #endif } static inline Tensor conv1d(const Tensor & input, const Tensor & weight, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, int64_t groups) { #ifdef USE_STATIC_DISPATCH return TypeDefault::conv1d(input, weight, bias, stride, padding, dilation, groups); #else static auto table = globalATenDispatch().getOpTable("aten::conv1d(Tensor input, Tensor weight, Tensor? bias=None, int[1] stride=1, int[1] padding=0, int[1] dilation=1, int groups=1) -> Tensor"); return table->callUnboxed(input, weight, bias, stride, padding, dilation, groups); #endif } static inline Tensor conv2d(const Tensor & input, const Tensor & weight, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, int64_t groups) { #ifdef USE_STATIC_DISPATCH return TypeDefault::conv2d(input, weight, bias, stride, padding, dilation, groups); #else static auto table = globalATenDispatch().getOpTable("aten::conv2d(Tensor input, Tensor weight, Tensor? bias=None, int[2] stride=1, int[2] padding=0, int[2] dilation=1, int groups=1) -> Tensor"); return table->callUnboxed(input, weight, bias, stride, padding, dilation, groups); #endif } static inline Tensor conv3d(const Tensor & input, const Tensor & weight, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, int64_t groups) { #ifdef USE_STATIC_DISPATCH return TypeDefault::conv3d(input, weight, bias, stride, padding, dilation, groups); #else static auto table = globalATenDispatch().getOpTable("aten::conv3d(Tensor input, Tensor weight, Tensor? 
bias=None, int[3] stride=1, int[3] padding=0, int[3] dilation=1, int groups=1) -> Tensor"); return table->callUnboxed(input, weight, bias, stride, padding, dilation, groups); #endif } static inline Tensor conv_tbc(const Tensor & self, const Tensor & weight, const Tensor & bias, int64_t pad) { #ifdef USE_STATIC_DISPATCH return TypeDefault::conv_tbc(self, weight, bias, pad); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::conv_tbc", ""}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self, weight, bias)), self, weight, bias, pad); #endif } static inline std::tuple conv_tbc_backward(const Tensor & self, const Tensor & input, const Tensor & weight, const Tensor & bias, int64_t pad) { #ifdef USE_STATIC_DISPATCH return TypeDefault::conv_tbc_backward(self, input, weight, bias, pad); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::conv_tbc_backward", ""}).value(); return c10::Dispatcher::singleton().callUnboxedOnly, const Tensor &, const Tensor &, const Tensor &, const Tensor &, int64_t>( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self, input, weight, bias)), self, input, weight, bias, pad); #endif } static inline Tensor conv_transpose1d(const Tensor & input, const Tensor & weight, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, int64_t groups, IntArrayRef dilation) { #ifdef USE_STATIC_DISPATCH return TypeDefault::conv_transpose1d(input, weight, bias, stride, padding, output_padding, groups, dilation); #else static auto table = globalATenDispatch().getOpTable("aten::conv_transpose1d(Tensor input, Tensor weight, Tensor? bias=None, int[1] stride=1, int[1] padding=0, int[1] output_padding=0, int groups=1, int[1] dilation=1) -> Tensor"); return table->callUnboxed(input, weight, bias, stride, padding, output_padding, groups, dilation); #endif } static inline Tensor conv_transpose2d(const Tensor & input, const Tensor & weight, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, int64_t groups, IntArrayRef dilation) { #ifdef USE_STATIC_DISPATCH return TypeDefault::conv_transpose2d(input, weight, bias, stride, padding, output_padding, groups, dilation); #else static auto table = globalATenDispatch().getOpTable("aten::conv_transpose2d.input(Tensor input, Tensor weight, Tensor? bias=None, int[2] stride=1, int[2] padding=0, int[2] output_padding=0, int groups=1, int[2] dilation=1) -> Tensor"); return table->callUnboxed(input, weight, bias, stride, padding, output_padding, groups, dilation); #endif } static inline Tensor conv_transpose3d(const Tensor & input, const Tensor & weight, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, int64_t groups, IntArrayRef dilation) { #ifdef USE_STATIC_DISPATCH return TypeDefault::conv_transpose3d(input, weight, bias, stride, padding, output_padding, groups, dilation); #else static auto table = globalATenDispatch().getOpTable("aten::conv_transpose3d.input(Tensor input, Tensor weight, Tensor? 
bias=None, int[3] stride=1, int[3] padding=0, int[3] output_padding=0, int groups=1, int[3] dilation=1) -> Tensor"); return table->callUnboxed(input, weight, bias, stride, padding, output_padding, groups, dilation); #endif } static inline Tensor _copy_from(const Tensor & self, const Tensor & dst, bool non_blocking) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self, dst)))) { default: AT_ERROR("_copy_from not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(self, dst))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::_copy_from", ""}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self, dst)), self, dst, non_blocking); #endif } static inline Tensor cos(const Tensor & self) { #ifdef USE_STATIC_DISPATCH return TypeDefault::cos(self); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::cos", ""}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self); #endif } static inline Tensor & cos_(Tensor & self) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)))) { case Backend::CPU: return CPUType::cos_(self); break; default: AT_ERROR("cos_ not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(self))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::cos_", ""}).value(); return c10::Dispatcher::singleton().callUnboxedOnly( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self); #endif } static inline Tensor & cos_out(Tensor & out, const Tensor & self) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(out, self)))) { case Backend::CPU: return CPUType::cos_out(out, self); break; default: AT_ERROR("cos_out not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(out, self))); } #else static auto table = globalATenDispatch().getOpTable("aten::cos.out(Tensor self, *, Tensor(a!) 
out) -> Tensor(a!)"); return table->callUnboxed(out, self); #endif } static inline Tensor cosh(const Tensor & self) { #ifdef USE_STATIC_DISPATCH return TypeDefault::cosh(self); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::cosh", ""}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self); #endif } static inline Tensor & cosh_(Tensor & self) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)))) { case Backend::CPU: return CPUType::cosh_(self); break; default: AT_ERROR("cosh_ not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(self))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::cosh_", ""}).value(); return c10::Dispatcher::singleton().callUnboxedOnly( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self); #endif } static inline Tensor & cosh_out(Tensor & out, const Tensor & self) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(out, self)))) { case Backend::CPU: return CPUType::cosh_out(out, self); break; default: AT_ERROR("cosh_out not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(out, self))); } #else static auto table = globalATenDispatch().getOpTable("aten::cosh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)"); return table->callUnboxed(out, self); #endif } static inline Tensor cosine_embedding_loss(const Tensor & input1, const Tensor & input2, const Tensor & target, double margin, int64_t reduction) { #ifdef USE_STATIC_DISPATCH return TypeDefault::cosine_embedding_loss(input1, input2, target, margin, reduction); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::cosine_embedding_loss", ""}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(input1, input2, target)), input1, input2, target, margin, reduction); #endif } static inline Tensor cudnn_affine_grid_generator(const Tensor & theta, int64_t N, int64_t C, int64_t H, int64_t W) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(theta)))) { default: AT_ERROR("cudnn_affine_grid_generator not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(theta))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::cudnn_affine_grid_generator", ""}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(theta)), theta, N, C, H, W); #endif } static inline Tensor cudnn_affine_grid_generator_backward(const Tensor & grad, int64_t N, int64_t C, int64_t H, int64_t W) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(grad)))) { default: AT_ERROR("cudnn_affine_grid_generator_backward not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(grad))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::cudnn_affine_grid_generator_backward", ""}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(grad)), grad, N, C, H, W); #endif } static inline 
static inline std::tuple<Tensor,Tensor,Tensor> cudnn_batch_norm(const Tensor & input, const Tensor & weight, const Tensor & bias, const Tensor & running_mean, const Tensor & running_var, bool training, double exponential_average_factor, double epsilon) {
#ifdef USE_STATIC_DISPATCH
    switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(input, weight, bias, running_mean, running_var)))) {
        default:
            AT_ERROR("cudnn_batch_norm not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(input, weight, bias, running_mean, running_var)));
    }
#else
    static auto table = globalATenDispatch().getOpTable("aten::cudnn_batch_norm(Tensor input, Tensor weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float exponential_average_factor, float epsilon) -> (Tensor, Tensor, Tensor)");
    return table->callUnboxed<std::tuple<Tensor,Tensor,Tensor>, const Tensor &, const Tensor &, const Tensor &, const Tensor &, const Tensor &, bool, double, double>(input, weight, bias, running_mean, running_var, training, exponential_average_factor, epsilon);
#endif
}
static inline std::tuple<Tensor,Tensor,Tensor> cudnn_batch_norm_backward(const Tensor & input, const Tensor & grad_output, const Tensor & weight, const Tensor & running_mean, const Tensor & running_var, const Tensor & save_mean, const Tensor & save_var, double epsilon) {
#ifdef USE_STATIC_DISPATCH
    switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(input, grad_output, weight, running_mean, running_var, save_mean, save_var)))) {
        default:
            AT_ERROR("cudnn_batch_norm_backward not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(input, grad_output, weight, running_mean, running_var, save_mean, save_var)));
    }
#else
    static auto table = globalATenDispatch().getOpTable("aten::cudnn_batch_norm_backward(Tensor input, Tensor grad_output, Tensor weight, Tensor? running_mean, Tensor? running_var, Tensor? save_mean, Tensor? save_var, float epsilon) -> (Tensor, Tensor, Tensor)");
    return table->callUnboxed<std::tuple<Tensor,Tensor,Tensor>, const Tensor &, const Tensor &, const Tensor &, const Tensor &, const Tensor &, const Tensor &, const Tensor &, double>(input, grad_output, weight, running_mean, running_var, save_mean, save_var, epsilon);
#endif
}
static inline Tensor cudnn_convolution(const Tensor & self, const Tensor & weight, const Tensor & bias, IntArrayRef padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic) {
#ifdef USE_STATIC_DISPATCH
    switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self, weight, bias)))) {
        default:
            AT_ERROR("cudnn_convolution not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(self, weight, bias)));
    }
#else
    static auto table = globalATenDispatch().getOpTable("aten::cudnn_convolution(Tensor self, Tensor weight, Tensor?
bias, int[] padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic) -> Tensor"); return table->callUnboxed(self, weight, bias, padding, stride, dilation, groups, benchmark, deterministic); #endif } static inline Tensor cudnn_convolution_backward_input(IntArrayRef self_size, const Tensor & grad_output, const Tensor & weight, IntArrayRef padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(grad_output, weight)))) { default: AT_ERROR("cudnn_convolution_backward_input not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(grad_output, weight))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::cudnn_convolution_backward_input", ""}).value(); return c10::Dispatcher::singleton().callUnboxedOnly( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(grad_output, weight)), self_size, grad_output, weight, padding, stride, dilation, groups, benchmark, deterministic); #endif } static inline std::tuple cudnn_convolution_backward(const Tensor & self, const Tensor & grad_output, const Tensor & weight, IntArrayRef padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic, std::array output_mask) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self, grad_output, weight)))) { default: AT_ERROR("cudnn_convolution_backward not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(self, grad_output, weight))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::cudnn_convolution_backward", ""}).value(); return c10::Dispatcher::singleton().callUnboxedOnly, const Tensor &, const Tensor &, const Tensor &, IntArrayRef, IntArrayRef, IntArrayRef, int64_t, bool, bool, std::array>( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self, grad_output, weight)), self, grad_output, weight, padding, stride, dilation, groups, benchmark, deterministic, output_mask); #endif } static inline Tensor cudnn_convolution_backward_bias(const Tensor & grad_output) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(grad_output)))) { default: AT_ERROR("cudnn_convolution_backward_bias not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(grad_output))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::cudnn_convolution_backward_bias", ""}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(grad_output)), grad_output); #endif } static inline Tensor cudnn_convolution_backward_weight(IntArrayRef weight_size, const Tensor & grad_output, const Tensor & self, IntArrayRef padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(grad_output, self)))) { default: AT_ERROR("cudnn_convolution_backward_weight not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(grad_output, self))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() 
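        // Note on the dispatch pattern used throughout this header: the schema
        // lookup on the next line is cached in a function-local static
        // c10::OperatorHandle, so findSchema() runs only on the first call;
        // subsequent calls reuse the handle and merely recompute the dispatch
        // key from the tensor arguments.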
.findSchema({"aten::cudnn_convolution_backward_weight", ""}).value(); return c10::Dispatcher::singleton().callUnboxedOnly( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(grad_output, self)), weight_size, grad_output, self, padding, stride, dilation, groups, benchmark, deterministic); #endif } static inline Tensor cudnn_convolution_transpose(const Tensor & self, const Tensor & weight, const Tensor & bias, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self, weight, bias)))) { default: AT_ERROR("cudnn_convolution_transpose not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(self, weight, bias))); } #else static auto table = globalATenDispatch().getOpTable("aten::cudnn_convolution_transpose(Tensor self, Tensor weight, Tensor? bias, int[] padding, int[] output_padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic) -> Tensor"); return table->callUnboxed(self, weight, bias, padding, output_padding, stride, dilation, groups, benchmark, deterministic); #endif } static inline std::tuple cudnn_convolution_transpose_backward(const Tensor & self, const Tensor & grad_output, const Tensor & weight, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic, std::array output_mask) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self, grad_output, weight)))) { default: AT_ERROR("cudnn_convolution_transpose_backward not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(self, grad_output, weight))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::cudnn_convolution_transpose_backward", ""}).value(); return c10::Dispatcher::singleton().callUnboxedOnly, const Tensor &, const Tensor &, const Tensor &, IntArrayRef, IntArrayRef, IntArrayRef, IntArrayRef, int64_t, bool, bool, std::array>( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self, grad_output, weight)), self, grad_output, weight, padding, output_padding, stride, dilation, groups, benchmark, deterministic, output_mask); #endif } static inline Tensor cudnn_convolution_transpose_backward_bias(const Tensor & grad_output) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(grad_output)))) { default: AT_ERROR("cudnn_convolution_transpose_backward_bias not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(grad_output))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::cudnn_convolution_transpose_backward_bias", ""}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(grad_output)), grad_output); #endif } static inline Tensor cudnn_convolution_transpose_backward_input(const Tensor & grad_output, const Tensor & weight, IntArrayRef padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(grad_output, weight)))) { default: 
AT_ERROR("cudnn_convolution_transpose_backward_input not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(grad_output, weight))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::cudnn_convolution_transpose_backward_input", ""}).value(); return c10::Dispatcher::singleton().callUnboxedOnly( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(grad_output, weight)), grad_output, weight, padding, stride, dilation, groups, benchmark, deterministic); #endif } static inline Tensor cudnn_convolution_transpose_backward_weight(IntArrayRef weight_size, const Tensor & grad_output, const Tensor & self, IntArrayRef padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(grad_output, self)))) { default: AT_ERROR("cudnn_convolution_transpose_backward_weight not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(grad_output, self))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::cudnn_convolution_transpose_backward_weight", ""}).value(); return c10::Dispatcher::singleton().callUnboxedOnly( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(grad_output, self)), weight_size, grad_output, self, padding, stride, dilation, groups, benchmark, deterministic); #endif } static inline Tensor cudnn_grid_sampler(const Tensor & self, const Tensor & grid) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self, grid)))) { default: AT_ERROR("cudnn_grid_sampler not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(self, grid))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::cudnn_grid_sampler", ""}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self, grid)), self, grid); #endif } static inline std::tuple cudnn_grid_sampler_backward(const Tensor & self, const Tensor & grid, const Tensor & grad_output) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self, grid, grad_output)))) { default: AT_ERROR("cudnn_grid_sampler_backward not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(self, grid, grad_output))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::cudnn_grid_sampler_backward", ""}).value(); return c10::Dispatcher::singleton().callUnboxedOnly, const Tensor &, const Tensor &, const Tensor &>( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self, grid, grad_output)), self, grid, grad_output); #endif } static inline Tensor cumsum(const Tensor & self, int64_t dim, c10::optional dtype) { #ifdef USE_STATIC_DISPATCH return TypeDefault::cumsum(self, dim, dtype); #else static auto table = globalATenDispatch().getOpTable("aten::cumsum(Tensor self, int dim, *, ScalarType? 
dtype=None) -> Tensor"); return table->callUnboxed>(self, dim, dtype); #endif } static inline Tensor & cumsum_out(Tensor & out, const Tensor & self, int64_t dim, c10::optional dtype) { #ifdef USE_STATIC_DISPATCH return TypeDefault::cumsum_out(out, self, dim, dtype); #else static auto table = globalATenDispatch().getOpTable("aten::cumsum.out(Tensor self, int dim, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)"); return table->callUnboxed>(out, self, dim, dtype); #endif } #ifdef BUILD_NAMEDTENSOR static inline Tensor cumsum(const Tensor & self, Dimname dim, c10::optional dtype) { #ifdef USE_STATIC_DISPATCH return TypeDefault::cumsum(self, dim, dtype); #else static auto table = globalATenDispatch().getOpTable("aten::cumsum.dimname(Tensor self, Dimname dim, *, ScalarType? dtype=None) -> Tensor"); return table->callUnboxed>(self, dim, dtype); #endif } #endif #ifdef BUILD_NAMEDTENSOR static inline Tensor & cumsum_out(Tensor & out, const Tensor & self, Dimname dim, c10::optional dtype) { #ifdef USE_STATIC_DISPATCH return TypeDefault::cumsum_out(out, self, dim, dtype); #else static auto table = globalATenDispatch().getOpTable("aten::cumsum.dimname_out(Tensor self, Dimname dim, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)"); return table->callUnboxed>(out, self, dim, dtype); #endif } #endif static inline Tensor cumprod(const Tensor & self, int64_t dim, c10::optional dtype) { #ifdef USE_STATIC_DISPATCH return TypeDefault::cumprod(self, dim, dtype); #else static auto table = globalATenDispatch().getOpTable("aten::cumprod(Tensor self, int dim, *, ScalarType? dtype=None) -> Tensor"); return table->callUnboxed>(self, dim, dtype); #endif } static inline Tensor & cumprod_out(Tensor & out, const Tensor & self, int64_t dim, c10::optional dtype) { #ifdef USE_STATIC_DISPATCH return TypeDefault::cumprod_out(out, self, dim, dtype); #else static auto table = globalATenDispatch().getOpTable("aten::cumprod.out(Tensor self, int dim, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)"); return table->callUnboxed>(out, self, dim, dtype); #endif } #ifdef BUILD_NAMEDTENSOR static inline Tensor cumprod(const Tensor & self, Dimname dim, c10::optional dtype) { #ifdef USE_STATIC_DISPATCH return TypeDefault::cumprod(self, dim, dtype); #else static auto table = globalATenDispatch().getOpTable("aten::cumprod.dimname(Tensor self, Dimname dim, *, ScalarType? dtype=None) -> Tensor"); return table->callUnboxed>(self, dim, dtype); #endif } #endif #ifdef BUILD_NAMEDTENSOR static inline Tensor & cumprod_out(Tensor & out, const Tensor & self, Dimname dim, c10::optional dtype) { #ifdef USE_STATIC_DISPATCH return TypeDefault::cumprod_out(out, self, dim, dtype); #else static auto table = globalATenDispatch().getOpTable("aten::cumprod.dimname_out(Tensor self, Dimname dim, *, ScalarType? dtype=None, Tensor(a!) 
out) -> Tensor(a!)"); return table->callUnboxed>(out, self, dim, dtype); #endif } #endif static inline Tensor ctc_loss(const Tensor & log_probs, const Tensor & targets, IntArrayRef input_lengths, IntArrayRef target_lengths, int64_t blank, int64_t reduction, bool zero_infinity) { #ifdef USE_STATIC_DISPATCH return TypeDefault::ctc_loss(log_probs, targets, input_lengths, target_lengths, blank, reduction, zero_infinity); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::ctc_loss", "IntList"}).value(); return c10::Dispatcher::singleton().callUnboxedOnly( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(log_probs, targets)), log_probs, targets, input_lengths, target_lengths, blank, reduction, zero_infinity); #endif } static inline Tensor ctc_loss(const Tensor & log_probs, const Tensor & targets, const Tensor & input_lengths, const Tensor & target_lengths, int64_t blank, int64_t reduction, bool zero_infinity) { #ifdef USE_STATIC_DISPATCH return TypeDefault::ctc_loss(log_probs, targets, input_lengths, target_lengths, blank, reduction, zero_infinity); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::ctc_loss", "Tensor"}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(log_probs, targets, input_lengths, target_lengths)), log_probs, targets, input_lengths, target_lengths, blank, reduction, zero_infinity); #endif } static inline std::tuple _ctc_loss(const Tensor & log_probs, const Tensor & targets, IntArrayRef input_lengths, IntArrayRef target_lengths, int64_t blank, bool zero_infinity) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(log_probs, targets)))) { case Backend::CPU: return CPUType::_ctc_loss(log_probs, targets, input_lengths, target_lengths, blank, zero_infinity); break; default: AT_ERROR("_ctc_loss not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(log_probs, targets))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::_ctc_loss", ""}).value(); return c10::Dispatcher::singleton().callUnboxedOnly, const Tensor &, const Tensor &, IntArrayRef, IntArrayRef, int64_t, bool>( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(log_probs, targets)), log_probs, targets, input_lengths, target_lengths, blank, zero_infinity); #endif } static inline Tensor _ctc_loss_backward(const Tensor & grad, const Tensor & log_probs, const Tensor & targets, IntArrayRef input_lengths, IntArrayRef target_lengths, const Tensor & neg_log_likelihood, const Tensor & log_alpha, int64_t blank, bool zero_infinity) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(grad, log_probs, targets, neg_log_likelihood, log_alpha)))) { case Backend::CPU: return CPUType::_ctc_loss_backward(grad, log_probs, targets, input_lengths, target_lengths, neg_log_likelihood, log_alpha, blank, zero_infinity); break; default: AT_ERROR("_ctc_loss_backward not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(grad, log_probs, targets, neg_log_likelihood, log_alpha))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::_ctc_loss_backward", ""}).value(); return c10::Dispatcher::singleton().callUnboxedOnly( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(grad, log_probs, 
targets, neg_log_likelihood, log_alpha)), grad, log_probs, targets, input_lengths, target_lengths, neg_log_likelihood, log_alpha, blank, zero_infinity); #endif } static inline Tensor det(const Tensor & self) { #ifdef USE_STATIC_DISPATCH return TypeDefault::det(self); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::det", ""}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self); #endif } static inline Tensor diag_embed(const Tensor & self, int64_t offset, int64_t dim1, int64_t dim2) { #ifdef USE_STATIC_DISPATCH return TypeDefault::diag_embed(self, offset, dim1, dim2); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::diag_embed", ""}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self, offset, dim1, dim2); #endif } static inline Tensor diagflat(const Tensor & self, int64_t offset) { #ifdef USE_STATIC_DISPATCH return TypeDefault::diagflat(self, offset); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::diagflat", ""}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self, offset); #endif } static inline Tensor diagonal(const Tensor & self, int64_t offset, int64_t dim1, int64_t dim2) { #ifdef USE_STATIC_DISPATCH return TypeDefault::diagonal(self, offset, dim1, dim2); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::diagonal", ""}).value(); return c10::Dispatcher::singleton().callUnboxedOnly( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self, offset, dim1, dim2); #endif } static inline Tensor div(const Tensor & self, const Tensor & other) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self, other)))) { case Backend::CPU: return CPUType::div(self, other); break; case Backend::SparseCPU: return SparseCPUType::div(self, other); break; default: AT_ERROR("div not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(self, other))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::div", "Tensor"}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self, other)), self, other); #endif } static inline Tensor & div_out(Tensor & out, const Tensor & self, const Tensor & other) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(out, self, other)))) { case Backend::CPU: return CPUType::div_out(out, self, other); break; case Backend::SparseCPU: return SparseCPUType::div_out(out, self, other); break; default: AT_ERROR("div_out not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(out, self, other))); } #else static auto table = globalATenDispatch().getOpTable("aten::div.out(Tensor self, Tensor other, *, Tensor(a!) 
out) -> Tensor(a!)"); return table->callUnboxed(out, self, other); #endif } static inline Tensor div(const Tensor & self, Scalar other) { #ifdef USE_STATIC_DISPATCH return TypeDefault::div(self, other); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::div", "Scalar"}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self, other); #endif } static inline Tensor dot(const Tensor & self, const Tensor & tensor) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self, tensor)))) { case Backend::CPU: return CPUType::dot(self, tensor); break; default: AT_ERROR("dot not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(self, tensor))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::dot", ""}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self, tensor)), self, tensor); #endif } static inline Tensor & dot_out(Tensor & out, const Tensor & self, const Tensor & tensor) { #ifdef USE_STATIC_DISPATCH return TypeDefault::dot_out(out, self, tensor); #else static auto table = globalATenDispatch().getOpTable("aten::dot.out(Tensor self, Tensor tensor, *, Tensor(a!) out) -> Tensor(a!)"); return table->callUnboxed(out, self, tensor); #endif } static inline Tensor einsum(std::string equation, TensorList tensors) { #ifdef USE_STATIC_DISPATCH return TypeDefault::einsum(equation, tensors); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::einsum", ""}).value(); return c10::Dispatcher::singleton().callUnboxedOnly( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(tensors)), equation, tensors); #endif } static inline Tensor embedding(const Tensor & weight, const Tensor & indices, int64_t padding_idx, bool scale_grad_by_freq, bool sparse) { #ifdef USE_STATIC_DISPATCH return TypeDefault::embedding(weight, indices, padding_idx, scale_grad_by_freq, sparse); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::embedding", ""}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(weight, indices)), weight, indices, padding_idx, scale_grad_by_freq, sparse); #endif } static inline Tensor embedding_backward(const Tensor & grad, const Tensor & indices, int64_t num_weights, int64_t padding_idx, bool scale_grad_by_freq, bool sparse) { #ifdef USE_STATIC_DISPATCH return TypeDefault::embedding_backward(grad, indices, num_weights, padding_idx, scale_grad_by_freq, sparse); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::embedding_backward", ""}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(grad, indices)), grad, indices, num_weights, padding_idx, scale_grad_by_freq, sparse); #endif } static inline Tensor embedding_dense_backward(const Tensor & grad_output, const Tensor & indices, int64_t num_weights, int64_t padding_idx, bool scale_grad_by_freq) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(grad_output, indices)))) { case Backend::CPU: return CPUType::embedding_dense_backward(grad_output, indices, num_weights, padding_idx, scale_grad_by_freq); break; 
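// Usage sketch for the einsum and embedding wrappers above (a minimal example;
// the shapes and the batched-matmul equation are assumptions):
//
//   at::Tensor a = at::randn({8, 3, 4});
//   at::Tensor b = at::randn({8, 4, 5});
//   at::Tensor c = at::einsum("bij,bjk->bik", {a, b});           // (8, 3, 5)
//
//   at::Tensor weight  = at::randn({10, 4});                     // 10 embeddings, dim 4
//   at::Tensor indices = at::randint(0, 10, {3}, at::kLong);
//   at::Tensor emb = at::embedding(weight, indices, /*padding_idx=*/-1,
//                                  /*scale_grad_by_freq=*/false, /*sparse=*/false);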
default: AT_ERROR("embedding_dense_backward not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(grad_output, indices))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::embedding_dense_backward", ""}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(grad_output, indices)), grad_output, indices, num_weights, padding_idx, scale_grad_by_freq); #endif } static inline Tensor & embedding_renorm_(Tensor & self, const Tensor & indices, double max_norm, double norm_type) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self, indices)))) { case Backend::CPU: return CPUType::embedding_renorm_(self, indices, max_norm, norm_type); break; default: AT_ERROR("embedding_renorm_ not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(self, indices))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::embedding_renorm_", ""}).value(); return c10::Dispatcher::singleton().callUnboxedOnly( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self, indices)), self, indices, max_norm, norm_type); #endif } static inline Tensor embedding_sparse_backward(const Tensor & grad, const Tensor & indices, int64_t num_weights, int64_t padding_idx, bool scale_grad_by_freq) { #ifdef USE_STATIC_DISPATCH return TypeDefault::embedding_sparse_backward(grad, indices, num_weights, padding_idx, scale_grad_by_freq); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::embedding_sparse_backward", ""}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(grad, indices)), grad, indices, num_weights, padding_idx, scale_grad_by_freq); #endif } static inline std::tuple embedding_bag(const Tensor & weight, const Tensor & indices, const Tensor & offsets, bool scale_grad_by_freq, int64_t mode, bool sparse, const Tensor & per_sample_weights) { #ifdef USE_STATIC_DISPATCH return TypeDefault::embedding_bag(weight, indices, offsets, scale_grad_by_freq, mode, sparse, per_sample_weights); #else static auto table = globalATenDispatch().getOpTable("aten::embedding_bag(Tensor weight, Tensor indices, Tensor offsets, bool scale_grad_by_freq=False, int mode=0, bool sparse=False, Tensor? 
per_sample_weights=None) -> (Tensor, Tensor, Tensor, Tensor)"); return table->callUnboxed, const Tensor &, const Tensor &, const Tensor &, bool, int64_t, bool, const Tensor &>(weight, indices, offsets, scale_grad_by_freq, mode, sparse, per_sample_weights); #endif } static inline std::tuple _embedding_bag(const Tensor & weight, const Tensor & indices, const Tensor & offsets, bool scale_grad_by_freq, int64_t mode, bool sparse, const Tensor & per_sample_weights) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(weight, indices, offsets, per_sample_weights)))) { case Backend::CPU: return CPUType::_embedding_bag(weight, indices, offsets, scale_grad_by_freq, mode, sparse, per_sample_weights); break; default: AT_ERROR("_embedding_bag not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(weight, indices, offsets, per_sample_weights))); } #else static auto table = globalATenDispatch().getOpTable("aten::_embedding_bag(Tensor weight, Tensor indices, Tensor offsets, bool scale_grad_by_freq=False, int mode=0, bool sparse=False, Tensor? per_sample_weights=None) -> (Tensor, Tensor, Tensor, Tensor)"); return table->callUnboxed, const Tensor &, const Tensor &, const Tensor &, bool, int64_t, bool, const Tensor &>(weight, indices, offsets, scale_grad_by_freq, mode, sparse, per_sample_weights); #endif } static inline Tensor _embedding_bag_backward(const Tensor & grad, const Tensor & indices, const Tensor & offsets, const Tensor & offset2bag, const Tensor & bag_size, const Tensor & maximum_indices, int64_t num_weights, bool scale_grad_by_freq, int64_t mode, bool sparse, const Tensor & per_sample_weights) { #ifdef USE_STATIC_DISPATCH return TypeDefault::_embedding_bag_backward(grad, indices, offsets, offset2bag, bag_size, maximum_indices, num_weights, scale_grad_by_freq, mode, sparse, per_sample_weights); #else static auto table = globalATenDispatch().getOpTable("aten::_embedding_bag_backward(Tensor grad, Tensor indices, Tensor offsets, Tensor offset2bag, Tensor bag_size, Tensor maximum_indices, int num_weights, bool scale_grad_by_freq, int mode, bool sparse, Tensor? per_sample_weights) -> Tensor"); return table->callUnboxed(grad, indices, offsets, offset2bag, bag_size, maximum_indices, num_weights, scale_grad_by_freq, mode, sparse, per_sample_weights); #endif } static inline Tensor _embedding_bag_sparse_backward(const Tensor & grad, const Tensor & indices, const Tensor & offsets, const Tensor & offset2bag, const Tensor & bag_size, int64_t num_weights, bool scale_grad_by_freq, int64_t mode, const Tensor & per_sample_weights) { #ifdef USE_STATIC_DISPATCH return TypeDefault::_embedding_bag_sparse_backward(grad, indices, offsets, offset2bag, bag_size, num_weights, scale_grad_by_freq, mode, per_sample_weights); #else static auto table = globalATenDispatch().getOpTable("aten::_embedding_bag_sparse_backward(Tensor grad, Tensor indices, Tensor offsets, Tensor offset2bag, Tensor bag_size, int num_weights, bool scale_grad_by_freq, int mode, Tensor? 
per_sample_weights) -> Tensor"); return table->callUnboxed(grad, indices, offsets, offset2bag, bag_size, num_weights, scale_grad_by_freq, mode, per_sample_weights); #endif } static inline Tensor _embedding_bag_dense_backward(const Tensor & grad, const Tensor & indices, const Tensor & offsets, const Tensor & offset2bag, const Tensor & bag_size, const Tensor & maximum_indices, int64_t num_weights, bool scale_grad_by_freq, int64_t mode, const Tensor & per_sample_weights) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(grad, indices, offsets, offset2bag, bag_size, maximum_indices, per_sample_weights)))) { case Backend::CPU: return CPUType::_embedding_bag_dense_backward(grad, indices, offsets, offset2bag, bag_size, maximum_indices, num_weights, scale_grad_by_freq, mode, per_sample_weights); break; default: AT_ERROR("_embedding_bag_dense_backward not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(grad, indices, offsets, offset2bag, bag_size, maximum_indices, per_sample_weights))); } #else static auto table = globalATenDispatch().getOpTable("aten::_embedding_bag_dense_backward(Tensor grad, Tensor indices, Tensor offsets, Tensor offset2bag, Tensor bag_size, Tensor maximum_indices, int num_weights, bool scale_grad_by_freq, int mode, Tensor? per_sample_weights) -> Tensor"); return table->callUnboxed(grad, indices, offsets, offset2bag, bag_size, maximum_indices, num_weights, scale_grad_by_freq, mode, per_sample_weights); #endif } static inline Tensor _embedding_bag_per_sample_weights_backward(const Tensor & grad, const Tensor & weight, const Tensor & indices, const Tensor & offsets, const Tensor & offset2bag, int64_t mode) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(grad, weight, indices, offsets, offset2bag)))) { case Backend::CPU: return CPUType::_embedding_bag_per_sample_weights_backward(grad, weight, indices, offsets, offset2bag, mode); break; default: AT_ERROR("_embedding_bag_per_sample_weights_backward not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(grad, weight, indices, offsets, offset2bag))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::_embedding_bag_per_sample_weights_backward", ""}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(grad, weight, indices, offsets, offset2bag)), grad, weight, indices, offsets, offset2bag, mode); #endif } #ifdef BUILD_NAMEDTENSOR static inline Tensor empty(IntArrayRef size, c10::optional names, const TensorOptions & options, c10::optional memory_format) { #ifdef USE_STATIC_DISPATCH return TypeDefault::empty(size, names, options, memory_format); #else globalLegacyTypeDispatch().initForTensorTypeSet(at::detail::multi_dispatch_tensor_type_set(options)); static auto table = globalATenDispatch().getOpTable("aten::empty.names(int[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? 
memory_format=None) -> Tensor"); return table->callUnboxed, const TensorOptions &, c10::optional>(size, names, options, memory_format); #endif } #endif static inline Tensor empty(IntArrayRef size, const TensorOptions & options, c10::optional memory_format) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(options)))) { case Backend::CPU: return CPUType::empty(size, options, memory_format); break; case Backend::SparseCPU: return SparseCPUType::empty(size, options, memory_format); break; default: AT_ERROR("empty not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(options))); } #else globalLegacyTypeDispatch().initForTensorTypeSet(at::detail::multi_dispatch_tensor_type_set(options)); static auto table = globalATenDispatch().getOpTable("aten::empty.memory_format(int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor"); return table->callUnboxed>(size, options, memory_format); #endif } static inline Tensor _empty_affine_quantized(IntArrayRef size, const TensorOptions & options, double scale, int64_t zero_point, c10::optional memory_format) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(options)))) { case Backend::CPU: return CPUType::_empty_affine_quantized(size, options, scale, zero_point, memory_format); break; case Backend::QuantizedCPU: return QuantizedCPUType::_empty_affine_quantized(size, options, scale, zero_point, memory_format); break; default: AT_ERROR("_empty_affine_quantized not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(options))); } #else globalLegacyTypeDispatch().initForTensorTypeSet(at::detail::multi_dispatch_tensor_type_set(options)); static auto table = globalATenDispatch().getOpTable("aten::_empty_affine_quantized(int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, float scale=1, int zero_point=0, MemoryFormat? memory_format=contiguous_format) -> Tensor"); return table->callUnboxed>(size, options, scale, zero_point, memory_format); #endif } static inline Tensor _empty_per_channel_affine_quantized(IntArrayRef size, const Tensor & scales, const Tensor & zero_points, int64_t axis, const TensorOptions & options, c10::optional memory_format) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(scales, zero_points, options)))) { case Backend::CPU: return CPUType::_empty_per_channel_affine_quantized(size, scales, zero_points, axis, options, memory_format); break; case Backend::QuantizedCPU: return QuantizedCPUType::_empty_per_channel_affine_quantized(size, scales, zero_points, axis, options, memory_format); break; default: AT_ERROR("_empty_per_channel_affine_quantized not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(scales, zero_points, options))); } #else globalLegacyTypeDispatch().initForTensorTypeSet(at::detail::multi_dispatch_tensor_type_set(scales, zero_points, options)); static auto table = globalATenDispatch().getOpTable("aten::_empty_per_channel_affine_quantized(int[] size, *, Tensor scales, Tensor zero_points, int axis, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? 
memory_format=contiguous_format) -> Tensor"); return table->callUnboxed>(size, scales, zero_points, axis, options, memory_format); #endif } static inline Tensor & empty_out(Tensor & out, IntArrayRef size, c10::optional memory_format) { #ifdef USE_STATIC_DISPATCH return TypeDefault::empty_out(out, size, memory_format); #else static auto table = globalATenDispatch().getOpTable("aten::empty.out(int[] size, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)"); return table->callUnboxed>(out, size, memory_format); #endif } static inline Tensor empty_like(const Tensor & self) { #ifdef USE_STATIC_DISPATCH return TypeDefault::empty_like(self); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::empty_like", ""}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self); #endif } static inline Tensor empty_like(const Tensor & self, const TensorOptions & options, c10::optional memory_format) { #ifdef USE_STATIC_DISPATCH return TypeDefault::empty_like(self, options, memory_format); #else globalLegacyTypeDispatch().initForTensorTypeSet(at::detail::multi_dispatch_tensor_type_set(self, options)); static auto table = globalATenDispatch().getOpTable("aten::empty_like.dtype(Tensor self, *, ScalarType dtype, Layout layout, Device device, bool pin_memory=False, MemoryFormat? memory_format=contiguous_format) -> Tensor"); return table->callUnboxed>(self, options, memory_format); #endif } static inline Tensor empty_strided(IntArrayRef size, IntArrayRef stride, const TensorOptions & options) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(options)))) { case Backend::CPU: return CPUType::empty_strided(size, stride, options); break; default: AT_ERROR("empty_strided not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(options))); } #else globalLegacyTypeDispatch().initForTensorTypeSet(at::detail::multi_dispatch_tensor_type_set(options)); static auto table = globalATenDispatch().getOpTable("aten::empty_strided(int[] size, int[] stride, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor");
    return table->callUnboxed<Tensor, IntArrayRef, IntArrayRef, const TensorOptions &>(size, stride, options);
#endif
}
static inline Tensor erf(const Tensor & self) {
#ifdef USE_STATIC_DISPATCH
    return TypeDefault::erf(self);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton()
        .findSchema({"aten::erf", ""}).value();
    return c10::Dispatcher::singleton().callUnboxed<Tensor, const Tensor &>(
        op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self);
#endif
}
static inline Tensor & erf_(Tensor & self) {
#ifdef USE_STATIC_DISPATCH
    switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)))) {
        case Backend::CPU: return CPUType::erf_(self); break;
        default: AT_ERROR("erf_ not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(self)));
    }
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton()
        .findSchema({"aten::erf_", ""}).value();
    return c10::Dispatcher::singleton().callUnboxedOnly<Tensor &, Tensor &>(
        op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self);
#endif
}
static inline Tensor & erf_out(Tensor & out, const Tensor & self) {
#ifdef USE_STATIC_DISPATCH
    switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(out, self)))) {
        case Backend::CPU: return CPUType::erf_out(out, self); break;
        default: AT_ERROR("erf_out not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(out, self)));
    }
#else
    static auto table = globalATenDispatch().getOpTable("aten::erf.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)");
    return table->callUnboxed<Tensor &, Tensor &, const Tensor &>(out, self);
#endif
}
static inline Tensor erfc(const Tensor & self) {
#ifdef USE_STATIC_DISPATCH
    return TypeDefault::erfc(self);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton()
        .findSchema({"aten::erfc", ""}).value();
    return c10::Dispatcher::singleton().callUnboxed<Tensor, const Tensor &>(
        op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self);
#endif
}
static inline Tensor & erfc_(Tensor & self) {
#ifdef USE_STATIC_DISPATCH
    switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)))) {
        case Backend::CPU: return CPUType::erfc_(self); break;
        default: AT_ERROR("erfc_ not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(self)));
    }
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton()
        .findSchema({"aten::erfc_", ""}).value();
    return c10::Dispatcher::singleton().callUnboxedOnly<Tensor &, Tensor &>(
        op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self);
#endif
}
static inline Tensor & erfc_out(Tensor & out, const Tensor & self) {
#ifdef USE_STATIC_DISPATCH
    switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(out, self)))) {
        case Backend::CPU: return CPUType::erfc_out(out, self); break;
        default: AT_ERROR("erfc_out not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(out, self)));
    }
#else
    static auto table = globalATenDispatch().getOpTable("aten::erfc.out(Tensor self, *, Tensor(a!)
out) -> Tensor(a!)"); return table->callUnboxed(out, self); #endif } static inline Tensor exp(const Tensor & self) { #ifdef USE_STATIC_DISPATCH return TypeDefault::exp(self); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::exp", ""}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self); #endif } static inline Tensor & exp_(Tensor & self) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)))) { case Backend::CPU: return CPUType::exp_(self); break; default: AT_ERROR("exp_ not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(self))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::exp_", ""}).value(); return c10::Dispatcher::singleton().callUnboxedOnly( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self); #endif } static inline Tensor & exp_out(Tensor & out, const Tensor & self) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(out, self)))) { case Backend::CPU: return CPUType::exp_out(out, self); break; default: AT_ERROR("exp_out not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(out, self))); } #else static auto table = globalATenDispatch().getOpTable("aten::exp.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)"); return table->callUnboxed(out, self); #endif } static inline Tensor expm1(const Tensor & self) { #ifdef USE_STATIC_DISPATCH return TypeDefault::expm1(self); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::expm1", ""}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self); #endif } static inline Tensor & expm1_(Tensor & self) { #ifdef USE_STATIC_DISPATCH return TypeDefault::expm1_(self); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::expm1_", ""}).value(); return c10::Dispatcher::singleton().callUnboxedOnly( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self); #endif } static inline Tensor & expm1_out(Tensor & out, const Tensor & self) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(out, self)))) { case Backend::CPU: return CPUType::expm1_out(out, self); break; default: AT_ERROR("expm1_out not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(out, self))); } #else static auto table = globalATenDispatch().getOpTable("aten::expm1.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)"); return table->callUnboxed(out, self); #endif } static inline Tensor eye(int64_t n, const TensorOptions & options) { #ifdef USE_STATIC_DISPATCH return TypeDefault::eye(n, options); #else globalLegacyTypeDispatch().initForTensorTypeSet(at::detail::multi_dispatch_tensor_type_set(options)); static auto table = globalATenDispatch().getOpTable("aten::eye(int n, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor"); return table->callUnboxed(n, options); #endif } static inline Tensor eye(int64_t n, int64_t m, const TensorOptions & options) { #ifdef USE_STATIC_DISPATCH return TypeDefault::eye(n, m, options); #else globalLegacyTypeDispatch().initForTensorTypeSet(at::detail::multi_dispatch_tensor_type_set(options)); static auto table = globalATenDispatch().getOpTable("aten::eye.m(int n, int m, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor"); return table->callUnboxed(n, m, options); #endif } static inline Tensor & eye_out(Tensor & out, int64_t n) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(out)))) { case Backend::CPU: return CPUType::eye_out(out, n); break; default: AT_ERROR("eye_out not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(out))); } #else static auto table = globalATenDispatch().getOpTable("aten::eye.out(int n, *, Tensor(a!) out) -> Tensor(a!)"); return table->callUnboxed(out, n); #endif } static inline Tensor & eye_out(Tensor & out, int64_t n, int64_t m) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(out)))) { case Backend::CPU: return CPUType::eye_out(out, n, m); break; default: AT_ERROR("eye_out not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(out))); } #else static auto table = globalATenDispatch().getOpTable("aten::eye.m_out(int n, int m, *, Tensor(a!) out) -> Tensor(a!)"); return table->callUnboxed(out, n, m); #endif } static inline Tensor flatten(const Tensor & self, int64_t start_dim, int64_t end_dim) { #ifdef USE_STATIC_DISPATCH return TypeDefault::flatten(self, start_dim, end_dim); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::flatten", "using_ints"}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self, start_dim, end_dim); #endif } #ifdef BUILD_NAMEDTENSOR static inline Tensor flatten(const Tensor & self, int64_t start_dim, int64_t end_dim, Dimname out_dim) { #ifdef USE_STATIC_DISPATCH return TypeDefault::flatten(self, start_dim, end_dim, out_dim); #else static auto table = globalATenDispatch().getOpTable("aten::flatten.named_out_dim(Tensor self, int start_dim, int end_dim, Dimname out_dim) -> Tensor"); return table->callUnboxed(self, start_dim, end_dim, out_dim); #endif } #endif #ifdef BUILD_NAMEDTENSOR static inline Tensor flatten(const Tensor & self, Dimname start_dim, Dimname end_dim, Dimname out_dim) { #ifdef USE_STATIC_DISPATCH return TypeDefault::flatten(self, start_dim, end_dim, out_dim); #else static auto table = globalATenDispatch().getOpTable("aten::flatten.using_names(Tensor self, Dimname start_dim, Dimname end_dim, Dimname out_dim) -> Tensor"); return table->callUnboxed(self, start_dim, end_dim, out_dim); #endif } #endif #ifdef BUILD_NAMEDTENSOR static inline Tensor flatten(const Tensor & self, DimnameList dims, Dimname out_dim) { #ifdef USE_STATIC_DISPATCH return TypeDefault::flatten(self, dims, out_dim); #else static auto table = globalATenDispatch().getOpTable("aten::flatten.DimnameList(Tensor self, DimnameList dims, Dimname out_dim) -> Tensor"); return table->callUnboxed(self, dims, out_dim); #endif } #endif static inline Tensor & fill_(Tensor & self, Scalar value) { #ifdef USE_STATIC_DISPATCH return TypeDefault::fill_(self, value); #else static 
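// Usage sketch for the eye and flatten wrappers above (a minimal example; the
// reliance on default TensorOptions and the kDouble option are assumptions):
//
//   at::Tensor I  = at::eye(3);                                   // 3x3 identity
//   at::Tensor Im = at::eye(3, 4, at::kDouble);                   // 3x4, double precision
//   at::Tensor x  = at::randn({2, 3, 4});
//   at::Tensor f  = at::flatten(x, /*start_dim=*/1, /*end_dim=*/2);   // (2, 12)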
c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::fill_", "Scalar"}).value(); return c10::Dispatcher::singleton().callUnboxedOnly( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self, value); #endif } static inline Tensor & fill_(Tensor & self, const Tensor & value) { #ifdef USE_STATIC_DISPATCH return TypeDefault::fill_(self, value); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::fill_", "Tensor"}).value(); return c10::Dispatcher::singleton().callUnboxedOnly( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self, value)), self, value); #endif } static inline Tensor floor(const Tensor & self) { #ifdef USE_STATIC_DISPATCH return TypeDefault::floor(self); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::floor", ""}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self); #endif } static inline Tensor & floor_(Tensor & self) { #ifdef USE_STATIC_DISPATCH return TypeDefault::floor_(self); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::floor_", ""}).value(); return c10::Dispatcher::singleton().callUnboxedOnly( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self); #endif } static inline Tensor & floor_out(Tensor & out, const Tensor & self) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(out, self)))) { case Backend::CPU: return CPUType::floor_out(out, self); break; default: AT_ERROR("floor_out not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(out, self))); } #else static auto table = globalATenDispatch().getOpTable("aten::floor.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)"); return table->callUnboxed(out, self); #endif } static inline Tensor frac(const Tensor & self) { #ifdef USE_STATIC_DISPATCH return TypeDefault::frac(self); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::frac", ""}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self); #endif } static inline Tensor & frac_(Tensor & self) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)))) { case Backend::CPU: return CPUType::frac_(self); break; default: AT_ERROR("frac_ not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(self))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::frac_", ""}).value(); return c10::Dispatcher::singleton().callUnboxedOnly( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self); #endif } static inline Tensor & frac_out(Tensor & out, const Tensor & self) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(out, self)))) { case Backend::CPU: return CPUType::frac_out(out, self); break; default: AT_ERROR("frac_out not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(out, self))); } #else static auto table = globalATenDispatch().getOpTable("aten::frac.out(Tensor self, *, Tensor(a!) 
out) -> Tensor(a!)"); return table->callUnboxed(out, self); #endif } #ifdef BUILD_NAMEDTENSOR static inline Tensor full(IntArrayRef size, Scalar fill_value, c10::optional names, const TensorOptions & options) { #ifdef USE_STATIC_DISPATCH return TypeDefault::full(size, fill_value, names, options); #else globalLegacyTypeDispatch().initForTensorTypeSet(at::detail::multi_dispatch_tensor_type_set(options)); static auto table = globalATenDispatch().getOpTable("aten::full.names(int[] size, Scalar fill_value, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor"); return table->callUnboxed, const TensorOptions &>(size, fill_value, names, options); #endif } #endif static inline Tensor full(IntArrayRef size, Scalar fill_value, const TensorOptions & options) { #ifdef USE_STATIC_DISPATCH return TypeDefault::full(size, fill_value, options); #else globalLegacyTypeDispatch().initForTensorTypeSet(at::detail::multi_dispatch_tensor_type_set(options)); static auto table = globalATenDispatch().getOpTable("aten::full(int[] size, Scalar fill_value, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor"); return table->callUnboxed(size, fill_value, options); #endif } static inline Tensor & full_out(Tensor & out, IntArrayRef size, Scalar fill_value) { #ifdef USE_STATIC_DISPATCH return TypeDefault::full_out(out, size, fill_value); #else static auto table = globalATenDispatch().getOpTable("aten::full.out(int[] size, Scalar fill_value, *, Tensor(a!) out) -> Tensor(a!)"); return table->callUnboxed(out, size, fill_value); #endif } static inline Tensor full_like(const Tensor & self, Scalar fill_value) { #ifdef USE_STATIC_DISPATCH return TypeDefault::full_like(self, fill_value); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::full_like", ""}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self, fill_value); #endif } static inline Tensor full_like(const Tensor & self, Scalar fill_value, const TensorOptions & options) { #ifdef USE_STATIC_DISPATCH return TypeDefault::full_like(self, fill_value, options); #else globalLegacyTypeDispatch().initForTensorTypeSet(at::detail::multi_dispatch_tensor_type_set(self, options)); static auto table = globalATenDispatch().getOpTable("aten::full_like.dtype(Tensor self, Scalar fill_value, *, ScalarType dtype, Layout layout, Device device, bool pin_memory=False) -> Tensor"); return table->callUnboxed(self, fill_value, options); #endif } static inline Tensor from_file(std::string filename, c10::optional shared, c10::optional size, const TensorOptions & options) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(options)))) { case Backend::CPU: return CPUType::from_file(filename, shared, size, options); break; default: AT_ERROR("from_file not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(options))); } #else globalLegacyTypeDispatch().initForTensorTypeSet(at::detail::multi_dispatch_tensor_type_set(options)); static auto table = globalATenDispatch().getOpTable("aten::from_file(str filename, bool? shared=None, int? size=0, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor"); return table->callUnboxed, c10::optional, const TensorOptions &>(filename, shared, size, options); #endif } static inline Tensor grid_sampler(const Tensor & input, const Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners) { #ifdef USE_STATIC_DISPATCH return TypeDefault::grid_sampler(input, grid, interpolation_mode, padding_mode, align_corners); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::grid_sampler", ""}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(input, grid)), input, grid, interpolation_mode, padding_mode, align_corners); #endif } static inline Tensor grid_sampler_2d(const Tensor & input, const Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(input, grid)))) { case Backend::CPU: return CPUType::grid_sampler_2d(input, grid, interpolation_mode, padding_mode, align_corners); break; default: AT_ERROR("grid_sampler_2d not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(input, grid))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::grid_sampler_2d", ""}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(input, grid)), input, grid, interpolation_mode, padding_mode, align_corners); #endif } static inline std::tuple grid_sampler_2d_backward(const Tensor & grad_output, const Tensor & input, const Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(grad_output, input, grid)))) { case Backend::CPU: return CPUType::grid_sampler_2d_backward(grad_output, input, grid, interpolation_mode, padding_mode, align_corners); break; default: AT_ERROR("grid_sampler_2d_backward not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(grad_output, input, grid))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::grid_sampler_2d_backward", ""}).value(); return c10::Dispatcher::singleton().callUnboxedOnly, const Tensor &, const Tensor &, const Tensor &, int64_t, int64_t, bool>( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(grad_output, input, grid)), grad_output, input, grid, interpolation_mode, padding_mode, align_corners); #endif } static inline Tensor grid_sampler_3d(const Tensor & input, const Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(input, grid)))) { case Backend::CPU: return CPUType::grid_sampler_3d(input, grid, interpolation_mode, padding_mode, align_corners); break; default: AT_ERROR("grid_sampler_3d not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(input, grid))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::grid_sampler_3d", ""}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(input, grid)), input, grid, interpolation_mode, padding_mode, align_corners); #endif } static inline 
std::tuple grid_sampler_3d_backward(const Tensor & grad_output, const Tensor & input, const Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(grad_output, input, grid)))) { case Backend::CPU: return CPUType::grid_sampler_3d_backward(grad_output, input, grid, interpolation_mode, padding_mode, align_corners); break; default: AT_ERROR("grid_sampler_3d_backward not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(grad_output, input, grid))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::grid_sampler_3d_backward", ""}).value(); return c10::Dispatcher::singleton().callUnboxedOnly, const Tensor &, const Tensor &, const Tensor &, int64_t, int64_t, bool>( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(grad_output, input, grid)), grad_output, input, grid, interpolation_mode, padding_mode, align_corners); #endif } static inline Tensor hann_window(int64_t window_length, const TensorOptions & options) { #ifdef USE_STATIC_DISPATCH return TypeDefault::hann_window(window_length, options); #else globalLegacyTypeDispatch().initForTensorTypeSet(at::detail::multi_dispatch_tensor_type_set(options)); static auto table = globalATenDispatch().getOpTable("aten::hann_window(int window_length, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor"); return table->callUnboxed(window_length, options); #endif } static inline Tensor hann_window(int64_t window_length, bool periodic, const TensorOptions & options) { #ifdef USE_STATIC_DISPATCH return TypeDefault::hann_window(window_length, periodic, options); #else globalLegacyTypeDispatch().initForTensorTypeSet(at::detail::multi_dispatch_tensor_type_set(options)); static auto table = globalATenDispatch().getOpTable("aten::hann_window.periodic(int window_length, bool periodic, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor"); return table->callUnboxed(window_length, periodic, options); #endif } static inline Tensor hamming_window(int64_t window_length, const TensorOptions & options) { #ifdef USE_STATIC_DISPATCH return TypeDefault::hamming_window(window_length, options); #else globalLegacyTypeDispatch().initForTensorTypeSet(at::detail::multi_dispatch_tensor_type_set(options)); static auto table = globalATenDispatch().getOpTable("aten::hamming_window(int window_length, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor"); return table->callUnboxed(window_length, options); #endif } static inline Tensor hamming_window(int64_t window_length, bool periodic, const TensorOptions & options) { #ifdef USE_STATIC_DISPATCH return TypeDefault::hamming_window(window_length, periodic, options); #else globalLegacyTypeDispatch().initForTensorTypeSet(at::detail::multi_dispatch_tensor_type_set(options)); static auto table = globalATenDispatch().getOpTable("aten::hamming_window.periodic(int window_length, bool periodic, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor"); return table->callUnboxed(window_length, periodic, options); #endif } static inline Tensor hamming_window(int64_t window_length, bool periodic, double alpha, const TensorOptions & options) { #ifdef USE_STATIC_DISPATCH return TypeDefault::hamming_window(window_length, periodic, alpha, options); #else globalLegacyTypeDispatch().initForTensorTypeSet(at::detail::multi_dispatch_tensor_type_set(options)); static auto table = globalATenDispatch().getOpTable("aten::hamming_window.periodic_alpha(int window_length, bool periodic, float alpha, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor"); return table->callUnboxed(window_length, periodic, alpha, options); #endif } static inline Tensor hamming_window(int64_t window_length, bool periodic, double alpha, double beta, const TensorOptions & options) { #ifdef USE_STATIC_DISPATCH return TypeDefault::hamming_window(window_length, periodic, alpha, beta, options); #else globalLegacyTypeDispatch().initForTensorTypeSet(at::detail::multi_dispatch_tensor_type_set(options)); static auto table = globalATenDispatch().getOpTable("aten::hamming_window.periodic_alpha_beta(int window_length, bool periodic, float alpha, float beta, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor"); return table->callUnboxed(window_length, periodic, alpha, beta, options); #endif } static inline Tensor hinge_embedding_loss(const Tensor & self, const Tensor & target, double margin, int64_t reduction) { #ifdef USE_STATIC_DISPATCH return TypeDefault::hinge_embedding_loss(self, target, margin, reduction); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::hinge_embedding_loss", ""}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self, target)), self, target, margin, reduction); #endif } static inline Tensor ger(const Tensor & self, const Tensor & vec2) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self, vec2)))) { case Backend::CPU: return CPUType::ger(self, vec2); break; default: AT_ERROR("ger not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(self, vec2))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::ger", ""}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self, vec2)), self, vec2); #endif } static inline Tensor & ger_out(Tensor & out, const Tensor & self, const Tensor & vec2) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(out, self, vec2)))) { case Backend::CPU: return CPUType::ger_out(out, self, vec2); break; default: AT_ERROR("ger_out not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(out, self, vec2))); } #else static auto table = globalATenDispatch().getOpTable("aten::ger.out(Tensor self, Tensor vec2, *, Tensor(a!) 
out) -> Tensor(a!)"); return table->callUnboxed(out, self, vec2); #endif } static inline Tensor group_norm(const Tensor & input, int64_t num_groups, const Tensor & weight, const Tensor & bias, double eps, bool cudnn_enabled) { #ifdef USE_STATIC_DISPATCH return TypeDefault::group_norm(input, num_groups, weight, bias, eps, cudnn_enabled); #else static auto table = globalATenDispatch().getOpTable("aten::group_norm(Tensor input, int num_groups, Tensor? weight=None, Tensor? bias=None, float eps=1e-05, bool cudnn_enabled=True) -> Tensor"); return table->callUnboxed(input, num_groups, weight, bias, eps, cudnn_enabled); #endif } static inline Tensor fft(const Tensor & self, int64_t signal_ndim, bool normalized) { #ifdef USE_STATIC_DISPATCH return TypeDefault::fft(self, signal_ndim, normalized); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::fft", ""}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self, signal_ndim, normalized); #endif } static inline Tensor ifft(const Tensor & self, int64_t signal_ndim, bool normalized) { #ifdef USE_STATIC_DISPATCH return TypeDefault::ifft(self, signal_ndim, normalized); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::ifft", ""}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self, signal_ndim, normalized); #endif } static inline Tensor rfft(const Tensor & self, int64_t signal_ndim, bool normalized, bool onesided) { #ifdef USE_STATIC_DISPATCH return TypeDefault::rfft(self, signal_ndim, normalized, onesided); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::rfft", ""}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self, signal_ndim, normalized, onesided); #endif } static inline Tensor irfft(const Tensor & self, int64_t signal_ndim, bool normalized, bool onesided, IntArrayRef signal_sizes) { #ifdef USE_STATIC_DISPATCH return TypeDefault::irfft(self, signal_ndim, normalized, onesided, signal_sizes); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::irfft", ""}).value(); return c10::Dispatcher::singleton().callUnboxedOnly( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self, signal_ndim, normalized, onesided, signal_sizes); #endif } static inline Tensor _fft_with_size(const Tensor & self, int64_t signal_ndim, bool complex_input, bool complex_output, bool inverse, IntArrayRef checked_signal_sizes, bool normalized, bool onesided, IntArrayRef output_sizes) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)))) { case Backend::CPU: return CPUType::_fft_with_size(self, signal_ndim, complex_input, complex_output, inverse, checked_signal_sizes, normalized, onesided, output_sizes); break; default: AT_ERROR("_fft_with_size not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(self))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::_fft_with_size", ""}).value(); return c10::Dispatcher::singleton().callUnboxedOnly( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self, signal_ndim, complex_input, complex_output, inverse, checked_signal_sizes, normalized, onesided, 
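// Usage sketch for the spectral wrappers above (a minimal example for this
// pre-torch.fft API; the shapes and flag values are assumptions):
//
//   at::Tensor signal = at::randn({8, 64});                       // batch of real 1-D signals
//   at::Tensor spec   = at::rfft(signal, /*signal_ndim=*/1,
//                                /*normalized=*/false, /*onesided=*/true);    // (8, 33, 2)
//   at::Tensor back   = at::irfft(spec, /*signal_ndim=*/1, /*normalized=*/false,
//                                 /*onesided=*/true, /*signal_sizes=*/{64});  // (8, 64)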
output_sizes); #endif } static inline int64_t _cufft_get_plan_cache_size(int64_t device_index) { #ifdef USE_STATIC_DISPATCH return TypeDefault::_cufft_get_plan_cache_size(device_index); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::_cufft_get_plan_cache_size", ""}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set()), device_index); #endif } static inline int64_t _cufft_get_plan_cache_max_size(int64_t device_index) { #ifdef USE_STATIC_DISPATCH return TypeDefault::_cufft_get_plan_cache_max_size(device_index); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::_cufft_get_plan_cache_max_size", ""}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set()), device_index); #endif } static inline void _cufft_set_plan_cache_max_size(int64_t device_index, int64_t max_size) { #ifdef USE_STATIC_DISPATCH TypeDefault::_cufft_set_plan_cache_max_size(device_index, max_size); #else static auto table = globalATenDispatch().getOpTable("aten::_cufft_set_plan_cache_max_size(int device_index, int max_size) -> void"); return table->callUnboxed(device_index, max_size); #endif } static inline void _cufft_clear_plan_cache(int64_t device_index) { #ifdef USE_STATIC_DISPATCH TypeDefault::_cufft_clear_plan_cache(device_index); #else static auto table = globalATenDispatch().getOpTable("aten::_cufft_clear_plan_cache(int device_index) -> void"); return table->callUnboxed(device_index); #endif } static inline Tensor index(const Tensor & self, TensorList indices) { #ifdef USE_STATIC_DISPATCH return TypeDefault::index(self, indices); #else static auto table = globalATenDispatch().getOpTable("aten::index.Tensor(Tensor self, Tensor?[] indices) -> Tensor"); return table->callUnboxed(self, indices); #endif } static inline Tensor index_copy(const Tensor & self, int64_t dim, const Tensor & index, const Tensor & source) { #ifdef USE_STATIC_DISPATCH return TypeDefault::index_copy(self, dim, index, source); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::index_copy", ""}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self, index, source)), self, dim, index, source); #endif } #ifdef BUILD_NAMEDTENSOR static inline Tensor index_copy(const Tensor & self, Dimname dim, const Tensor & index, const Tensor & source) { #ifdef USE_STATIC_DISPATCH return TypeDefault::index_copy(self, dim, index, source); #else static auto table = globalATenDispatch().getOpTable("aten::index_copy.dimname(Tensor self, Dimname dim, Tensor index, Tensor source) -> Tensor"); return table->callUnboxed(self, dim, index, source); #endif } #endif static inline Tensor & index_put_(Tensor & self, TensorList indices, const Tensor & values, bool accumulate) { #ifdef USE_STATIC_DISPATCH return TypeDefault::index_put_(self, indices, values, accumulate); #else static auto table = globalATenDispatch().getOpTable("aten::index_put_(Tensor(a!) 
self, Tensor?[] indices, Tensor values, bool accumulate=False) -> Tensor(a!)"); return table->callUnboxed(self, indices, values, accumulate); #endif } static inline Tensor index_put(const Tensor & self, TensorList indices, const Tensor & values, bool accumulate) { #ifdef USE_STATIC_DISPATCH return TypeDefault::index_put(self, indices, values, accumulate); #else static auto table = globalATenDispatch().getOpTable("aten::index_put(Tensor self, Tensor?[] indices, Tensor values, bool accumulate=False) -> Tensor"); return table->callUnboxed(self, indices, values, accumulate); #endif } static inline Tensor & _index_put_impl_(Tensor & self, TensorList indices, const Tensor & values, bool accumulate, bool unsafe) { #ifdef USE_STATIC_DISPATCH return TypeDefault::_index_put_impl_(self, indices, values, accumulate, unsafe); #else static auto table = globalATenDispatch().getOpTable("aten::_index_put_impl_(Tensor(a!) self, Tensor?[] indices, Tensor values, bool accumulate=False, bool unsafe=False) -> Tensor(a!)"); return table->callUnboxed(self, indices, values, accumulate, unsafe); #endif } static inline Tensor instance_norm(const Tensor & input, const Tensor & weight, const Tensor & bias, const Tensor & running_mean, const Tensor & running_var, bool use_input_stats, double momentum, double eps, bool cudnn_enabled) { #ifdef USE_STATIC_DISPATCH return TypeDefault::instance_norm(input, weight, bias, running_mean, running_var, use_input_stats, momentum, eps, cudnn_enabled); #else static auto table = globalATenDispatch().getOpTable("aten::instance_norm(Tensor input, Tensor? weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool use_input_stats, float momentum, float eps, bool cudnn_enabled) -> Tensor"); return table->callUnboxed(input, weight, bias, running_mean, running_var, use_input_stats, momentum, eps, cudnn_enabled); #endif } static inline Tensor inverse(const Tensor & self) { #ifdef USE_STATIC_DISPATCH return TypeDefault::inverse(self); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::inverse", ""}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self); #endif } static inline Tensor & inverse_out(Tensor & out, const Tensor & self) { #ifdef USE_STATIC_DISPATCH return TypeDefault::inverse_out(out, self); #else static auto table = globalATenDispatch().getOpTable("aten::inverse.out(Tensor self, *, Tensor(a!) 
out) -> Tensor(a!)"); return table->callUnboxed(out, self); #endif } static inline Tensor _inverse_helper(const Tensor & self) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)))) { case Backend::CPU: return CPUType::_inverse_helper(self); break; default: AT_ERROR("_inverse_helper not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(self))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::_inverse_helper", ""}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self); #endif } static inline Tensor isclose(const Tensor & self, const Tensor & other, double rtol, double atol, bool equal_nan) { #ifdef USE_STATIC_DISPATCH return TypeDefault::isclose(self, other, rtol, atol, equal_nan); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::isclose", ""}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self, other)), self, other, rtol, atol, equal_nan); #endif } static inline Tensor isnan(const Tensor & self) { #ifdef USE_STATIC_DISPATCH return TypeDefault::isnan(self); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::isnan", ""}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self); #endif } static inline bool is_distributed(const Tensor & self) { #ifdef USE_STATIC_DISPATCH return TypeDefault::is_distributed(self); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::is_distributed", ""}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self); #endif } static inline bool is_floating_point(const Tensor & self) { #ifdef USE_STATIC_DISPATCH return TypeDefault::is_floating_point(self); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::is_floating_point", ""}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self); #endif } static inline bool is_complex(const Tensor & self) { #ifdef USE_STATIC_DISPATCH return TypeDefault::is_complex(self); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::is_complex", ""}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self); #endif } static inline bool is_nonzero(const Tensor & self) { #ifdef USE_STATIC_DISPATCH return TypeDefault::is_nonzero(self); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::is_nonzero", ""}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self); #endif } static inline bool is_same_size(const Tensor & self, const Tensor & other) { #ifdef USE_STATIC_DISPATCH return TypeDefault::is_same_size(self, other); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::is_same_size", ""}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self, other)), self, other); #endif } static inline bool is_signed(const 
Tensor & self) { #ifdef USE_STATIC_DISPATCH return TypeDefault::is_signed(self); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::is_signed", ""}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self); #endif } static inline Tensor kl_div(const Tensor & self, const Tensor & target, int64_t reduction) { #ifdef USE_STATIC_DISPATCH return TypeDefault::kl_div(self, target, reduction); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::kl_div", ""}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self, target)), self, target, reduction); #endif } static inline Tensor kl_div_backward(const Tensor & grad_output, const Tensor & self, const Tensor & target, int64_t reduction) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(grad_output, self, target)))) { case Backend::CPU: return CPUType::kl_div_backward(grad_output, self, target, reduction); break; default: AT_ERROR("kl_div_backward not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(grad_output, self, target))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::kl_div_backward", ""}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(grad_output, self, target)), grad_output, self, target, reduction); #endif } static inline std::tuple kthvalue(const Tensor & self, int64_t k, int64_t dim, bool keepdim) { #ifdef USE_STATIC_DISPATCH return TypeDefault::kthvalue(self, k, dim, keepdim); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::kthvalue", ""}).value(); return c10::Dispatcher::singleton().callUnboxedOnly, const Tensor &, int64_t, int64_t, bool>( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self, k, dim, keepdim); #endif } static inline std::tuple kthvalue_out(Tensor & values, Tensor & indices, const Tensor & self, int64_t k, int64_t dim, bool keepdim) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(values, indices, self)))) { case Backend::CPU: return CPUType::kthvalue_out(values, indices, self, k, dim, keepdim); break; default: AT_ERROR("kthvalue_out not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(values, indices, self))); } #else static auto table = globalATenDispatch().getOpTable("aten::kthvalue.values(Tensor self, int k, int dim=-1, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) 
indices)"); return table->callUnboxed, Tensor &, Tensor &, const Tensor &, int64_t, int64_t, bool>(values, indices, self, k, dim, keepdim); #endif } #ifdef BUILD_NAMEDTENSOR static inline std::tuple kthvalue(const Tensor & self, int64_t k, Dimname dim, bool keepdim) { #ifdef USE_STATIC_DISPATCH return TypeDefault::kthvalue(self, k, dim, keepdim); #else static auto table = globalATenDispatch().getOpTable("aten::kthvalue.dimname(Tensor self, int k, Dimname dim, bool keepdim=False) -> (Tensor values, Tensor indices)"); return table->callUnboxed, const Tensor &, int64_t, Dimname, bool>(self, k, dim, keepdim); #endif } #endif #ifdef BUILD_NAMEDTENSOR static inline std::tuple kthvalue_out(Tensor & values, Tensor & indices, const Tensor & self, int64_t k, Dimname dim, bool keepdim) { #ifdef USE_STATIC_DISPATCH return TypeDefault::kthvalue_out(values, indices, self, k, dim, keepdim); #else static auto table = globalATenDispatch().getOpTable("aten::kthvalue.dimname_out(Tensor self, int k, Dimname dim, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)"); return table->callUnboxed, Tensor &, Tensor &, const Tensor &, int64_t, Dimname, bool>(values, indices, self, k, dim, keepdim); #endif } #endif static inline Tensor layer_norm(const Tensor & input, IntArrayRef normalized_shape, const Tensor & weight, const Tensor & bias, double eps, bool cudnn_enable) { #ifdef USE_STATIC_DISPATCH return TypeDefault::layer_norm(input, normalized_shape, weight, bias, eps, cudnn_enable); #else static auto table = globalATenDispatch().getOpTable("aten::layer_norm(Tensor input, int[] normalized_shape, Tensor? weight=None, Tensor? bias=None, float eps=1e-05, bool cudnn_enable=True) -> Tensor"); return table->callUnboxed(input, normalized_shape, weight, bias, eps, cudnn_enable); #endif } static inline std::tuple native_layer_norm(const Tensor & input, const Tensor & weight, const Tensor & bias, int64_t M, int64_t N, double eps) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(input, weight, bias)))) { case Backend::CPU: return CPUType::native_layer_norm(input, weight, bias, M, N, eps); break; default: AT_ERROR("native_layer_norm not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(input, weight, bias))); } #else static auto table = globalATenDispatch().getOpTable("aten::native_layer_norm(Tensor input, Tensor? weight, Tensor? bias, int M, int N, float eps) -> (Tensor, Tensor, Tensor)"); return table->callUnboxed, const Tensor &, const Tensor &, const Tensor &, int64_t, int64_t, double>(input, weight, bias, M, N, eps); #endif } static inline std::tuple native_layer_norm_backward(const Tensor & grad_out, const Tensor & input, const Tensor & mean, const Tensor & rstd, const Tensor & weight, int64_t M, int64_t N, std::array output_mask) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(grad_out, input, mean, rstd, weight)))) { case Backend::CPU: return CPUType::native_layer_norm_backward(grad_out, input, mean, rstd, weight, M, N, output_mask); break; default: AT_ERROR("native_layer_norm_backward not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(grad_out, input, mean, rstd, weight))); } #else static auto table = globalATenDispatch().getOpTable("aten::native_layer_norm_backward(Tensor grad_out, Tensor input, Tensor mean, Tensor rstd, Tensor? 
weight, int M, int N, bool[3] output_mask) -> (Tensor, Tensor, Tensor)"); return table->callUnboxed, const Tensor &, const Tensor &, const Tensor &, const Tensor &, const Tensor &, int64_t, int64_t, std::array>(grad_out, input, mean, rstd, weight, M, N, output_mask); #endif } static inline std::tuple native_layer_norm_double_backward(const Tensor & ggI, const Tensor & ggW, const Tensor & ggb, const Tensor & gO, const Tensor & input, const Tensor & mean, const Tensor & rstd, const Tensor & weight, int64_t M, int64_t N, std::array output_mask) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(ggI, ggW, ggb, gO, input, mean, rstd, weight)))) { case Backend::CPU: return CPUType::native_layer_norm_double_backward(ggI, ggW, ggb, gO, input, mean, rstd, weight, M, N, output_mask); break; default: AT_ERROR("native_layer_norm_double_backward not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(ggI, ggW, ggb, gO, input, mean, rstd, weight))); } #else static auto table = globalATenDispatch().getOpTable("aten::native_layer_norm_double_backward(Tensor? ggI, Tensor? ggW, Tensor? ggb, Tensor gO, Tensor input, Tensor mean, Tensor rstd, Tensor? weight, int M, int N, bool[3] output_mask) -> (Tensor, Tensor, Tensor)"); return table->callUnboxed, const Tensor &, const Tensor &, const Tensor &, const Tensor &, const Tensor &, const Tensor &, const Tensor &, const Tensor &, int64_t, int64_t, std::array>(ggI, ggW, ggb, gO, input, mean, rstd, weight, M, N, output_mask); #endif } static inline Tensor linear(const Tensor & input, const Tensor & weight, const Tensor & bias) { #ifdef USE_STATIC_DISPATCH return TypeDefault::linear(input, weight, bias); #else static auto table = globalATenDispatch().getOpTable("aten::linear(Tensor input, Tensor weight, Tensor? bias=None) -> Tensor"); return table->callUnboxed(input, weight, bias); #endif } static inline Tensor mkldnn_linear(const Tensor & input, const Tensor & weight, const Tensor & bias) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(input, weight, bias)))) { default: AT_ERROR("mkldnn_linear not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(input, weight, bias))); } #else static auto table = globalATenDispatch().getOpTable("aten::mkldnn_linear(Tensor input, Tensor weight, Tensor? 
bias=None) -> Tensor"); return table->callUnboxed(input, weight, bias); #endif } static inline Tensor fbgemm_linear_int8_weight_fp32_activation(const Tensor & input, const Tensor & weight, const Tensor & packed, const Tensor & col_offsets, Scalar weight_scale, Scalar weight_zero_point, const Tensor & bias) { #ifdef USE_STATIC_DISPATCH return TypeDefault::fbgemm_linear_int8_weight_fp32_activation(input, weight, packed, col_offsets, weight_scale, weight_zero_point, bias); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::fbgemm_linear_int8_weight_fp32_activation", ""}).value(); return c10::Dispatcher::singleton().callUnboxedOnly( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(input, weight, packed, col_offsets, bias)), input, weight, packed, col_offsets, weight_scale, weight_zero_point, bias); #endif } static inline Tensor fbgemm_linear_int8_weight(const Tensor & input, const Tensor & weight, const Tensor & packed, const Tensor & col_offsets, Scalar weight_scale, Scalar weight_zero_point, const Tensor & bias) { #ifdef USE_STATIC_DISPATCH return TypeDefault::fbgemm_linear_int8_weight(input, weight, packed, col_offsets, weight_scale, weight_zero_point, bias); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::fbgemm_linear_int8_weight", ""}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(input, weight, packed, col_offsets, bias)), input, weight, packed, col_offsets, weight_scale, weight_zero_point, bias); #endif } static inline std::tuple fbgemm_linear_quantize_weight(const Tensor & input) { #ifdef USE_STATIC_DISPATCH return TypeDefault::fbgemm_linear_quantize_weight(input); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::fbgemm_linear_quantize_weight", ""}).value(); return c10::Dispatcher::singleton().callUnboxedOnly, const Tensor &>( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(input)), input); #endif } static inline Tensor fbgemm_pack_gemm_matrix_fp16(const Tensor & input) { #ifdef USE_STATIC_DISPATCH return TypeDefault::fbgemm_pack_gemm_matrix_fp16(input); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::fbgemm_pack_gemm_matrix_fp16", ""}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(input)), input); #endif } static inline Tensor fbgemm_linear_fp16_weight_fp32_activation(const Tensor & input, const Tensor & packed_weight, const Tensor & bias) { #ifdef USE_STATIC_DISPATCH return TypeDefault::fbgemm_linear_fp16_weight_fp32_activation(input, packed_weight, bias); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::fbgemm_linear_fp16_weight_fp32_activation", ""}).value(); return c10::Dispatcher::singleton().callUnboxedOnly( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(input, packed_weight, bias)), input, packed_weight, bias); #endif } static inline Tensor fbgemm_linear_fp16_weight(const Tensor & input, const Tensor & packed_weight, const Tensor & bias) { #ifdef USE_STATIC_DISPATCH return TypeDefault::fbgemm_linear_fp16_weight(input, packed_weight, bias); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::fbgemm_linear_fp16_weight", ""}).value(); return c10::Dispatcher::singleton().callUnboxed( op, 
impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(input, packed_weight, bias)), input, packed_weight, bias); #endif } static inline Tensor fbgemm_pack_quantized_matrix(const Tensor & input) { #ifdef USE_STATIC_DISPATCH return TypeDefault::fbgemm_pack_quantized_matrix(input); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::fbgemm_pack_quantized_matrix", ""}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(input)), input); #endif } static inline Tensor fbgemm_pack_quantized_matrix(const Tensor & input, int64_t K, int64_t N) { #ifdef USE_STATIC_DISPATCH return TypeDefault::fbgemm_pack_quantized_matrix(input, K, N); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::fbgemm_pack_quantized_matrix", "KN"}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(input)), input, K, N); #endif } static inline Tensor linspace(Scalar start, Scalar end, int64_t steps, const TensorOptions & options) { #ifdef USE_STATIC_DISPATCH return TypeDefault::linspace(start, end, steps, options); #else globalLegacyTypeDispatch().initForTensorTypeSet(at::detail::multi_dispatch_tensor_type_set(options)); static auto table = globalATenDispatch().getOpTable("aten::linspace(Scalar start, Scalar end, int steps=100, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor"); return table->callUnboxed(start, end, steps, options); #endif } static inline Tensor & linspace_out(Tensor & out, Scalar start, Scalar end, int64_t steps) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(out)))) { case Backend::CPU: return CPUType::linspace_out(out, start, end, steps); break; default: AT_ERROR("linspace_out not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(out))); } #else static auto table = globalATenDispatch().getOpTable("aten::linspace.out(Scalar start, Scalar end, int steps=100, *, Tensor(a!) out) -> Tensor(a!)"); return table->callUnboxed(out, start, end, steps); #endif } static inline Tensor log(const Tensor & self) { #ifdef USE_STATIC_DISPATCH return TypeDefault::log(self); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::log", ""}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self); #endif } static inline Tensor & log_(Tensor & self) { #ifdef USE_STATIC_DISPATCH return TypeDefault::log_(self); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::log_", ""}).value(); return c10::Dispatcher::singleton().callUnboxedOnly( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self); #endif } static inline Tensor & log_out(Tensor & out, const Tensor & self) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(out, self)))) { case Backend::CPU: return CPUType::log_out(out, self); break; default: AT_ERROR("log_out not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(out, self))); } #else static auto table = globalATenDispatch().getOpTable("aten::log.out(Tensor self, *, Tensor(a!) 
out) -> Tensor(a!)"); return table->callUnboxed<Tensor &, Tensor &, const Tensor &>(out, self); #endif } static inline Tensor log10(const Tensor & self) { #ifdef USE_STATIC_DISPATCH return TypeDefault::log10(self); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::log10", ""}).value(); return c10::Dispatcher::singleton().callUnboxed<Tensor, const Tensor &>( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self); #endif } static inline Tensor & log10_(Tensor & self) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)))) { case Backend::CPU: return CPUType::log10_(self); break; default: AT_ERROR("log10_ not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(self))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::log10_", ""}).value(); return c10::Dispatcher::singleton().callUnboxedOnly<Tensor &, Tensor &>( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self); #endif } static inline Tensor & log10_out(Tensor & out, const Tensor & self) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(out, self)))) { case Backend::CPU: return CPUType::log10_out(out, self); break; default: AT_ERROR("log10_out not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(out, self))); } #else static auto table = globalATenDispatch().getOpTable("aten::log10.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)"); return table->callUnboxed<Tensor &, Tensor &, const Tensor &>(out, self); #endif } static inline Tensor log1p(const Tensor & self) { #ifdef USE_STATIC_DISPATCH return TypeDefault::log1p(self); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::log1p", ""}).value(); return c10::Dispatcher::singleton().callUnboxed<Tensor, const Tensor &>( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self); #endif } static inline Tensor & log1p_(Tensor & self) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)))) { case Backend::CPU: return CPUType::log1p_(self); break; case Backend::SparseCPU: return SparseCPUType::log1p_(self); break; default: AT_ERROR("log1p_ not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(self))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::log1p_", ""}).value(); return c10::Dispatcher::singleton().callUnboxedOnly<Tensor &, Tensor &>( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self); #endif } static inline Tensor & log1p_out(Tensor & out, const Tensor & self) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(out, self)))) { case Backend::CPU: return CPUType::log1p_out(out, self); break; case Backend::SparseCPU: return SparseCPUType::log1p_out(out, self); break; default: AT_ERROR("log1p_out not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(out, self))); } #else static auto table = globalATenDispatch().getOpTable("aten::log1p.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)"); return table->callUnboxed<Tensor &, Tensor &, const Tensor &>(out, self); #endif }
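// Illustrative usage of the unary pointwise ops declared above (a minimal
// sketch, not part of the generated header; assumes <ATen/ATen.h> is included
// and a CPU build):
//
//   at::Tensor t = at::rand({2, 3});      // uniform values in [0, 1)
//   at::Tensor a = at::log1p(t);          // out-of-place: returns a new tensor
//   at::log1p_(t);                        // in-place: mutates t
//   at::Tensor out = at::empty_like(t);
//   at::log1p_out(out, t);                // writes into a preallocated tensor
//
// All three variants route through the dispatch machinery shown here: a direct
// backend switch under USE_STATIC_DISPATCH, or a lookup through the c10
// dispatcher / ATen op table otherwise.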
static inline Tensor log2(const Tensor & self) { #ifdef USE_STATIC_DISPATCH return TypeDefault::log2(self); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::log2", ""}).value(); return c10::Dispatcher::singleton().callUnboxed<Tensor, const Tensor &>( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self); #endif } static inline Tensor & log2_(Tensor & self) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)))) { case Backend::CPU: return CPUType::log2_(self); break; default: AT_ERROR("log2_ not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(self))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::log2_", ""}).value(); return c10::Dispatcher::singleton().callUnboxedOnly<Tensor &, Tensor &>( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self); #endif } static inline Tensor & log2_out(Tensor & out, const Tensor & self) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(out, self)))) { case Backend::CPU: return CPUType::log2_out(out, self); break; default: AT_ERROR("log2_out not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(out, self))); } #else static auto table = globalATenDispatch().getOpTable("aten::log2.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)"); return table->callUnboxed<Tensor &, Tensor &, const Tensor &>(out, self); #endif } static inline Tensor logdet(const Tensor & self) { #ifdef USE_STATIC_DISPATCH return TypeDefault::logdet(self); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::logdet", ""}).value(); return c10::Dispatcher::singleton().callUnboxed<Tensor, const Tensor &>( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self); #endif } static inline Tensor logspace(Scalar start, Scalar end, int64_t steps, double base, const TensorOptions & options) { #ifdef USE_STATIC_DISPATCH return TypeDefault::logspace(start, end, steps, base, options); #else globalLegacyTypeDispatch().initForTensorTypeSet(at::detail::multi_dispatch_tensor_type_set(options)); static auto table = globalATenDispatch().getOpTable("aten::logspace(Scalar start, Scalar end, int steps=100, float base=10.0, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor"); return table->callUnboxed<Tensor, Scalar, Scalar, int64_t, double, const TensorOptions &>(start, end, steps, base, options); #endif } static inline Tensor & logspace_out(Tensor & out, Scalar start, Scalar end, int64_t steps, double base) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(out)))) { case Backend::CPU: return CPUType::logspace_out(out, start, end, steps, base); break; default: AT_ERROR("logspace_out not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(out))); } #else static auto table = globalATenDispatch().getOpTable("aten::logspace.out(Scalar start, Scalar end, int steps=100, float base=10.0, *, Tensor(a!) out) -> Tensor(a!)"); return table->callUnboxed<Tensor &, Tensor &, Scalar, Scalar, int64_t, double>(out, start, end, steps, base); #endif } static inline Tensor log_softmax(const Tensor & self, int64_t dim, c10::optional<ScalarType> dtype) { #ifdef USE_STATIC_DISPATCH return TypeDefault::log_softmax(self, dim, dtype); #else static auto table = globalATenDispatch().getOpTable("aten::log_softmax(Tensor self, int dim, ScalarType?
dtype=None) -> Tensor"); return table->callUnboxed>(self, dim, dtype); #endif } #ifdef BUILD_NAMEDTENSOR static inline Tensor log_softmax(const Tensor & self, Dimname dim, c10::optional dtype) { #ifdef USE_STATIC_DISPATCH return TypeDefault::log_softmax(self, dim, dtype); #else static auto table = globalATenDispatch().getOpTable("aten::log_softmax(Tensor self, Dimname dim, *, ScalarType? dtype=None) -> Tensor"); return table->callUnboxed>(self, dim, dtype); #endif } #endif static inline Tensor _log_softmax(const Tensor & self, int64_t dim, bool half_to_float) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)))) { case Backend::CPU: return CPUType::_log_softmax(self, dim, half_to_float); break; default: AT_ERROR("_log_softmax not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(self))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::_log_softmax", ""}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self, dim, half_to_float); #endif } static inline Tensor _log_softmax_backward_data(const Tensor & grad_output, const Tensor & output, int64_t dim, const Tensor & self) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(grad_output, output, self)))) { case Backend::CPU: return CPUType::_log_softmax_backward_data(grad_output, output, dim, self); break; default: AT_ERROR("_log_softmax_backward_data not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(grad_output, output, self))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::_log_softmax_backward_data", ""}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(grad_output, output, self)), grad_output, output, dim, self); #endif } static inline Tensor logsumexp(const Tensor & self, IntArrayRef dim, bool keepdim) { #ifdef USE_STATIC_DISPATCH return TypeDefault::logsumexp(self, dim, keepdim); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::logsumexp", ""}).value(); return c10::Dispatcher::singleton().callUnboxedOnly( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self, dim, keepdim); #endif } static inline Tensor & logsumexp_out(Tensor & out, const Tensor & self, IntArrayRef dim, bool keepdim) { #ifdef USE_STATIC_DISPATCH return TypeDefault::logsumexp_out(out, self, dim, keepdim); #else static auto table = globalATenDispatch().getOpTable("aten::logsumexp.out(Tensor self, int[1] dim, bool keepdim=False, *, Tensor(a!) 
out) -> Tensor(a!)"); return table->callUnboxed(out, self, dim, keepdim); #endif } #ifdef BUILD_NAMEDTENSOR static inline Tensor logsumexp(const Tensor & self, DimnameList dim, bool keepdim) { #ifdef USE_STATIC_DISPATCH return TypeDefault::logsumexp(self, dim, keepdim); #else static auto table = globalATenDispatch().getOpTable("aten::logsumexp.names(Tensor self, Dimname[1] dim, bool keepdim=False) -> Tensor"); return table->callUnboxed(self, dim, keepdim); #endif } #endif #ifdef BUILD_NAMEDTENSOR static inline Tensor & logsumexp_out(Tensor & out, const Tensor & self, DimnameList dim, bool keepdim) { #ifdef USE_STATIC_DISPATCH return TypeDefault::logsumexp_out(out, self, dim, keepdim); #else static auto table = globalATenDispatch().getOpTable("aten::logsumexp.names_out(Tensor self, Dimname[1] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)"); return table->callUnboxed(out, self, dim, keepdim); #endif } #endif static inline Tensor margin_ranking_loss(const Tensor & input1, const Tensor & input2, const Tensor & target, double margin, int64_t reduction) { #ifdef USE_STATIC_DISPATCH return TypeDefault::margin_ranking_loss(input1, input2, target, margin, reduction); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::margin_ranking_loss", ""}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(input1, input2, target)), input1, input2, target, margin, reduction); #endif } static inline Tensor matmul(const Tensor & self, const Tensor & other) { #ifdef USE_STATIC_DISPATCH return TypeDefault::matmul(self, other); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::matmul", ""}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self, other)), self, other); #endif } static inline Tensor & matmul_out(Tensor & out, const Tensor & self, const Tensor & other) { #ifdef USE_STATIC_DISPATCH return TypeDefault::matmul_out(out, self, other); #else static auto table = globalATenDispatch().getOpTable("aten::matmul.out(Tensor self, Tensor other, *, Tensor(a!) 
out) -> Tensor(a!)"); return table->callUnboxed<Tensor &, Tensor &, const Tensor &, const Tensor &>(out, self, other); #endif } static inline Tensor matrix_rank(const Tensor & self, double tol, bool symmetric) { #ifdef USE_STATIC_DISPATCH return TypeDefault::matrix_rank(self, tol, symmetric); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::matrix_rank", "tol"}).value(); return c10::Dispatcher::singleton().callUnboxed<Tensor, const Tensor &, double, bool>( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self, tol, symmetric); #endif } static inline Tensor matrix_rank(const Tensor & self, bool symmetric) { #ifdef USE_STATIC_DISPATCH return TypeDefault::matrix_rank(self, symmetric); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::matrix_rank", ""}).value(); return c10::Dispatcher::singleton().callUnboxed<Tensor, const Tensor &, bool>( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self, symmetric); #endif } static inline Tensor matrix_power(const Tensor & self, int64_t n) { #ifdef USE_STATIC_DISPATCH return TypeDefault::matrix_power(self, n); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::matrix_power", ""}).value(); return c10::Dispatcher::singleton().callUnboxed<Tensor, const Tensor &, int64_t>( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self, n); #endif } static inline std::tuple<Tensor,Tensor> max(const Tensor & self, int64_t dim, bool keepdim) { #ifdef USE_STATIC_DISPATCH return TypeDefault::max(self, dim, keepdim); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::max", "dim"}).value(); return c10::Dispatcher::singleton().callUnboxedOnly<std::tuple<Tensor,Tensor>, const Tensor &, int64_t, bool>( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self, dim, keepdim); #endif } static inline std::tuple<Tensor &,Tensor &> max_out(Tensor & max, Tensor & max_values, const Tensor & self, int64_t dim, bool keepdim) { #ifdef USE_STATIC_DISPATCH return TypeDefault::max_out(max, max_values, self, dim, keepdim); #else static auto table = globalATenDispatch().getOpTable("aten::max.dim_max(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) max, Tensor(b!) max_values) -> (Tensor(a!) values, Tensor(b!) indices)"); return table->callUnboxed<std::tuple<Tensor &,Tensor &>, Tensor &, Tensor &, const Tensor &, int64_t, bool>(max, max_values, self, dim, keepdim); #endif }
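// Illustrative usage of the dim-reducing max overload above (a minimal sketch,
// not part of the generated header; assumes <ATen/ATen.h>, <tuple>, and a CPU
// build):
//
//   at::Tensor x = at::rand({4, 5});
//   at::Tensor values, indices;
//   std::tie(values, indices) = at::max(x, /*dim=*/1, /*keepdim=*/false);
//   // values: shape {4}, the row maxima; indices: shape {4}, their positions
//
// The max_out overload instead writes into caller-provided tensors, which is
// why its return type is a tuple of references to those tensors.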
indices)"); return table->callUnboxed, Tensor &, Tensor &, const Tensor &, int64_t, bool>(max, max_values, self, dim, keepdim); #endif } static inline Tensor max_values(const Tensor & self, IntArrayRef dim, bool keepdim) { #ifdef USE_STATIC_DISPATCH return TypeDefault::max_values(self, dim, keepdim); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::max_values", ""}).value(); return c10::Dispatcher::singleton().callUnboxedOnly( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self, dim, keepdim); #endif } #ifdef BUILD_NAMEDTENSOR static inline std::tuple max(const Tensor & self, Dimname dim, bool keepdim) { #ifdef USE_STATIC_DISPATCH return TypeDefault::max(self, dim, keepdim); #else static auto table = globalATenDispatch().getOpTable("aten::max.names_dim(Tensor self, Dimname dim, bool keepdim=False) -> (Tensor values, Tensor indices)"); return table->callUnboxed, const Tensor &, Dimname, bool>(self, dim, keepdim); #endif } #endif #ifdef BUILD_NAMEDTENSOR static inline std::tuple max_out(Tensor & max, Tensor & max_values, const Tensor & self, Dimname dim, bool keepdim) { #ifdef USE_STATIC_DISPATCH return TypeDefault::max_out(max, max_values, self, dim, keepdim); #else static auto table = globalATenDispatch().getOpTable("aten::max.names_dim_max(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) max, Tensor(b!) max_values) -> (Tensor(a!) values, Tensor(b!) indices)"); return table->callUnboxed, Tensor &, Tensor &, const Tensor &, Dimname, bool>(max, max_values, self, dim, keepdim); #endif } #endif #ifdef BUILD_NAMEDTENSOR static inline Tensor max_values(const Tensor & self, DimnameList dim, bool keepdim) { #ifdef USE_STATIC_DISPATCH return TypeDefault::max_values(self, dim, keepdim); #else static auto table = globalATenDispatch().getOpTable("aten::max_values.names(Tensor self, Dimname[1] dim, bool keepdim=False) -> Tensor"); return table->callUnboxed(self, dim, keepdim); #endif } #endif static inline std::tuple max_pool1d_with_indices(const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode) { #ifdef USE_STATIC_DISPATCH return TypeDefault::max_pool1d_with_indices(self, kernel_size, stride, padding, dilation, ceil_mode); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::max_pool1d_with_indices", ""}).value(); return c10::Dispatcher::singleton().callUnboxedOnly, const Tensor &, IntArrayRef, IntArrayRef, IntArrayRef, IntArrayRef, bool>( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self, kernel_size, stride, padding, dilation, ceil_mode); #endif } static inline Tensor max_pool1d(const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode) { #ifdef USE_STATIC_DISPATCH return TypeDefault::max_pool1d(self, kernel_size, stride, padding, dilation, ceil_mode); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::max_pool1d", ""}).value(); return c10::Dispatcher::singleton().callUnboxedOnly( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self, kernel_size, stride, padding, dilation, ceil_mode); #endif } static inline Tensor max_pool2d(const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode) { #ifdef USE_STATIC_DISPATCH return TypeDefault::max_pool2d(self, kernel_size, stride, padding, dilation, ceil_mode); 
#else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::max_pool2d", ""}).value(); return c10::Dispatcher::singleton().callUnboxedOnly( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self, kernel_size, stride, padding, dilation, ceil_mode); #endif } static inline Tensor mkldnn_max_pool2d(const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)))) { default: AT_ERROR("mkldnn_max_pool2d not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(self))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::mkldnn_max_pool2d", ""}).value(); return c10::Dispatcher::singleton().callUnboxedOnly( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self, kernel_size, stride, padding, dilation, ceil_mode); #endif } static inline Tensor quantized_max_pool2d(const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)))) { case Backend::QuantizedCPU: return QuantizedCPUType::quantized_max_pool2d(self, kernel_size, stride, padding, dilation, ceil_mode); break; default: AT_ERROR("quantized_max_pool2d not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(self))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::quantized_max_pool2d", ""}).value(); return c10::Dispatcher::singleton().callUnboxedOnly( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self, kernel_size, stride, padding, dilation, ceil_mode); #endif } static inline Tensor max_pool3d(const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode) { #ifdef USE_STATIC_DISPATCH return TypeDefault::max_pool3d(self, kernel_size, stride, padding, dilation, ceil_mode); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::max_pool3d", ""}).value(); return c10::Dispatcher::singleton().callUnboxedOnly( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self, kernel_size, stride, padding, dilation, ceil_mode); #endif } static inline Tensor mean(const Tensor & self, c10::optional dtype) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)))) { case Backend::CPU: return CPUType::mean(self, dtype); break; case Backend::QuantizedCPU: return QuantizedCPUType::mean(self, dtype); break; default: AT_ERROR("mean not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(self))); } #else static auto table = globalATenDispatch().getOpTable("aten::mean(Tensor self, *, ScalarType? 
dtype=None) -> Tensor"); return table->callUnboxed>(self, dtype); #endif } static inline Tensor mean(const Tensor & self, IntArrayRef dim, bool keepdim, c10::optional dtype) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)))) { case Backend::CPU: return CPUType::mean(self, dim, keepdim, dtype); break; case Backend::QuantizedCPU: return QuantizedCPUType::mean(self, dim, keepdim, dtype); break; default: AT_ERROR("mean not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(self))); } #else static auto table = globalATenDispatch().getOpTable("aten::mean.dim(Tensor self, int[1] dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor"); return table->callUnboxed>(self, dim, keepdim, dtype); #endif } static inline Tensor & mean_out(Tensor & out, const Tensor & self, IntArrayRef dim, bool keepdim, c10::optional dtype) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(out, self)))) { case Backend::CPU: return CPUType::mean_out(out, self, dim, keepdim, dtype); break; case Backend::QuantizedCPU: return QuantizedCPUType::mean_out(out, self, dim, keepdim, dtype); break; default: AT_ERROR("mean_out not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(out, self))); } #else static auto table = globalATenDispatch().getOpTable("aten::mean.out(Tensor self, int[1] dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)"); return table->callUnboxed>(out, self, dim, keepdim, dtype); #endif } #ifdef BUILD_NAMEDTENSOR static inline Tensor mean(const Tensor & self, DimnameList dim, bool keepdim, c10::optional dtype) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)))) { case Backend::CPU: return CPUType::mean(self, dim, keepdim, dtype); break; default: AT_ERROR("mean not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(self))); } #else static auto table = globalATenDispatch().getOpTable("aten::mean.names_dim(Tensor self, Dimname[1] dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor"); return table->callUnboxed>(self, dim, keepdim, dtype); #endif } #endif #ifdef BUILD_NAMEDTENSOR static inline Tensor & mean_out(Tensor & out, const Tensor & self, DimnameList dim, bool keepdim, c10::optional dtype) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(out, self)))) { case Backend::CPU: return CPUType::mean_out(out, self, dim, keepdim, dtype); break; default: AT_ERROR("mean_out not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(out, self))); } #else static auto table = globalATenDispatch().getOpTable("aten::mean.names_out(Tensor self, Dimname[1] dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) 
out) -> Tensor(a!)"); return table->callUnboxed>(out, self, dim, keepdim, dtype); #endif } #endif static inline std::tuple median(const Tensor & self, int64_t dim, bool keepdim) { #ifdef USE_STATIC_DISPATCH return TypeDefault::median(self, dim, keepdim); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::median", "dim"}).value(); return c10::Dispatcher::singleton().callUnboxedOnly, const Tensor &, int64_t, bool>( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self, dim, keepdim); #endif } static inline std::tuple median_out(Tensor & values, Tensor & indices, const Tensor & self, int64_t dim, bool keepdim) { #ifdef USE_STATIC_DISPATCH return TypeDefault::median_out(values, indices, self, dim, keepdim); #else static auto table = globalATenDispatch().getOpTable("aten::median.dim_values(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)"); return table->callUnboxed, Tensor &, Tensor &, const Tensor &, int64_t, bool>(values, indices, self, dim, keepdim); #endif } #ifdef BUILD_NAMEDTENSOR static inline std::tuple median(const Tensor & self, Dimname dim, bool keepdim) { #ifdef USE_STATIC_DISPATCH return TypeDefault::median(self, dim, keepdim); #else static auto table = globalATenDispatch().getOpTable("aten::median.names_dim(Tensor self, Dimname dim, bool keepdim=False) -> (Tensor values, Tensor indices)"); return table->callUnboxed, const Tensor &, Dimname, bool>(self, dim, keepdim); #endif } #endif #ifdef BUILD_NAMEDTENSOR static inline std::tuple median_out(Tensor & values, Tensor & indices, const Tensor & self, Dimname dim, bool keepdim) { #ifdef USE_STATIC_DISPATCH return TypeDefault::median_out(values, indices, self, dim, keepdim); #else static auto table = globalATenDispatch().getOpTable("aten::median.names_dim_values(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)"); return table->callUnboxed, Tensor &, Tensor &, const Tensor &, Dimname, bool>(values, indices, self, dim, keepdim); #endif } #endif static inline std::tuple min(const Tensor & self, int64_t dim, bool keepdim) { #ifdef USE_STATIC_DISPATCH return TypeDefault::min(self, dim, keepdim); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::min", "dim"}).value(); return c10::Dispatcher::singleton().callUnboxedOnly, const Tensor &, int64_t, bool>( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self, dim, keepdim); #endif } static inline std::tuple min_out(Tensor & min, Tensor & min_indices, const Tensor & self, int64_t dim, bool keepdim) { #ifdef USE_STATIC_DISPATCH return TypeDefault::min_out(min, min_indices, self, dim, keepdim); #else static auto table = globalATenDispatch().getOpTable("aten::min.dim_min(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) min, Tensor(b!) min_indices) -> (Tensor(a!) values, Tensor(b!) 
indices)"); return table->callUnboxed, Tensor &, Tensor &, const Tensor &, int64_t, bool>(min, min_indices, self, dim, keepdim); #endif } static inline Tensor min_values(const Tensor & self, IntArrayRef dim, bool keepdim) { #ifdef USE_STATIC_DISPATCH return TypeDefault::min_values(self, dim, keepdim); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::min_values", ""}).value(); return c10::Dispatcher::singleton().callUnboxedOnly( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self, dim, keepdim); #endif } #ifdef BUILD_NAMEDTENSOR static inline std::tuple min(const Tensor & self, Dimname dim, bool keepdim) { #ifdef USE_STATIC_DISPATCH return TypeDefault::min(self, dim, keepdim); #else static auto table = globalATenDispatch().getOpTable("aten::min.names_dim(Tensor self, Dimname dim, bool keepdim=False) -> (Tensor values, Tensor indices)"); return table->callUnboxed, const Tensor &, Dimname, bool>(self, dim, keepdim); #endif } #endif #ifdef BUILD_NAMEDTENSOR static inline std::tuple min_out(Tensor & min, Tensor & min_indices, const Tensor & self, Dimname dim, bool keepdim) { #ifdef USE_STATIC_DISPATCH return TypeDefault::min_out(min, min_indices, self, dim, keepdim); #else static auto table = globalATenDispatch().getOpTable("aten::min.names_dim_min(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) min, Tensor(b!) min_indices) -> (Tensor(a!) values, Tensor(b!) indices)"); return table->callUnboxed, Tensor &, Tensor &, const Tensor &, Dimname, bool>(min, min_indices, self, dim, keepdim); #endif } #endif #ifdef BUILD_NAMEDTENSOR static inline Tensor min_values(const Tensor & self, DimnameList dim, bool keepdim) { #ifdef USE_STATIC_DISPATCH return TypeDefault::min_values(self, dim, keepdim); #else static auto table = globalATenDispatch().getOpTable("aten::min_values.names(Tensor self, Dimname[1] dim, bool keepdim=False) -> Tensor"); return table->callUnboxed(self, dim, keepdim); #endif } #endif static inline Tensor mkldnn_convolution(const Tensor & self, const Tensor & weight, const Tensor & bias, IntArrayRef padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups) { #ifdef USE_STATIC_DISPATCH return TypeDefault::mkldnn_convolution(self, weight, bias, padding, stride, dilation, groups); #else static auto table = globalATenDispatch().getOpTable("aten::mkldnn_convolution(Tensor self, Tensor weight, Tensor? 
bias, int[] padding, int[] stride, int[] dilation, int groups) -> Tensor"); return table->callUnboxed(self, weight, bias, padding, stride, dilation, groups); #endif } static inline Tensor mkldnn_convolution_backward_input(IntArrayRef self_size, const Tensor & grad_output, const Tensor & weight, IntArrayRef padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups, bool bias_defined) { #ifdef USE_STATIC_DISPATCH return TypeDefault::mkldnn_convolution_backward_input(self_size, grad_output, weight, padding, stride, dilation, groups, bias_defined); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::mkldnn_convolution_backward_input", ""}).value(); return c10::Dispatcher::singleton().callUnboxedOnly( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(grad_output, weight)), self_size, grad_output, weight, padding, stride, dilation, groups, bias_defined); #endif } static inline std::tuple mkldnn_convolution_backward_weights(IntArrayRef weight_size, const Tensor & grad_output, const Tensor & self, IntArrayRef padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups, bool bias_defined) { #ifdef USE_STATIC_DISPATCH return TypeDefault::mkldnn_convolution_backward_weights(weight_size, grad_output, self, padding, stride, dilation, groups, bias_defined); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::mkldnn_convolution_backward_weights", ""}).value(); return c10::Dispatcher::singleton().callUnboxedOnly, IntArrayRef, const Tensor &, const Tensor &, IntArrayRef, IntArrayRef, IntArrayRef, int64_t, bool>( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(grad_output, self)), weight_size, grad_output, self, padding, stride, dilation, groups, bias_defined); #endif } static inline std::tuple mkldnn_convolution_backward(const Tensor & self, const Tensor & grad_output, const Tensor & weight, IntArrayRef padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups, std::array output_mask) { #ifdef USE_STATIC_DISPATCH return TypeDefault::mkldnn_convolution_backward(self, grad_output, weight, padding, stride, dilation, groups, output_mask); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::mkldnn_convolution_backward", ""}).value(); return c10::Dispatcher::singleton().callUnboxedOnly, const Tensor &, const Tensor &, const Tensor &, IntArrayRef, IntArrayRef, IntArrayRef, int64_t, std::array>( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self, grad_output, weight)), self, grad_output, weight, padding, stride, dilation, groups, output_mask); #endif } static inline std::tuple miopen_batch_norm(const Tensor & input, const Tensor & weight, const Tensor & bias, const Tensor & running_mean, const Tensor & running_var, bool training, double exponential_average_factor, double epsilon) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(input, weight, bias, running_mean, running_var)))) { default: AT_ERROR("miopen_batch_norm not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(input, weight, bias, running_mean, running_var))); } #else static auto table = globalATenDispatch().getOpTable("aten::miopen_batch_norm(Tensor input, Tensor weight, Tensor? bias, Tensor? running_mean, Tensor? 
running_var, bool training, float exponential_average_factor, float epsilon) -> (Tensor, Tensor, Tensor)"); return table->callUnboxed, const Tensor &, const Tensor &, const Tensor &, const Tensor &, const Tensor &, bool, double, double>(input, weight, bias, running_mean, running_var, training, exponential_average_factor, epsilon); #endif } static inline std::tuple miopen_batch_norm_backward(const Tensor & input, const Tensor & grad_output, const Tensor & weight, const Tensor & running_mean, const Tensor & running_var, const Tensor & save_mean, const Tensor & save_var, double epsilon) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(input, grad_output, weight, running_mean, running_var, save_mean, save_var)))) { default: AT_ERROR("miopen_batch_norm_backward not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(input, grad_output, weight, running_mean, running_var, save_mean, save_var))); } #else static auto table = globalATenDispatch().getOpTable("aten::miopen_batch_norm_backward(Tensor input, Tensor grad_output, Tensor weight, Tensor? running_mean, Tensor? running_var, Tensor? save_mean, Tensor? save_var, float epsilon) -> (Tensor, Tensor, Tensor)"); return table->callUnboxed, const Tensor &, const Tensor &, const Tensor &, const Tensor &, const Tensor &, const Tensor &, const Tensor &, double>(input, grad_output, weight, running_mean, running_var, save_mean, save_var, epsilon); #endif } static inline Tensor miopen_convolution(const Tensor & self, const Tensor & weight, const Tensor & bias, IntArrayRef padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self, weight, bias)))) { default: AT_ERROR("miopen_convolution not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(self, weight, bias))); } #else static auto table = globalATenDispatch().getOpTable("aten::miopen_convolution(Tensor self, Tensor weight, Tensor? 
bias, int[] padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic) -> Tensor"); return table->callUnboxed(self, weight, bias, padding, stride, dilation, groups, benchmark, deterministic); #endif } static inline Tensor miopen_convolution_backward_input(IntArrayRef self_size, const Tensor & grad_output, const Tensor & weight, IntArrayRef padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(grad_output, weight)))) { default: AT_ERROR("miopen_convolution_backward_input not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(grad_output, weight))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::miopen_convolution_backward_input", ""}).value(); return c10::Dispatcher::singleton().callUnboxedOnly( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(grad_output, weight)), self_size, grad_output, weight, padding, stride, dilation, groups, benchmark, deterministic); #endif } static inline std::tuple miopen_convolution_backward(const Tensor & self, const Tensor & grad_output, const Tensor & weight, IntArrayRef padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic, std::array output_mask) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self, grad_output, weight)))) { default: AT_ERROR("miopen_convolution_backward not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(self, grad_output, weight))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::miopen_convolution_backward", ""}).value(); return c10::Dispatcher::singleton().callUnboxedOnly, const Tensor &, const Tensor &, const Tensor &, IntArrayRef, IntArrayRef, IntArrayRef, int64_t, bool, bool, std::array>( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self, grad_output, weight)), self, grad_output, weight, padding, stride, dilation, groups, benchmark, deterministic, output_mask); #endif } static inline Tensor miopen_convolution_backward_bias(const Tensor & grad_output) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(grad_output)))) { default: AT_ERROR("miopen_convolution_backward_bias not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(grad_output))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::miopen_convolution_backward_bias", ""}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(grad_output)), grad_output); #endif } static inline Tensor miopen_convolution_backward_weight(IntArrayRef weight_size, const Tensor & grad_output, const Tensor & self, IntArrayRef padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(grad_output, self)))) { default: AT_ERROR("miopen_convolution_backward_weight not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(grad_output, self))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() 
.findSchema({"aten::miopen_convolution_backward_weight", ""}).value(); return c10::Dispatcher::singleton().callUnboxedOnly( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(grad_output, self)), weight_size, grad_output, self, padding, stride, dilation, groups, benchmark, deterministic); #endif } static inline Tensor miopen_convolution_transpose(const Tensor & self, const Tensor & weight, const Tensor & bias, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self, weight, bias)))) { default: AT_ERROR("miopen_convolution_transpose not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(self, weight, bias))); } #else static auto table = globalATenDispatch().getOpTable("aten::miopen_convolution_transpose(Tensor self, Tensor weight, Tensor? bias, int[] padding, int[] output_padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic) -> Tensor"); return table->callUnboxed(self, weight, bias, padding, output_padding, stride, dilation, groups, benchmark, deterministic); #endif } static inline std::tuple miopen_convolution_transpose_backward(const Tensor & self, const Tensor & grad_output, const Tensor & weight, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic, std::array output_mask) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self, grad_output, weight)))) { default: AT_ERROR("miopen_convolution_transpose_backward not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(self, grad_output, weight))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::miopen_convolution_transpose_backward", ""}).value(); return c10::Dispatcher::singleton().callUnboxedOnly, const Tensor &, const Tensor &, const Tensor &, IntArrayRef, IntArrayRef, IntArrayRef, IntArrayRef, int64_t, bool, bool, std::array>( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self, grad_output, weight)), self, grad_output, weight, padding, output_padding, stride, dilation, groups, benchmark, deterministic, output_mask); #endif } static inline Tensor miopen_convolution_transpose_backward_input(const Tensor & grad_output, const Tensor & weight, IntArrayRef padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(grad_output, weight)))) { default: AT_ERROR("miopen_convolution_transpose_backward_input not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(grad_output, weight))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::miopen_convolution_transpose_backward_input", ""}).value(); return c10::Dispatcher::singleton().callUnboxedOnly( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(grad_output, weight)), grad_output, weight, padding, stride, dilation, groups, benchmark, deterministic); #endif } static inline Tensor miopen_convolution_transpose_backward_weight(IntArrayRef weight_size, const Tensor & grad_output, const Tensor & self, IntArrayRef padding, IntArrayRef 
stride, IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(grad_output, self)))) { default: AT_ERROR("miopen_convolution_transpose_backward_weight not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(grad_output, self))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::miopen_convolution_transpose_backward_weight", ""}).value(); return c10::Dispatcher::singleton().callUnboxedOnly( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(grad_output, self)), weight_size, grad_output, self, padding, stride, dilation, groups, benchmark, deterministic); #endif } static inline Tensor miopen_depthwise_convolution(const Tensor & self, const Tensor & weight, const Tensor & bias, IntArrayRef padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self, weight, bias)))) { default: AT_ERROR("miopen_depthwise_convolution not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(self, weight, bias))); } #else static auto table = globalATenDispatch().getOpTable("aten::miopen_depthwise_convolution(Tensor self, Tensor weight, Tensor? bias, int[] padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic) -> Tensor"); return table->callUnboxed(self, weight, bias, padding, stride, dilation, groups, benchmark, deterministic); #endif } static inline Tensor miopen_depthwise_convolution_backward_input(IntArrayRef self_size, const Tensor & grad_output, const Tensor & weight, IntArrayRef padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(grad_output, weight)))) { default: AT_ERROR("miopen_depthwise_convolution_backward_input not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(grad_output, weight))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::miopen_depthwise_convolution_backward_input", ""}).value(); return c10::Dispatcher::singleton().callUnboxedOnly( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(grad_output, weight)), self_size, grad_output, weight, padding, stride, dilation, groups, benchmark, deterministic); #endif } static inline std::tuple miopen_depthwise_convolution_backward(const Tensor & self, const Tensor & grad_output, const Tensor & weight, IntArrayRef padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic, std::array output_mask) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self, grad_output, weight)))) { default: AT_ERROR("miopen_depthwise_convolution_backward not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(self, grad_output, weight))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::miopen_depthwise_convolution_backward", ""}).value(); return c10::Dispatcher::singleton().callUnboxedOnly, const Tensor &, const Tensor &, const Tensor &, IntArrayRef, IntArrayRef, IntArrayRef, int64_t, bool, bool, std::array>( op, 
impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self, grad_output, weight)), self, grad_output, weight, padding, stride, dilation, groups, benchmark, deterministic, output_mask); #endif } static inline Tensor miopen_depthwise_convolution_backward_weight(IntArrayRef weight_size, const Tensor & grad_output, const Tensor & self, IntArrayRef padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(grad_output, self)))) { default: AT_ERROR("miopen_depthwise_convolution_backward_weight not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(grad_output, self))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::miopen_depthwise_convolution_backward_weight", ""}).value(); return c10::Dispatcher::singleton().callUnboxedOnly( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(grad_output, self)), weight_size, grad_output, self, padding, stride, dilation, groups, benchmark, deterministic); #endif } static inline std::tuple miopen_rnn(const Tensor & input, TensorList weight, int64_t weight_stride0, const Tensor & hx, const Tensor & cx, int64_t mode, int64_t hidden_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, IntArrayRef batch_sizes, const Tensor & dropout_state) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(input, weight, hx, cx, dropout_state)))) { default: AT_ERROR("miopen_rnn not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(input, weight, hx, cx, dropout_state))); } #else static auto table = globalATenDispatch().getOpTable("aten::miopen_rnn(Tensor input, Tensor[] weight, int weight_stride0, Tensor hx, Tensor? cx, int mode, int hidden_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, int[] batch_sizes, Tensor? 
dropout_state) -> (Tensor, Tensor, Tensor, Tensor, Tensor)"); return table->callUnboxed, const Tensor &, TensorList, int64_t, const Tensor &, const Tensor &, int64_t, int64_t, int64_t, bool, double, bool, bool, IntArrayRef, const Tensor &>(input, weight, weight_stride0, hx, cx, mode, hidden_size, num_layers, batch_first, dropout, train, bidirectional, batch_sizes, dropout_state); #endif } static inline std::tuple> miopen_rnn_backward(const Tensor & input, TensorList weight, int64_t weight_stride0, const Tensor & weight_buf, const Tensor & hx, const Tensor & cx, const Tensor & output, const Tensor & grad_output, const Tensor & grad_hy, const Tensor & grad_cy, int64_t mode, int64_t hidden_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, IntArrayRef batch_sizes, const Tensor & dropout_state, const Tensor & reserve, std::array output_mask) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(input, weight, weight_buf, hx, cx, output, grad_output, grad_hy, grad_cy, dropout_state, reserve)))) { default: AT_ERROR("miopen_rnn_backward not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(input, weight, weight_buf, hx, cx, output, grad_output, grad_hy, grad_cy, dropout_state, reserve))); } #else static auto table = globalATenDispatch().getOpTable("aten::miopen_rnn_backward(Tensor input, Tensor[] weight, int weight_stride0, Tensor weight_buf, Tensor hx, Tensor? cx, Tensor output, Tensor? grad_output, Tensor? grad_hy, Tensor? grad_cy, int mode, int hidden_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, int[] batch_sizes, Tensor? dropout_state, Tensor reserve, bool[4] output_mask) -> (Tensor, Tensor, Tensor, Tensor[])"); return table->callUnboxed>, const Tensor &, TensorList, int64_t, const Tensor &, const Tensor &, const Tensor &, const Tensor &, const Tensor &, const Tensor &, const Tensor &, int64_t, int64_t, int64_t, bool, double, bool, bool, IntArrayRef, const Tensor &, const Tensor &, std::array>(input, weight, weight_stride0, weight_buf, hx, cx, output, grad_output, grad_hy, grad_cy, mode, hidden_size, num_layers, batch_first, dropout, train, bidirectional, batch_sizes, dropout_state, reserve, output_mask); #endif } static inline Tensor mm(const Tensor & self, const Tensor & mat2) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self, mat2)))) { case Backend::CPU: return CPUType::mm(self, mat2); break; case Backend::SparseCPU: return SparseCPUType::mm(self, mat2); break; default: AT_ERROR("mm not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(self, mat2))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::mm", ""}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self, mat2)), self, mat2); #endif } static inline Tensor & mm_out(Tensor & out, const Tensor & self, const Tensor & mat2) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(out, self, mat2)))) { case Backend::CPU: return CPUType::mm_out(out, self, mat2); break; case Backend::SparseCPU: return SparseCPUType::mm_out(out, self, mat2); break; default: AT_ERROR("mm_out not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(out, self, mat2))); } #else static 
auto table = globalATenDispatch().getOpTable("aten::mm.out(Tensor self, Tensor mat2, *, Tensor(a!) out) -> Tensor(a!)"); return table->callUnboxed<Tensor &, Tensor &, const Tensor &, const Tensor &>(out, self, mat2); #endif }
static inline Tensor _sparse_mm(const Tensor & sparse, const Tensor & dense) { #ifdef USE_STATIC_DISPATCH return TypeDefault::_sparse_mm(sparse, dense); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::_sparse_mm", ""}).value(); return c10::Dispatcher::singleton().callUnboxed<Tensor, const Tensor &, const Tensor &>( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(sparse, dense)), sparse, dense); #endif }
static inline std::tuple<Tensor,Tensor> mode(const Tensor & self, int64_t dim, bool keepdim) { #ifdef USE_STATIC_DISPATCH return TypeDefault::mode(self, dim, keepdim); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::mode", ""}).value(); return c10::Dispatcher::singleton().callUnboxedOnly<std::tuple<Tensor,Tensor>, const Tensor &, int64_t, bool>( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self, dim, keepdim); #endif }
static inline std::tuple<Tensor &,Tensor &> mode_out(Tensor & values, Tensor & indices, const Tensor & self, int64_t dim, bool keepdim) { #ifdef USE_STATIC_DISPATCH return TypeDefault::mode_out(values, indices, self, dim, keepdim); #else static auto table = globalATenDispatch().getOpTable("aten::mode.values(Tensor self, int dim=-1, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)"); return table->callUnboxed<std::tuple<Tensor &,Tensor &>, Tensor &, Tensor &, const Tensor &, int64_t, bool>(values, indices, self, dim, keepdim); #endif }
#ifdef BUILD_NAMEDTENSOR static inline std::tuple<Tensor,Tensor> mode(const Tensor & self, Dimname dim, bool keepdim) { #ifdef USE_STATIC_DISPATCH return TypeDefault::mode(self, dim, keepdim); #else static auto table = globalATenDispatch().getOpTable("aten::mode.dimname(Tensor self, Dimname dim, bool keepdim=False) -> (Tensor values, Tensor indices)"); return table->callUnboxed<std::tuple<Tensor,Tensor>, const Tensor &, Dimname, bool>(self, dim, keepdim); #endif } #endif
#ifdef BUILD_NAMEDTENSOR static inline std::tuple<Tensor &,Tensor &> mode_out(Tensor & values, Tensor & indices, const Tensor & self, Dimname dim, bool keepdim) { #ifdef USE_STATIC_DISPATCH return TypeDefault::mode_out(values, indices, self, dim, keepdim); #else static auto table = globalATenDispatch().getOpTable("aten::mode.dimname_out(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) values, Tensor(b!)
indices)"); return table->callUnboxed, Tensor &, Tensor &, const Tensor &, Dimname, bool>(values, indices, self, dim, keepdim); #endif } #endif static inline Tensor mul(const Tensor & self, const Tensor & other) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self, other)))) { case Backend::CPU: return CPUType::mul(self, other); break; case Backend::SparseCPU: return SparseCPUType::mul(self, other); break; default: AT_ERROR("mul not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(self, other))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::mul", "Tensor"}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self, other)), self, other); #endif } static inline Tensor & mul_out(Tensor & out, const Tensor & self, const Tensor & other) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(out, self, other)))) { case Backend::CPU: return CPUType::mul_out(out, self, other); break; case Backend::SparseCPU: return SparseCPUType::mul_out(out, self, other); break; default: AT_ERROR("mul_out not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(out, self, other))); } #else static auto table = globalATenDispatch().getOpTable("aten::mul.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)"); return table->callUnboxed(out, self, other); #endif } static inline Tensor mul(const Tensor & self, Scalar other) { #ifdef USE_STATIC_DISPATCH return TypeDefault::mul(self, other); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::mul", "Scalar"}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self, other); #endif } static inline Tensor mv(const Tensor & self, const Tensor & vec) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self, vec)))) { case Backend::CPU: return CPUType::mv(self, vec); break; default: AT_ERROR("mv not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(self, vec))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::mv", ""}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self, vec)), self, vec); #endif } static inline Tensor & mv_out(Tensor & out, const Tensor & self, const Tensor & vec) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(out, self, vec)))) { case Backend::CPU: return CPUType::mv_out(out, self, vec); break; default: AT_ERROR("mv_out not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(out, self, vec))); } #else static auto table = globalATenDispatch().getOpTable("aten::mv.out(Tensor self, Tensor vec, *, Tensor(a!) 
out) -> Tensor(a!)"); return table->callUnboxed(out, self, vec); #endif } static inline Tensor mvlgamma(const Tensor & self, int64_t p) { #ifdef USE_STATIC_DISPATCH return TypeDefault::mvlgamma(self, p); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::mvlgamma", ""}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self, p); #endif } static inline Tensor narrow(const Tensor & self, int64_t dim, int64_t start, int64_t length) { #ifdef USE_STATIC_DISPATCH return TypeDefault::narrow(self, dim, start, length); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::narrow", ""}).value(); return c10::Dispatcher::singleton().callUnboxedOnly( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self, dim, start, length); #endif } static inline std::tuple native_batch_norm(const Tensor & input, const Tensor & weight, const Tensor & bias, const Tensor & running_mean, const Tensor & running_var, bool training, double momentum, double eps) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(input, weight, bias, running_mean, running_var)))) { case Backend::CPU: return CPUType::native_batch_norm(input, weight, bias, running_mean, running_var, training, momentum, eps); break; default: AT_ERROR("native_batch_norm not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(input, weight, bias, running_mean, running_var))); } #else static auto table = globalATenDispatch().getOpTable("aten::native_batch_norm(Tensor input, Tensor? weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float momentum, float eps) -> (Tensor, Tensor, Tensor)"); return table->callUnboxed, const Tensor &, const Tensor &, const Tensor &, const Tensor &, const Tensor &, bool, double, double>(input, weight, bias, running_mean, running_var, training, momentum, eps); #endif } static inline std::tuple batch_norm_stats(const Tensor & input, double eps) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(input)))) { default: AT_ERROR("batch_norm_stats not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(input))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::batch_norm_stats", ""}).value(); return c10::Dispatcher::singleton().callUnboxedOnly, const Tensor &, double>( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(input)), input, eps); #endif } static inline Tensor batch_norm_elemt(const Tensor & input, const Tensor & weight, const Tensor & bias, const Tensor & mean, const Tensor & invstd, double eps) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(input, weight, bias, mean, invstd)))) { default: AT_ERROR("batch_norm_elemt not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(input, weight, bias, mean, invstd))); } #else static auto table = globalATenDispatch().getOpTable("aten::batch_norm_elemt(Tensor input, Tensor? weight, Tensor? 
bias, Tensor mean, Tensor invstd, float eps) -> Tensor"); return table->callUnboxed(input, weight, bias, mean, invstd, eps); #endif } static inline std::tuple batch_norm_gather_stats(const Tensor & input, const Tensor & mean, const Tensor & invstd, const Tensor & running_mean, const Tensor & running_var, double momentum, double eps, int64_t count) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(input, mean, invstd, running_mean, running_var)))) { default: AT_ERROR("batch_norm_gather_stats not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(input, mean, invstd, running_mean, running_var))); } #else static auto table = globalATenDispatch().getOpTable("aten::batch_norm_gather_stats(Tensor input, Tensor mean, Tensor invstd, Tensor? running_mean, Tensor? running_var, float momentum, float eps, int count) -> (Tensor, Tensor)"); return table->callUnboxed, const Tensor &, const Tensor &, const Tensor &, const Tensor &, const Tensor &, double, double, int64_t>(input, mean, invstd, running_mean, running_var, momentum, eps, count); #endif } static inline std::tuple batch_norm_gather_stats_with_counts(const Tensor & input, const Tensor & mean, const Tensor & invstd, const Tensor & running_mean, const Tensor & running_var, double momentum, double eps, IntArrayRef counts) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(input, mean, invstd, running_mean, running_var)))) { default: AT_ERROR("batch_norm_gather_stats_with_counts not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(input, mean, invstd, running_mean, running_var))); } #else static auto table = globalATenDispatch().getOpTable("aten::batch_norm_gather_stats_with_counts(Tensor input, Tensor mean, Tensor invstd, Tensor? running_mean, Tensor? running_var, float momentum, float eps, int[] counts) -> (Tensor, Tensor)"); return table->callUnboxed, const Tensor &, const Tensor &, const Tensor &, const Tensor &, const Tensor &, double, double, IntArrayRef>(input, mean, invstd, running_mean, running_var, momentum, eps, counts); #endif } static inline std::tuple native_batch_norm_backward(const Tensor & grad_out, const Tensor & input, const Tensor & weight, const Tensor & running_mean, const Tensor & running_var, const Tensor & save_mean, const Tensor & save_invstd, bool train, double eps, std::array output_mask) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(grad_out, input, weight, running_mean, running_var, save_mean, save_invstd)))) { case Backend::CPU: return CPUType::native_batch_norm_backward(grad_out, input, weight, running_mean, running_var, save_mean, save_invstd, train, eps, output_mask); break; default: AT_ERROR("native_batch_norm_backward not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(grad_out, input, weight, running_mean, running_var, save_mean, save_invstd))); } #else static auto table = globalATenDispatch().getOpTable("aten::native_batch_norm_backward(Tensor grad_out, Tensor input, Tensor? weight, Tensor? running_mean, Tensor? running_var, Tensor? save_mean, Tensor? 
save_invstd, bool train, float eps, bool[3] output_mask) -> (Tensor, Tensor, Tensor)"); return table->callUnboxed, const Tensor &, const Tensor &, const Tensor &, const Tensor &, const Tensor &, const Tensor &, const Tensor &, bool, double, std::array>(grad_out, input, weight, running_mean, running_var, save_mean, save_invstd, train, eps, output_mask); #endif } static inline std::tuple batch_norm_backward_reduce(const Tensor & grad_out, const Tensor & input, const Tensor & mean, const Tensor & invstd, const Tensor & weight, bool input_g, bool weight_g, bool bias_g) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(grad_out, input, mean, invstd, weight)))) { default: AT_ERROR("batch_norm_backward_reduce not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(grad_out, input, mean, invstd, weight))); } #else static auto table = globalATenDispatch().getOpTable("aten::batch_norm_backward_reduce(Tensor grad_out, Tensor input, Tensor mean, Tensor invstd, Tensor? weight, bool input_g, bool weight_g, bool bias_g) -> (Tensor, Tensor, Tensor, Tensor)"); return table->callUnboxed, const Tensor &, const Tensor &, const Tensor &, const Tensor &, const Tensor &, bool, bool, bool>(grad_out, input, mean, invstd, weight, input_g, weight_g, bias_g); #endif } static inline Tensor batch_norm_backward_elemt(const Tensor & grad_out, const Tensor & input, const Tensor & mean, const Tensor & invstd, const Tensor & weight, const Tensor & mean_dy, const Tensor & mean_dy_xmu) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(grad_out, input, mean, invstd, weight, mean_dy, mean_dy_xmu)))) { default: AT_ERROR("batch_norm_backward_elemt not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(grad_out, input, mean, invstd, weight, mean_dy, mean_dy_xmu))); } #else static auto table = globalATenDispatch().getOpTable("aten::batch_norm_backward_elemt(Tensor grad_out, Tensor input, Tensor mean, Tensor invstd, Tensor? weight, Tensor mean_dy, Tensor mean_dy_xmu) -> Tensor"); return table->callUnboxed(grad_out, input, mean, invstd, weight, mean_dy, mean_dy_xmu); #endif } static inline std::tuple batch_norm_update_stats(const Tensor & input, const Tensor & running_mean, const Tensor & running_var, double momentum) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(input, running_mean, running_var)))) { case Backend::CPU: return CPUType::batch_norm_update_stats(input, running_mean, running_var, momentum); break; default: AT_ERROR("batch_norm_update_stats not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(input, running_mean, running_var))); } #else static auto table = globalATenDispatch().getOpTable("aten::batch_norm_update_stats(Tensor input, Tensor? running_mean, Tensor? 
running_var, float momentum) -> (Tensor, Tensor)"); return table->callUnboxed, const Tensor &, const Tensor &, const Tensor &, double>(input, running_mean, running_var, momentum); #endif } static inline bool _nnpack_available() { #ifdef USE_STATIC_DISPATCH return TypeDefault::_nnpack_available(); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::_nnpack_available", ""}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set())); #endif } static inline Tensor _nnpack_spatial_convolution(const Tensor & input, const Tensor & weight, const Tensor & bias, IntArrayRef padding) { #ifdef USE_STATIC_DISPATCH return TypeDefault::_nnpack_spatial_convolution(input, weight, bias, padding); #else static auto table = globalATenDispatch().getOpTable("aten::_nnpack_spatial_convolution(Tensor input, Tensor weight, Tensor? bias, int[2] padding) -> Tensor"); return table->callUnboxed(input, weight, bias, padding); #endif } static inline std::tuple _nnpack_spatial_convolution_backward(const Tensor & input, const Tensor & grad_output, const Tensor & weight, IntArrayRef padding, std::array output_mask) { #ifdef USE_STATIC_DISPATCH return TypeDefault::_nnpack_spatial_convolution_backward(input, grad_output, weight, padding, output_mask); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::_nnpack_spatial_convolution_backward", ""}).value(); return c10::Dispatcher::singleton().callUnboxedOnly, const Tensor &, const Tensor &, const Tensor &, IntArrayRef, std::array>( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(input, grad_output, weight)), input, grad_output, weight, padding, output_mask); #endif } static inline Tensor _nnpack_spatial_convolution_backward_input(const Tensor & input, const Tensor & grad_output, const Tensor & weight, IntArrayRef padding) { #ifdef USE_STATIC_DISPATCH return TypeDefault::_nnpack_spatial_convolution_backward_input(input, grad_output, weight, padding); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::_nnpack_spatial_convolution_backward_input", ""}).value(); return c10::Dispatcher::singleton().callUnboxedOnly( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(input, grad_output, weight)), input, grad_output, weight, padding); #endif } static inline Tensor _nnpack_spatial_convolution_backward_weight(const Tensor & input, IntArrayRef weightsize, const Tensor & grad_output, IntArrayRef padding) { #ifdef USE_STATIC_DISPATCH return TypeDefault::_nnpack_spatial_convolution_backward_weight(input, weightsize, grad_output, padding); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::_nnpack_spatial_convolution_backward_weight", ""}).value(); return c10::Dispatcher::singleton().callUnboxedOnly( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(input, grad_output)), input, weightsize, grad_output, padding); #endif } #ifdef BUILD_NAMEDTENSOR static inline Tensor ones(IntArrayRef size, c10::optional names, const TensorOptions & options) { #ifdef USE_STATIC_DISPATCH return TypeDefault::ones(size, names, options); #else globalLegacyTypeDispatch().initForTensorTypeSet(at::detail::multi_dispatch_tensor_type_set(options)); static auto table = globalATenDispatch().getOpTable("aten::ones.names(int[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor"); return table->callUnboxed, const TensorOptions &>(size, names, options); #endif } #endif static inline Tensor ones(IntArrayRef size, const TensorOptions & options) { #ifdef USE_STATIC_DISPATCH return TypeDefault::ones(size, options); #else globalLegacyTypeDispatch().initForTensorTypeSet(at::detail::multi_dispatch_tensor_type_set(options)); static auto table = globalATenDispatch().getOpTable("aten::ones(int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor"); return table->callUnboxed(size, options); #endif } static inline Tensor & ones_out(Tensor & out, IntArrayRef size) { #ifdef USE_STATIC_DISPATCH return TypeDefault::ones_out(out, size); #else static auto table = globalATenDispatch().getOpTable("aten::ones.out(int[] size, *, Tensor(a!) out) -> Tensor(a!)"); return table->callUnboxed(out, size); #endif } static inline Tensor ones_like(const Tensor & self) { #ifdef USE_STATIC_DISPATCH return TypeDefault::ones_like(self); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::ones_like", ""}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self); #endif } static inline Tensor ones_like(const Tensor & self, const TensorOptions & options) { #ifdef USE_STATIC_DISPATCH return TypeDefault::ones_like(self, options); #else globalLegacyTypeDispatch().initForTensorTypeSet(at::detail::multi_dispatch_tensor_type_set(self, options)); static auto table = globalATenDispatch().getOpTable("aten::ones_like.dtype(Tensor self, *, ScalarType dtype, Layout layout, Device device, bool pin_memory=False) -> Tensor"); return table->callUnboxed(self, options); #endif } static inline Tensor pairwise_distance(const Tensor & x1, const Tensor & x2, double p, double eps, bool keepdim) { #ifdef USE_STATIC_DISPATCH return TypeDefault::pairwise_distance(x1, x2, p, eps, keepdim); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::pairwise_distance", ""}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(x1, x2)), x1, x2, p, eps, keepdim); #endif } static inline Tensor cdist(const Tensor & x1, const Tensor & x2, double p) { #ifdef USE_STATIC_DISPATCH return TypeDefault::cdist(x1, x2, p); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::cdist", ""}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(x1, x2)), x1, x2, p); #endif } static inline Tensor _cdist_backward(const Tensor & grad, const Tensor & x1, const Tensor & x2, double p, const Tensor & cdist) { #ifdef USE_STATIC_DISPATCH return TypeDefault::_cdist_backward(grad, x1, x2, p, cdist); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::_cdist_backward", ""}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(grad, x1, x2, cdist)), grad, x1, x2, p, cdist); #endif } static inline Tensor pdist(const Tensor & self, double p) { #ifdef USE_STATIC_DISPATCH return TypeDefault::pdist(self, p); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::pdist", ""}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self, p); #endif } 
static inline Tensor _pdist_forward(const Tensor & self, double p) { #ifdef USE_STATIC_DISPATCH return TypeDefault::_pdist_forward(self, p); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::_pdist_forward", ""}).value(); return c10::Dispatcher::singleton().callUnboxed<Tensor, const Tensor &, double>( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self, p); #endif }
static inline Tensor _pdist_backward(const Tensor & grad, const Tensor & self, double p, const Tensor & pdist) { #ifdef USE_STATIC_DISPATCH return TypeDefault::_pdist_backward(grad, self, p, pdist); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::_pdist_backward", ""}).value(); return c10::Dispatcher::singleton().callUnboxed<Tensor, const Tensor &, const Tensor &, double, const Tensor &>( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(grad, self, pdist)), grad, self, p, pdist); #endif }
static inline Tensor cosine_similarity(const Tensor & x1, const Tensor & x2, int64_t dim, double eps) { #ifdef USE_STATIC_DISPATCH return TypeDefault::cosine_similarity(x1, x2, dim, eps); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::cosine_similarity", ""}).value(); return c10::Dispatcher::singleton().callUnboxed<Tensor, const Tensor &, const Tensor &, int64_t, double>( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(x1, x2)), x1, x2, dim, eps); #endif }
static inline Tensor pixel_shuffle(const Tensor & self, int64_t upscale_factor) { #ifdef USE_STATIC_DISPATCH return TypeDefault::pixel_shuffle(self, upscale_factor); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::pixel_shuffle", ""}).value(); return c10::Dispatcher::singleton().callUnboxed<Tensor, const Tensor &, int64_t>( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self, upscale_factor); #endif }
static inline Tensor pinverse(const Tensor & self, double rcond) { #ifdef USE_STATIC_DISPATCH return TypeDefault::pinverse(self, rcond); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::pinverse", ""}).value(); return c10::Dispatcher::singleton().callUnboxed<Tensor, const Tensor &, double>( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self, rcond); #endif }
static inline Tensor poisson_nll_loss(const Tensor & input, const Tensor & target, bool log_input, bool full, double eps, int64_t reduction) { #ifdef USE_STATIC_DISPATCH return TypeDefault::poisson_nll_loss(input, target, log_input, full, eps, reduction); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::poisson_nll_loss", ""}).value(); return c10::Dispatcher::singleton().callUnboxed<Tensor, const Tensor &, const Tensor &, bool, bool, double, int64_t>( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(input, target)), input, target, log_input, full, eps, reduction); #endif }
static inline Tensor scalar_tensor(Scalar s, const TensorOptions & options) { #ifdef USE_STATIC_DISPATCH return TypeDefault::scalar_tensor(s, options); #else globalLegacyTypeDispatch().initForTensorTypeSet(at::detail::multi_dispatch_tensor_type_set(options)); static auto table = globalATenDispatch().getOpTable("aten::scalar_tensor(Scalar s, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool?
pin_memory=None) -> Tensor"); return table->callUnboxed(s, options); #endif } #ifdef BUILD_NAMEDTENSOR static inline Tensor rand(IntArrayRef size, c10::optional names, const TensorOptions & options) { #ifdef USE_STATIC_DISPATCH return TypeDefault::rand(size, names, options); #else globalLegacyTypeDispatch().initForTensorTypeSet(at::detail::multi_dispatch_tensor_type_set(options)); static auto table = globalATenDispatch().getOpTable("aten::rand.names(int[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor"); return table->callUnboxed, const TensorOptions &>(size, names, options); #endif } #endif #ifdef BUILD_NAMEDTENSOR static inline Tensor rand(IntArrayRef size, Generator * generator, c10::optional names, const TensorOptions & options) { #ifdef USE_STATIC_DISPATCH return TypeDefault::rand(size, generator, names, options); #else globalLegacyTypeDispatch().initForTensorTypeSet(at::detail::multi_dispatch_tensor_type_set(options)); static auto table = globalATenDispatch().getOpTable("aten::rand.generator_with_names(int[] size, *, Generator? generator, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor"); return table->callUnboxed, const TensorOptions &>(size, generator, names, options); #endif } #endif static inline Tensor rand(IntArrayRef size, const TensorOptions & options) { #ifdef USE_STATIC_DISPATCH return TypeDefault::rand(size, options); #else globalLegacyTypeDispatch().initForTensorTypeSet(at::detail::multi_dispatch_tensor_type_set(options)); static auto table = globalATenDispatch().getOpTable("aten::rand(int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor"); return table->callUnboxed(size, options); #endif } static inline Tensor rand(IntArrayRef size, Generator * generator, const TensorOptions & options) { #ifdef USE_STATIC_DISPATCH return TypeDefault::rand(size, generator, options); #else globalLegacyTypeDispatch().initForTensorTypeSet(at::detail::multi_dispatch_tensor_type_set(options)); static auto table = globalATenDispatch().getOpTable("aten::rand.generator(int[] size, *, Generator? generator, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor"); return table->callUnboxed(size, generator, options); #endif } static inline Tensor & rand_out(Tensor & out, IntArrayRef size) { #ifdef USE_STATIC_DISPATCH return TypeDefault::rand_out(out, size); #else static auto table = globalATenDispatch().getOpTable("aten::rand.out(int[] size, *, Tensor(a!) out) -> Tensor(a!)"); return table->callUnboxed(out, size); #endif } static inline Tensor & rand_out(Tensor & out, IntArrayRef size, Generator * generator) { #ifdef USE_STATIC_DISPATCH return TypeDefault::rand_out(out, size, generator); #else static auto table = globalATenDispatch().getOpTable("aten::rand.generator_out(int[] size, *, Generator? generator, Tensor(a!) 
out) -> Tensor(a!)"); return table->callUnboxed(out, size, generator); #endif } static inline Tensor rand_like(const Tensor & self) { #ifdef USE_STATIC_DISPATCH return TypeDefault::rand_like(self); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::rand_like", ""}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self); #endif } static inline Tensor rand_like(const Tensor & self, const TensorOptions & options) { #ifdef USE_STATIC_DISPATCH return TypeDefault::rand_like(self, options); #else globalLegacyTypeDispatch().initForTensorTypeSet(at::detail::multi_dispatch_tensor_type_set(self, options)); static auto table = globalATenDispatch().getOpTable("aten::rand_like.dtype(Tensor self, *, ScalarType dtype, Layout layout, Device device, bool pin_memory=False) -> Tensor"); return table->callUnboxed(self, options); #endif } static inline Tensor randint(int64_t high, IntArrayRef size, const TensorOptions & options) { #ifdef USE_STATIC_DISPATCH return TypeDefault::randint(high, size, options); #else globalLegacyTypeDispatch().initForTensorTypeSet(at::detail::multi_dispatch_tensor_type_set(options)); static auto table = globalATenDispatch().getOpTable("aten::randint(int high, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor"); return table->callUnboxed(high, size, options); #endif } static inline Tensor randint(int64_t high, IntArrayRef size, Generator * generator, const TensorOptions & options) { #ifdef USE_STATIC_DISPATCH return TypeDefault::randint(high, size, generator, options); #else globalLegacyTypeDispatch().initForTensorTypeSet(at::detail::multi_dispatch_tensor_type_set(options)); static auto table = globalATenDispatch().getOpTable("aten::randint.generator(int high, int[] size, *, Generator? generator, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor"); return table->callUnboxed(high, size, generator, options); #endif } static inline Tensor randint(int64_t low, int64_t high, IntArrayRef size, const TensorOptions & options) { #ifdef USE_STATIC_DISPATCH return TypeDefault::randint(low, high, size, options); #else globalLegacyTypeDispatch().initForTensorTypeSet(at::detail::multi_dispatch_tensor_type_set(options)); static auto table = globalATenDispatch().getOpTable("aten::randint.low(int low, int high, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor"); return table->callUnboxed(low, high, size, options); #endif } static inline Tensor randint(int64_t low, int64_t high, IntArrayRef size, Generator * generator, const TensorOptions & options) { #ifdef USE_STATIC_DISPATCH return TypeDefault::randint(low, high, size, generator, options); #else globalLegacyTypeDispatch().initForTensorTypeSet(at::detail::multi_dispatch_tensor_type_set(options)); static auto table = globalATenDispatch().getOpTable("aten::randint.low_generator(int low, int high, int[] size, *, Generator? generator, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor"); return table->callUnboxed(low, high, size, generator, options); #endif } static inline Tensor & randint_out(Tensor & out, int64_t high, IntArrayRef size) { #ifdef USE_STATIC_DISPATCH return TypeDefault::randint_out(out, high, size); #else static auto table = globalATenDispatch().getOpTable("aten::randint.out(int high, int[] size, *, Tensor(a!) out) -> Tensor(a!)"); return table->callUnboxed(out, high, size); #endif } static inline Tensor & randint_out(Tensor & out, int64_t high, IntArrayRef size, Generator * generator) { #ifdef USE_STATIC_DISPATCH return TypeDefault::randint_out(out, high, size, generator); #else static auto table = globalATenDispatch().getOpTable("aten::randint.generator_out(int high, int[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!)"); return table->callUnboxed(out, high, size, generator); #endif } static inline Tensor & randint_out(Tensor & out, int64_t low, int64_t high, IntArrayRef size) { #ifdef USE_STATIC_DISPATCH return TypeDefault::randint_out(out, low, high, size); #else static auto table = globalATenDispatch().getOpTable("aten::randint.low_out(int low, int high, int[] size, *, Tensor(a!) out) -> Tensor(a!)"); return table->callUnboxed(out, low, high, size); #endif } static inline Tensor & randint_out(Tensor & out, int64_t low, int64_t high, IntArrayRef size, Generator * generator) { #ifdef USE_STATIC_DISPATCH return TypeDefault::randint_out(out, low, high, size, generator); #else static auto table = globalATenDispatch().getOpTable("aten::randint.low_generator_out(int low, int high, int[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!)"); return table->callUnboxed(out, low, high, size, generator); #endif } static inline Tensor randint_like(const Tensor & self, int64_t high) { #ifdef USE_STATIC_DISPATCH return TypeDefault::randint_like(self, high); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::randint_like", ""}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self, high); #endif } static inline Tensor randint_like(const Tensor & self, int64_t low, int64_t high) { #ifdef USE_STATIC_DISPATCH return TypeDefault::randint_like(self, low, high); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::randint_like", "low"}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self, low, high); #endif } static inline Tensor randint_like(const Tensor & self, int64_t high, const TensorOptions & options) { #ifdef USE_STATIC_DISPATCH return TypeDefault::randint_like(self, high, options); #else globalLegacyTypeDispatch().initForTensorTypeSet(at::detail::multi_dispatch_tensor_type_set(self, options)); static auto table = globalATenDispatch().getOpTable("aten::randint_like.dtype(Tensor self, int high, *, ScalarType dtype, Layout layout, Device device, bool pin_memory=False) -> Tensor"); return table->callUnboxed(self, high, options); #endif } static inline Tensor randint_like(const Tensor & self, int64_t low, int64_t high, const TensorOptions & options) { #ifdef USE_STATIC_DISPATCH return TypeDefault::randint_like(self, low, high, options); #else globalLegacyTypeDispatch().initForTensorTypeSet(at::detail::multi_dispatch_tensor_type_set(self, options)); static auto table = globalATenDispatch().getOpTable("aten::randint_like.low_dtype(Tensor self, int low, int high, *, ScalarType dtype, 
Layout layout, Device device, bool pin_memory=False) -> Tensor"); return table->callUnboxed(self, low, high, options); #endif } static inline Tensor randn(IntArrayRef size, const TensorOptions & options) { #ifdef USE_STATIC_DISPATCH return TypeDefault::randn(size, options); #else globalLegacyTypeDispatch().initForTensorTypeSet(at::detail::multi_dispatch_tensor_type_set(options)); static auto table = globalATenDispatch().getOpTable("aten::randn(int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor"); return table->callUnboxed(size, options); #endif } static inline Tensor randn(IntArrayRef size, Generator * generator, const TensorOptions & options) { #ifdef USE_STATIC_DISPATCH return TypeDefault::randn(size, generator, options); #else globalLegacyTypeDispatch().initForTensorTypeSet(at::detail::multi_dispatch_tensor_type_set(options)); static auto table = globalATenDispatch().getOpTable("aten::randn.generator(int[] size, *, Generator? generator, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor"); return table->callUnboxed(size, generator, options); #endif } #ifdef BUILD_NAMEDTENSOR static inline Tensor randn(IntArrayRef size, c10::optional names, const TensorOptions & options) { #ifdef USE_STATIC_DISPATCH return TypeDefault::randn(size, names, options); #else globalLegacyTypeDispatch().initForTensorTypeSet(at::detail::multi_dispatch_tensor_type_set(options)); static auto table = globalATenDispatch().getOpTable("aten::randn.names(int[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor"); return table->callUnboxed, const TensorOptions &>(size, names, options); #endif } #endif #ifdef BUILD_NAMEDTENSOR static inline Tensor randn(IntArrayRef size, Generator * generator, c10::optional names, const TensorOptions & options) { #ifdef USE_STATIC_DISPATCH return TypeDefault::randn(size, generator, names, options); #else globalLegacyTypeDispatch().initForTensorTypeSet(at::detail::multi_dispatch_tensor_type_set(options)); static auto table = globalATenDispatch().getOpTable("aten::randn.generator_with_names(int[] size, *, Generator? generator, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor"); return table->callUnboxed, const TensorOptions &>(size, generator, names, options); #endif } #endif static inline Tensor & randn_out(Tensor & out, IntArrayRef size) { #ifdef USE_STATIC_DISPATCH return TypeDefault::randn_out(out, size); #else static auto table = globalATenDispatch().getOpTable("aten::randn.out(int[] size, *, Tensor(a!) out) -> Tensor(a!)"); return table->callUnboxed(out, size); #endif } static inline Tensor & randn_out(Tensor & out, IntArrayRef size, Generator * generator) { #ifdef USE_STATIC_DISPATCH return TypeDefault::randn_out(out, size, generator); #else static auto table = globalATenDispatch().getOpTable("aten::randn.generator_out(int[] size, *, Generator? generator, Tensor(a!) 
out) -> Tensor(a!)"); return table->callUnboxed(out, size, generator); #endif } static inline Tensor randn_like(const Tensor & self) { #ifdef USE_STATIC_DISPATCH return TypeDefault::randn_like(self); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::randn_like", ""}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self); #endif } static inline Tensor randn_like(const Tensor & self, const TensorOptions & options) { #ifdef USE_STATIC_DISPATCH return TypeDefault::randn_like(self, options); #else globalLegacyTypeDispatch().initForTensorTypeSet(at::detail::multi_dispatch_tensor_type_set(self, options)); static auto table = globalATenDispatch().getOpTable("aten::randn_like.dtype(Tensor self, *, ScalarType dtype, Layout layout, Device device, bool pin_memory=False) -> Tensor"); return table->callUnboxed(self, options); #endif } static inline Tensor randperm(int64_t n, const TensorOptions & options) { #ifdef USE_STATIC_DISPATCH return TypeDefault::randperm(n, options); #else globalLegacyTypeDispatch().initForTensorTypeSet(at::detail::multi_dispatch_tensor_type_set(options)); static auto table = globalATenDispatch().getOpTable("aten::randperm(int n, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor"); return table->callUnboxed(n, options); #endif } static inline Tensor randperm(int64_t n, Generator * generator, const TensorOptions & options) { #ifdef USE_STATIC_DISPATCH return TypeDefault::randperm(n, generator, options); #else globalLegacyTypeDispatch().initForTensorTypeSet(at::detail::multi_dispatch_tensor_type_set(options)); static auto table = globalATenDispatch().getOpTable("aten::randperm.generator(int n, *, Generator? generator, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor"); return table->callUnboxed(n, generator, options); #endif } static inline Tensor & randperm_out(Tensor & out, int64_t n) { #ifdef USE_STATIC_DISPATCH return TypeDefault::randperm_out(out, n); #else static auto table = globalATenDispatch().getOpTable("aten::randperm.out(int n, *, Tensor(a!) out) -> Tensor(a!)"); return table->callUnboxed(out, n); #endif } static inline Tensor & randperm_out(Tensor & out, int64_t n, Generator * generator) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(out)))) { case Backend::CPU: return CPUType::randperm_out(out, n, generator); break; default: AT_ERROR("randperm_out not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(out))); } #else static auto table = globalATenDispatch().getOpTable("aten::randperm.generator_out(int n, *, Generator? generator, Tensor(a!) out) -> Tensor(a!)"); return table->callUnboxed(out, n, generator); #endif } static inline Tensor range(Scalar start, Scalar end, Scalar step, const TensorOptions & options) { #ifdef USE_STATIC_DISPATCH return TypeDefault::range(start, end, step, options); #else globalLegacyTypeDispatch().initForTensorTypeSet(at::detail::multi_dispatch_tensor_type_set(options)); static auto table = globalATenDispatch().getOpTable("aten::range.step(Scalar start, Scalar end, Scalar step=1, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor"); return table->callUnboxed(start, end, step, options); #endif } static inline Tensor range(Scalar start, Scalar end, const TensorOptions & options) { #ifdef USE_STATIC_DISPATCH return TypeDefault::range(start, end, options); #else globalLegacyTypeDispatch().initForTensorTypeSet(at::detail::multi_dispatch_tensor_type_set(options)); static auto table = globalATenDispatch().getOpTable("aten::range(Scalar start, Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor"); return table->callUnboxed(start, end, options); #endif } static inline Tensor & range_out(Tensor & out, Scalar start, Scalar end, Scalar step) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(out)))) { case Backend::CPU: return CPUType::range_out(out, start, end, step); break; default: AT_ERROR("range_out not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(out))); } #else static auto table = globalATenDispatch().getOpTable("aten::range.out(Scalar start, Scalar end, Scalar step=1, *, Tensor(a!) out) -> Tensor(a!)"); return table->callUnboxed(out, start, end, step); #endif } static inline Tensor reciprocal(const Tensor & self) { #ifdef USE_STATIC_DISPATCH return TypeDefault::reciprocal(self); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::reciprocal", ""}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self); #endif } static inline Tensor & reciprocal_(Tensor & self) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)))) { case Backend::CPU: return CPUType::reciprocal_(self); break; default: AT_ERROR("reciprocal_ not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(self))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::reciprocal_", ""}).value(); return c10::Dispatcher::singleton().callUnboxedOnly( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self); #endif } static inline Tensor & reciprocal_out(Tensor & out, const Tensor & self) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(out, self)))) { case Backend::CPU: return CPUType::reciprocal_out(out, self); break; default: AT_ERROR("reciprocal_out not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(out, self))); } #else static auto table = globalATenDispatch().getOpTable("aten::reciprocal.out(Tensor self, *, Tensor(a!) 
out) -> Tensor(a!)"); return table->callUnboxed<Tensor &, Tensor &, const Tensor &>(out, self);
#endif
}
static inline Tensor neg(const Tensor & self) {
#ifdef USE_STATIC_DISPATCH
    return TypeDefault::neg(self);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton()
        .findSchema({"aten::neg", ""}).value();
    return c10::Dispatcher::singleton().callUnboxed<Tensor, const Tensor &>(
        op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self);
#endif
}
static inline Tensor & neg_(Tensor & self) {
#ifdef USE_STATIC_DISPATCH
    return TypeDefault::neg_(self);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton()
        .findSchema({"aten::neg_", ""}).value();
    return c10::Dispatcher::singleton().callUnboxedOnly<Tensor &, Tensor &>(
        op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self);
#endif
}
static inline Tensor & neg_out(Tensor & out, const Tensor & self) {
#ifdef USE_STATIC_DISPATCH
    switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(out, self)))) {
        case Backend::CPU:
            return CPUType::neg_out(out, self);
            break;
        default:
            AT_ERROR("neg_out not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(out, self)));
    }
#else
    static auto table = globalATenDispatch().getOpTable("aten::neg.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)");
    return table->callUnboxed<Tensor &, Tensor &, const Tensor &>(out, self);
#endif
}
static inline Tensor repeat_interleave(const Tensor & repeats) {
#ifdef USE_STATIC_DISPATCH
    switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(repeats)))) {
        case Backend::CPU:
            return CPUType::repeat_interleave(repeats);
            break;
        default:
            AT_ERROR("repeat_interleave not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(repeats)));
    }
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton()
        .findSchema({"aten::repeat_interleave", "Tensor"}).value();
    return c10::Dispatcher::singleton().callUnboxed<Tensor, const Tensor &>(
        op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(repeats)), repeats);
#endif
}
static inline Tensor repeat_interleave(const Tensor & self, const Tensor & repeats, c10::optional<int64_t> dim) {
#ifdef USE_STATIC_DISPATCH
    return TypeDefault::repeat_interleave(self, repeats, dim);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton()
        .findSchema({"aten::repeat_interleave", "self_Tensor"}).value();
    return c10::Dispatcher::singleton().callUnboxed<Tensor, const Tensor &, const Tensor &, c10::optional<int64_t>>(
        op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self, repeats)), self, repeats, dim);
#endif
}
static inline Tensor repeat_interleave(const Tensor & self, int64_t repeats, c10::optional<int64_t> dim) {
#ifdef USE_STATIC_DISPATCH
    return TypeDefault::repeat_interleave(self, repeats, dim);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton()
        .findSchema({"aten::repeat_interleave", "self_int"}).value();
    return c10::Dispatcher::singleton().callUnboxed<Tensor, const Tensor &, int64_t, c10::optional<int64_t>>(
        op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self, repeats, dim);
#endif
}
static inline Tensor reshape(const Tensor & self, IntArrayRef shape) {
#ifdef USE_STATIC_DISPATCH
    return TypeDefault::reshape(self, shape);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton()
        .findSchema({"aten::reshape", ""}).value();
    return c10::Dispatcher::singleton().callUnboxedOnly<Tensor, const Tensor &, IntArrayRef>(
        op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self, shape);
#endif
}
static inline Tensor _mkldnn_reshape(const Tensor & self, IntArrayRef shape) {
#ifdef USE_STATIC_DISPATCH
switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)))) { default: AT_ERROR("_mkldnn_reshape not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(self))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::_mkldnn_reshape", ""}).value(); return c10::Dispatcher::singleton().callUnboxedOnly( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self, shape); #endif } static inline Tensor round(const Tensor & self) { #ifdef USE_STATIC_DISPATCH return TypeDefault::round(self); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::round", ""}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self); #endif } static inline Tensor & round_(Tensor & self) { #ifdef USE_STATIC_DISPATCH return TypeDefault::round_(self); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::round_", ""}).value(); return c10::Dispatcher::singleton().callUnboxedOnly( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self); #endif } static inline Tensor & round_out(Tensor & out, const Tensor & self) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(out, self)))) { case Backend::CPU: return CPUType::round_out(out, self); break; default: AT_ERROR("round_out not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(out, self))); } #else static auto table = globalATenDispatch().getOpTable("aten::round.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)"); return table->callUnboxed(out, self); #endif } static inline Tensor rrelu(const Tensor & self, Scalar lower, Scalar upper, bool training, Generator * generator) { #ifdef USE_STATIC_DISPATCH return TypeDefault::rrelu(self, lower, upper, training, generator); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::rrelu", ""}).value(); return c10::Dispatcher::singleton().callUnboxedOnly( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self, lower, upper, training, generator); #endif } static inline Tensor & rrelu_(Tensor & self, Scalar lower, Scalar upper, bool training, Generator * generator) { #ifdef USE_STATIC_DISPATCH return TypeDefault::rrelu_(self, lower, upper, training, generator); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::rrelu_", ""}).value(); return c10::Dispatcher::singleton().callUnboxedOnly( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self, lower, upper, training, generator); #endif } static inline Tensor relu(const Tensor & self) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)))) { case Backend::CPU: return CPUType::relu(self); break; case Backend::QuantizedCPU: return QuantizedCPUType::relu(self); break; default: AT_ERROR("relu not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(self))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::relu", ""}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self); #endif } static inline Tensor & relu_(Tensor & self) { #ifdef USE_STATIC_DISPATCH 
switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)))) { case Backend::CPU: return CPUType::relu_(self); break; case Backend::QuantizedCPU: return QuantizedCPUType::relu_(self); break; default: AT_ERROR("relu_ not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(self))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::relu_", ""}).value(); return c10::Dispatcher::singleton().callUnboxedOnly( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self); #endif } static inline Tensor prelu(const Tensor & self, const Tensor & weight) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self, weight)))) { case Backend::CPU: return CPUType::prelu(self, weight); break; default: AT_ERROR("prelu not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(self, weight))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::prelu", ""}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self, weight)), self, weight); #endif } static inline std::tuple prelu_backward(const Tensor & grad_output, const Tensor & self, const Tensor & weight) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(grad_output, self, weight)))) { case Backend::CPU: return CPUType::prelu_backward(grad_output, self, weight); break; default: AT_ERROR("prelu_backward not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(grad_output, self, weight))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::prelu_backward", ""}).value(); return c10::Dispatcher::singleton().callUnboxedOnly, const Tensor &, const Tensor &, const Tensor &>( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(grad_output, self, weight)), grad_output, self, weight); #endif } static inline Tensor gelu(const Tensor & self) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)))) { case Backend::CPU: return CPUType::gelu(self); break; default: AT_ERROR("gelu not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(self))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::gelu", ""}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self); #endif } static inline Tensor gelu_backward(const Tensor & grad, const Tensor & self) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(grad, self)))) { case Backend::CPU: return CPUType::gelu_backward(grad, self); break; default: AT_ERROR("gelu_backward not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(grad, self))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::gelu_backward", ""}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(grad, self)), grad, self); #endif } static inline Tensor hardshrink(const Tensor & self, Scalar lambd) { #ifdef USE_STATIC_DISPATCH 
switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)))) { case Backend::CPU: return CPUType::hardshrink(self, lambd); break; default: AT_ERROR("hardshrink not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(self))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::hardshrink", ""}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self, lambd); #endif } static inline Tensor hardshrink_backward(const Tensor & grad_out, const Tensor & self, Scalar lambd) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(grad_out, self)))) { case Backend::CPU: return CPUType::hardshrink_backward(grad_out, self, lambd); break; default: AT_ERROR("hardshrink_backward not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(grad_out, self))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::hardshrink_backward", ""}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(grad_out, self)), grad_out, self, lambd); #endif } static inline Tensor rsqrt(const Tensor & self) { #ifdef USE_STATIC_DISPATCH return TypeDefault::rsqrt(self); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::rsqrt", ""}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self); #endif } static inline Tensor & rsqrt_(Tensor & self) { #ifdef USE_STATIC_DISPATCH return TypeDefault::rsqrt_(self); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::rsqrt_", ""}).value(); return c10::Dispatcher::singleton().callUnboxedOnly( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self); #endif } static inline Tensor & rsqrt_out(Tensor & out, const Tensor & self) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(out, self)))) { case Backend::CPU: return CPUType::rsqrt_out(out, self); break; default: AT_ERROR("rsqrt_out not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(out, self))); } #else static auto table = globalATenDispatch().getOpTable("aten::rsqrt.out(Tensor self, *, Tensor(a!) 
out) -> Tensor(a!)"); return table->callUnboxed(out, self); #endif } #ifdef BUILD_NAMEDTENSOR static inline Tensor select(const Tensor & self, Dimname dim, int64_t index) { #ifdef USE_STATIC_DISPATCH return TypeDefault::select(self, dim, index); #else static auto table = globalATenDispatch().getOpTable("aten::select.Dimname(Tensor(a) self, Dimname dim, int index) -> Tensor(a)"); return table->callUnboxed(self, dim, index); #endif } #endif static inline Tensor select(const Tensor & self, int64_t dim, int64_t index) { #ifdef USE_STATIC_DISPATCH return TypeDefault::select(self, dim, index); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::select", "int"}).value(); return c10::Dispatcher::singleton().callUnboxedOnly( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self, dim, index); #endif } static inline Tensor selu(const Tensor & self) { #ifdef USE_STATIC_DISPATCH return TypeDefault::selu(self); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::selu", ""}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self); #endif } static inline Tensor & selu_(Tensor & self) { #ifdef USE_STATIC_DISPATCH return TypeDefault::selu_(self); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::selu_", ""}).value(); return c10::Dispatcher::singleton().callUnboxedOnly( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self); #endif } static inline Tensor celu(const Tensor & self, Scalar alpha) { #ifdef USE_STATIC_DISPATCH return TypeDefault::celu(self, alpha); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::celu", ""}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self, alpha); #endif } static inline Tensor & celu_(Tensor & self, Scalar alpha) { #ifdef USE_STATIC_DISPATCH return TypeDefault::celu_(self, alpha); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::celu_", ""}).value(); return c10::Dispatcher::singleton().callUnboxedOnly( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self, alpha); #endif } static inline Tensor sigmoid(const Tensor & self) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)))) { case Backend::CPU: return CPUType::sigmoid(self); break; default: AT_ERROR("sigmoid not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(self))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::sigmoid", ""}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self); #endif } static inline Tensor & sigmoid_(Tensor & self) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)))) { case Backend::CPU: return CPUType::sigmoid_(self); break; default: AT_ERROR("sigmoid_ not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(self))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::sigmoid_", ""}).value(); return c10::Dispatcher::singleton().callUnboxedOnly( op, 
impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self);
#endif
}
static inline Tensor & sigmoid_out(Tensor & out, const Tensor & self) {
#ifdef USE_STATIC_DISPATCH
    switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(out, self)))) {
        case Backend::CPU:
            return CPUType::sigmoid_out(out, self);
            break;
        default:
            AT_ERROR("sigmoid_out not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(out, self)));
    }
#else
    static auto table = globalATenDispatch().getOpTable("aten::sigmoid.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)");
    return table->callUnboxed<Tensor &, Tensor &, const Tensor &>(out, self);
#endif
}
static inline Tensor sin(const Tensor & self) {
#ifdef USE_STATIC_DISPATCH
    return TypeDefault::sin(self);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton()
        .findSchema({"aten::sin", ""}).value();
    return c10::Dispatcher::singleton().callUnboxed<Tensor, const Tensor &>(
        op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self);
#endif
}
static inline Tensor & sin_(Tensor & self) {
#ifdef USE_STATIC_DISPATCH
    switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)))) {
        case Backend::CPU:
            return CPUType::sin_(self);
            break;
        default:
            AT_ERROR("sin_ not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(self)));
    }
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton()
        .findSchema({"aten::sin_", ""}).value();
    return c10::Dispatcher::singleton().callUnboxedOnly<Tensor &, Tensor &>(
        op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self);
#endif
}
static inline Tensor & sin_out(Tensor & out, const Tensor & self) {
#ifdef USE_STATIC_DISPATCH
    switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(out, self)))) {
        case Backend::CPU:
            return CPUType::sin_out(out, self);
            break;
        default:
            AT_ERROR("sin_out not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(out, self)));
    }
#else
    static auto table = globalATenDispatch().getOpTable("aten::sin.out(Tensor self, *, Tensor(a!)
out) -> Tensor(a!)"); return table->callUnboxed(out, self); #endif } static inline Tensor sinh(const Tensor & self) { #ifdef USE_STATIC_DISPATCH return TypeDefault::sinh(self); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::sinh", ""}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self); #endif } static inline Tensor & sinh_(Tensor & self) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)))) { case Backend::CPU: return CPUType::sinh_(self); break; default: AT_ERROR("sinh_ not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(self))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::sinh_", ""}).value(); return c10::Dispatcher::singleton().callUnboxedOnly( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self); #endif } static inline Tensor & sinh_out(Tensor & out, const Tensor & self) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(out, self)))) { case Backend::CPU: return CPUType::sinh_out(out, self); break; default: AT_ERROR("sinh_out not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(out, self))); } #else static auto table = globalATenDispatch().getOpTable("aten::sinh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)"); return table->callUnboxed(out, self); #endif } static inline Tensor detach(const Tensor & self) { #ifdef USE_STATIC_DISPATCH return TypeDefault::detach(self); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::detach", ""}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self); #endif } static inline Tensor & detach_(Tensor & self) { #ifdef USE_STATIC_DISPATCH return TypeDefault::detach_(self); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::detach_", ""}).value(); return c10::Dispatcher::singleton().callUnboxedOnly( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self); #endif } static inline int64_t size(const Tensor & self, int64_t dim) { #ifdef USE_STATIC_DISPATCH return TypeDefault::size(self, dim); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::size", "int"}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self, dim); #endif } #ifdef BUILD_NAMEDTENSOR static inline int64_t size(const Tensor & self, Dimname dim) { #ifdef USE_STATIC_DISPATCH return TypeDefault::size(self, dim); #else static auto table = globalATenDispatch().getOpTable("aten::size.Dimname(Tensor self, Dimname dim) -> int"); return table->callUnboxed(self, dim); #endif } #endif static inline Tensor slice(const Tensor & self, int64_t dim, int64_t start, int64_t end, int64_t step) { #ifdef USE_STATIC_DISPATCH return TypeDefault::slice(self, dim, start, end, step); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::slice", "Tensor"}).value(); return c10::Dispatcher::singleton().callUnboxedOnly( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self, dim, start, end, step); #endif } static inline std::tuple slogdet(const Tensor & self) { #ifdef 
USE_STATIC_DISPATCH return TypeDefault::slogdet(self); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::slogdet", ""}).value(); return c10::Dispatcher::singleton().callUnboxedOnly, const Tensor &>( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self); #endif } static inline Tensor smm(const Tensor & self, const Tensor & mat2) { #ifdef USE_STATIC_DISPATCH return TypeDefault::smm(self, mat2); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::smm", ""}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self, mat2)), self, mat2); #endif } static inline Tensor softmax(const Tensor & self, int64_t dim, c10::optional dtype) { #ifdef USE_STATIC_DISPATCH return TypeDefault::softmax(self, dim, dtype); #else static auto table = globalATenDispatch().getOpTable("aten::softmax(Tensor self, int dim, ScalarType? dtype=None) -> Tensor"); return table->callUnboxed>(self, dim, dtype); #endif } #ifdef BUILD_NAMEDTENSOR static inline Tensor softmax(const Tensor & self, Dimname dim, c10::optional dtype) { #ifdef USE_STATIC_DISPATCH return TypeDefault::softmax(self, dim, dtype); #else static auto table = globalATenDispatch().getOpTable("aten::softmax(Tensor self, Dimname dim, *, ScalarType? dtype=None) -> Tensor"); return table->callUnboxed>(self, dim, dtype); #endif } #endif static inline Tensor _softmax(const Tensor & self, int64_t dim, bool half_to_float) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)))) { case Backend::CPU: return CPUType::_softmax(self, dim, half_to_float); break; default: AT_ERROR("_softmax not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(self))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::_softmax", ""}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self, dim, half_to_float); #endif } static inline Tensor _softmax_backward_data(const Tensor & grad_output, const Tensor & output, int64_t dim, const Tensor & self) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(grad_output, output, self)))) { case Backend::CPU: return CPUType::_softmax_backward_data(grad_output, output, dim, self); break; default: AT_ERROR("_softmax_backward_data not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(grad_output, output, self))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::_softmax_backward_data", ""}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(grad_output, output, self)), grad_output, output, dim, self); #endif } static inline std::vector split(const Tensor & self, int64_t split_size, int64_t dim) { #ifdef USE_STATIC_DISPATCH return TypeDefault::split(self, split_size, dim); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::split", "Tensor"}).value(); return c10::Dispatcher::singleton().callUnboxedOnly, const Tensor &, int64_t, int64_t>( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self, split_size, dim); #endif } static inline std::vector split_with_sizes(const Tensor & self, IntArrayRef split_sizes, 
int64_t dim) { #ifdef USE_STATIC_DISPATCH return TypeDefault::split_with_sizes(self, split_sizes, dim); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::split_with_sizes", ""}).value(); return c10::Dispatcher::singleton().callUnboxedOnly, const Tensor &, IntArrayRef, int64_t>( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self, split_sizes, dim); #endif } static inline Tensor squeeze(const Tensor & self) { #ifdef USE_STATIC_DISPATCH return TypeDefault::squeeze(self); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::squeeze", ""}).value(); return c10::Dispatcher::singleton().callUnboxedOnly( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self); #endif } static inline Tensor squeeze(const Tensor & self, int64_t dim) { #ifdef USE_STATIC_DISPATCH return TypeDefault::squeeze(self, dim); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::squeeze", "dim"}).value(); return c10::Dispatcher::singleton().callUnboxedOnly( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self, dim); #endif } #ifdef BUILD_NAMEDTENSOR static inline Tensor squeeze(const Tensor & self, Dimname dim) { #ifdef USE_STATIC_DISPATCH return TypeDefault::squeeze(self, dim); #else static auto table = globalATenDispatch().getOpTable("aten::squeeze.dimname(Tensor(a) self, Dimname dim) -> Tensor(a)"); return table->callUnboxed(self, dim); #endif } #endif static inline Tensor sspaddmm(const Tensor & self, const Tensor & mat1, const Tensor & mat2, Scalar beta, Scalar alpha) { #ifdef USE_STATIC_DISPATCH return TypeDefault::sspaddmm(self, mat1, mat2, beta, alpha); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::sspaddmm", ""}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self, mat1, mat2)), self, mat1, mat2, beta, alpha); #endif } static inline Tensor & sspaddmm_out(Tensor & out, const Tensor & self, const Tensor & mat1, const Tensor & mat2, Scalar beta, Scalar alpha) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(out, self, mat1, mat2)))) { case Backend::CPU: return CPUType::sspaddmm_out(out, self, mat1, mat2, beta, alpha); break; case Backend::SparseCPU: return SparseCPUType::sspaddmm_out(out, self, mat1, mat2, beta, alpha); break; default: AT_ERROR("sspaddmm_out not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(out, self, mat1, mat2))); } #else static auto table = globalATenDispatch().getOpTable("aten::sspaddmm.out(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) 
out) -> Tensor(a!)"); return table->callUnboxed(out, self, mat1, mat2, beta, alpha); #endif } static inline Tensor stack(TensorList tensors, int64_t dim) { #ifdef USE_STATIC_DISPATCH return TypeDefault::stack(tensors, dim); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::stack", ""}).value(); return c10::Dispatcher::singleton().callUnboxedOnly( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(tensors)), tensors, dim); #endif } static inline Tensor & stack_out(Tensor & out, TensorList tensors, int64_t dim) { #ifdef USE_STATIC_DISPATCH return TypeDefault::stack_out(out, tensors, dim); #else static auto table = globalATenDispatch().getOpTable("aten::stack.out(Tensor[] tensors, int dim=0, *, Tensor(a!) out) -> Tensor(a!)"); return table->callUnboxed(out, tensors, dim); #endif } static inline Tensor stft(const Tensor & self, int64_t n_fft, c10::optional hop_length, c10::optional win_length, const Tensor & window, bool normalized, bool onesided) { #ifdef USE_STATIC_DISPATCH return TypeDefault::stft(self, n_fft, hop_length, win_length, window, normalized, onesided); #else static auto table = globalATenDispatch().getOpTable("aten::stft(Tensor self, int n_fft, int? hop_length=None, int? win_length=None, Tensor? window=None, bool normalized=False, bool onesided=True) -> Tensor"); return table->callUnboxed, c10::optional, const Tensor &, bool, bool>(self, n_fft, hop_length, win_length, window, normalized, onesided); #endif } static inline int64_t stride(const Tensor & self, int64_t dim) { #ifdef USE_STATIC_DISPATCH return TypeDefault::stride(self, dim); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::stride", "int"}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self, dim); #endif } #ifdef BUILD_NAMEDTENSOR static inline int64_t stride(const Tensor & self, Dimname dim) { #ifdef USE_STATIC_DISPATCH return TypeDefault::stride(self, dim); #else static auto table = globalATenDispatch().getOpTable("aten::stride.Dimname(Tensor self, Dimname dim) -> int"); return table->callUnboxed(self, dim); #endif } #endif static inline Tensor sum(const Tensor & self, c10::optional dtype) { #ifdef USE_STATIC_DISPATCH return TypeDefault::sum(self, dtype); #else static auto table = globalATenDispatch().getOpTable("aten::sum(Tensor self, *, ScalarType? dtype=None) -> Tensor"); return table->callUnboxed>(self, dtype); #endif } static inline Tensor sum(const Tensor & self, IntArrayRef dim, bool keepdim, c10::optional dtype) { #ifdef USE_STATIC_DISPATCH return TypeDefault::sum(self, dim, keepdim, dtype); #else static auto table = globalATenDispatch().getOpTable("aten::sum.dim_IntList(Tensor self, int[1] dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor"); return table->callUnboxed>(self, dim, keepdim, dtype); #endif } #ifdef BUILD_NAMEDTENSOR static inline Tensor sum(const Tensor & self, DimnameList dim, bool keepdim, c10::optional dtype) { #ifdef USE_STATIC_DISPATCH return TypeDefault::sum(self, dim, keepdim, dtype); #else static auto table = globalATenDispatch().getOpTable("aten::sum.dim_DimnameList(Tensor self, Dimname[1] dim, bool keepdim=False, *, ScalarType? 
dtype=None) -> Tensor"); return table->callUnboxed>(self, dim, keepdim, dtype); #endif } #endif static inline Tensor & sum_out(Tensor & out, const Tensor & self, IntArrayRef dim, bool keepdim, c10::optional dtype) { #ifdef USE_STATIC_DISPATCH return TypeDefault::sum_out(out, self, dim, keepdim, dtype); #else static auto table = globalATenDispatch().getOpTable("aten::sum.IntList_out(Tensor self, int[1] dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)"); return table->callUnboxed>(out, self, dim, keepdim, dtype); #endif } #ifdef BUILD_NAMEDTENSOR static inline Tensor & sum_out(Tensor & out, const Tensor & self, DimnameList dim, bool keepdim, c10::optional dtype) { #ifdef USE_STATIC_DISPATCH return TypeDefault::sum_out(out, self, dim, keepdim, dtype); #else static auto table = globalATenDispatch().getOpTable("aten::sum.DimnameList_out(Tensor self, Dimname[1] dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)"); return table->callUnboxed>(out, self, dim, keepdim, dtype); #endif } #endif static inline Tensor sqrt(const Tensor & self) { #ifdef USE_STATIC_DISPATCH return TypeDefault::sqrt(self); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::sqrt", ""}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self); #endif } static inline Tensor & sqrt_(Tensor & self) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)))) { case Backend::CPU: return CPUType::sqrt_(self); break; default: AT_ERROR("sqrt_ not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(self))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::sqrt_", ""}).value(); return c10::Dispatcher::singleton().callUnboxedOnly( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self); #endif } static inline Tensor & sqrt_out(Tensor & out, const Tensor & self) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(out, self)))) { case Backend::CPU: return CPUType::sqrt_out(out, self); break; default: AT_ERROR("sqrt_out not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(out, self))); } #else static auto table = globalATenDispatch().getOpTable("aten::sqrt.out(Tensor self, *, Tensor(a!) 
out) -> Tensor(a!)"); return table->callUnboxed(out, self); #endif } static inline Tensor std(const Tensor & self, bool unbiased) { #ifdef USE_STATIC_DISPATCH return TypeDefault::std(self, unbiased); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::std", ""}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self, unbiased); #endif } static inline Tensor std(const Tensor & self, IntArrayRef dim, bool unbiased, bool keepdim) { #ifdef USE_STATIC_DISPATCH return TypeDefault::std(self, dim, unbiased, keepdim); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::std", "dim"}).value(); return c10::Dispatcher::singleton().callUnboxedOnly( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self, dim, unbiased, keepdim); #endif } static inline std::tuple std_mean(const Tensor & self, bool unbiased) { #ifdef USE_STATIC_DISPATCH return TypeDefault::std_mean(self, unbiased); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::std_mean", ""}).value(); return c10::Dispatcher::singleton().callUnboxedOnly, const Tensor &, bool>( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self, unbiased); #endif } static inline std::tuple std_mean(const Tensor & self, IntArrayRef dim, bool unbiased, bool keepdim) { #ifdef USE_STATIC_DISPATCH return TypeDefault::std_mean(self, dim, unbiased, keepdim); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::std_mean", "dim"}).value(); return c10::Dispatcher::singleton().callUnboxedOnly, const Tensor &, IntArrayRef, bool, bool>( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self, dim, unbiased, keepdim); #endif } #ifdef BUILD_NAMEDTENSOR static inline std::tuple std_mean(const Tensor & self, DimnameList dim, bool unbiased, bool keepdim) { #ifdef USE_STATIC_DISPATCH return TypeDefault::std_mean(self, dim, unbiased, keepdim); #else static auto table = globalATenDispatch().getOpTable("aten::std_mean.names_dim(Tensor self, Dimname[1] dim, bool unbiased=True, bool keepdim=False) -> (Tensor, Tensor)"); return table->callUnboxed, const Tensor &, DimnameList, bool, bool>(self, dim, unbiased, keepdim); #endif } #endif static inline Tensor & std_out(Tensor & out, const Tensor & self, IntArrayRef dim, bool unbiased, bool keepdim) { #ifdef USE_STATIC_DISPATCH return TypeDefault::std_out(out, self, dim, unbiased, keepdim); #else static auto table = globalATenDispatch().getOpTable("aten::std.out(Tensor self, int[1] dim, bool unbiased=True, bool keepdim=False, *, Tensor(a!) 
out) -> Tensor(a!)"); return table->callUnboxed(out, self, dim, unbiased, keepdim); #endif } #ifdef BUILD_NAMEDTENSOR static inline Tensor std(const Tensor & self, DimnameList dim, bool unbiased, bool keepdim) { #ifdef USE_STATIC_DISPATCH return TypeDefault::std(self, dim, unbiased, keepdim); #else static auto table = globalATenDispatch().getOpTable("aten::std.names_dim(Tensor self, Dimname[1] dim, bool unbiased=True, bool keepdim=False) -> Tensor"); return table->callUnboxed(self, dim, unbiased, keepdim); #endif } #endif #ifdef BUILD_NAMEDTENSOR static inline Tensor & std_out(Tensor & out, const Tensor & self, DimnameList dim, bool unbiased, bool keepdim) { #ifdef USE_STATIC_DISPATCH return TypeDefault::std_out(out, self, dim, unbiased, keepdim); #else static auto table = globalATenDispatch().getOpTable("aten::std.names_out(Tensor self, Dimname[1] dim, bool unbiased=True, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)"); return table->callUnboxed(out, self, dim, unbiased, keepdim); #endif } #endif static inline Tensor prod(const Tensor & self, c10::optional dtype) { #ifdef USE_STATIC_DISPATCH return TypeDefault::prod(self, dtype); #else static auto table = globalATenDispatch().getOpTable("aten::prod(Tensor self, *, ScalarType? dtype=None) -> Tensor"); return table->callUnboxed>(self, dtype); #endif } static inline Tensor prod(const Tensor & self, int64_t dim, bool keepdim, c10::optional dtype) { #ifdef USE_STATIC_DISPATCH return TypeDefault::prod(self, dim, keepdim, dtype); #else static auto table = globalATenDispatch().getOpTable("aten::prod.dim_int(Tensor self, int dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor"); return table->callUnboxed>(self, dim, keepdim, dtype); #endif } static inline Tensor & prod_out(Tensor & out, const Tensor & self, int64_t dim, bool keepdim, c10::optional dtype) { #ifdef USE_STATIC_DISPATCH return TypeDefault::prod_out(out, self, dim, keepdim, dtype); #else static auto table = globalATenDispatch().getOpTable("aten::prod.int_out(Tensor self, int dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)"); return table->callUnboxed>(out, self, dim, keepdim, dtype); #endif } #ifdef BUILD_NAMEDTENSOR static inline Tensor prod(const Tensor & self, Dimname dim, bool keepdim, c10::optional dtype) { #ifdef USE_STATIC_DISPATCH return TypeDefault::prod(self, dim, keepdim, dtype); #else static auto table = globalATenDispatch().getOpTable("aten::prod.dim_Dimname(Tensor self, Dimname dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor"); return table->callUnboxed>(self, dim, keepdim, dtype); #endif } #endif #ifdef BUILD_NAMEDTENSOR static inline Tensor & prod_out(Tensor & out, const Tensor & self, Dimname dim, bool keepdim, c10::optional dtype) { #ifdef USE_STATIC_DISPATCH return TypeDefault::prod_out(out, self, dim, keepdim, dtype); #else static auto table = globalATenDispatch().getOpTable("aten::prod.Dimname_out(Tensor self, Dimname dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) 
out) -> Tensor(a!)"); return table->callUnboxed<Tensor &, Tensor &, const Tensor &, Dimname, bool, c10::optional<ScalarType>>(out, self, dim, keepdim, dtype);
#endif
}
#endif
static inline Tensor t(const Tensor & self) {
#ifdef USE_STATIC_DISPATCH
    return TypeDefault::t(self);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton()
        .findSchema({"aten::t", ""}).value();
    return c10::Dispatcher::singleton().callUnboxedOnly<Tensor, const Tensor &>(
        op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self);
#endif
}
static inline Tensor tan(const Tensor & self) {
#ifdef USE_STATIC_DISPATCH
    return TypeDefault::tan(self);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton()
        .findSchema({"aten::tan", ""}).value();
    return c10::Dispatcher::singleton().callUnboxed<Tensor, const Tensor &>(
        op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self);
#endif
}
static inline Tensor & tan_(Tensor & self) {
#ifdef USE_STATIC_DISPATCH
    switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)))) {
        case Backend::CPU:
            return CPUType::tan_(self);
            break;
        default:
            AT_ERROR("tan_ not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(self)));
    }
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton()
        .findSchema({"aten::tan_", ""}).value();
    return c10::Dispatcher::singleton().callUnboxedOnly<Tensor &, Tensor &>(
        op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self);
#endif
}
static inline Tensor & tan_out(Tensor & out, const Tensor & self) {
#ifdef USE_STATIC_DISPATCH
    switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(out, self)))) {
        case Backend::CPU:
            return CPUType::tan_out(out, self);
            break;
        default:
            AT_ERROR("tan_out not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(out, self)));
    }
#else
    static auto table = globalATenDispatch().getOpTable("aten::tan.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)");
    return table->callUnboxed<Tensor &, Tensor &, const Tensor &>(out, self);
#endif
}
static inline Tensor tanh(const Tensor & self) {
#ifdef USE_STATIC_DISPATCH
    return TypeDefault::tanh(self);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton()
        .findSchema({"aten::tanh", ""}).value();
    return c10::Dispatcher::singleton().callUnboxed<Tensor, const Tensor &>(
        op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self);
#endif
}
static inline Tensor & tanh_(Tensor & self) {
#ifdef USE_STATIC_DISPATCH
    switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)))) {
        case Backend::CPU:
            return CPUType::tanh_(self);
            break;
        default:
            AT_ERROR("tanh_ not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(self)));
    }
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton()
        .findSchema({"aten::tanh_", ""}).value();
    return c10::Dispatcher::singleton().callUnboxedOnly<Tensor &, Tensor &>(
        op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self);
#endif
}
static inline Tensor & tanh_out(Tensor & out, const Tensor & self) {
#ifdef USE_STATIC_DISPATCH
    switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(out, self)))) {
        case Backend::CPU:
            return CPUType::tanh_out(out, self);
            break;
        default:
            AT_ERROR("tanh_out not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(out, self)));
    }
#else
    static auto table = globalATenDispatch().getOpTable("aten::tanh.out(Tensor self, *, Tensor(a!)
out) -> Tensor(a!)"); return table->callUnboxed(out, self); #endif } static inline Tensor tensordot(const Tensor & self, const Tensor & other, IntArrayRef dims_self, IntArrayRef dims_other) { #ifdef USE_STATIC_DISPATCH return TypeDefault::tensordot(self, other, dims_self, dims_other); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::tensordot", ""}).value(); return c10::Dispatcher::singleton().callUnboxedOnly( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self, other)), self, other, dims_self, dims_other); #endif } static inline Tensor threshold(const Tensor & self, Scalar threshold, Scalar value) { #ifdef USE_STATIC_DISPATCH return TypeDefault::threshold(self, threshold, value); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::threshold", ""}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self, threshold, value); #endif } static inline Tensor & threshold_(Tensor & self, Scalar threshold, Scalar value) { #ifdef USE_STATIC_DISPATCH return TypeDefault::threshold_(self, threshold, value); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::threshold_", ""}).value(); return c10::Dispatcher::singleton().callUnboxedOnly( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self, threshold, value); #endif } static inline Tensor & threshold_out(Tensor & out, const Tensor & self, Scalar threshold, Scalar value) { #ifdef USE_STATIC_DISPATCH return TypeDefault::threshold_out(out, self, threshold, value); #else static auto table = globalATenDispatch().getOpTable("aten::threshold.out(Tensor self, Scalar threshold, Scalar value, *, Tensor(a!) 
out) -> Tensor(a!)"); return table->callUnboxed(out, self, threshold, value); #endif } static inline Tensor threshold_backward(const Tensor & grad_output, const Tensor & self, Scalar threshold) { #ifdef USE_STATIC_DISPATCH return TypeDefault::threshold_backward(grad_output, self, threshold); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::threshold_backward", ""}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(grad_output, self)), grad_output, self, threshold); #endif } static inline Tensor transpose(const Tensor & self, int64_t dim0, int64_t dim1) { #ifdef USE_STATIC_DISPATCH return TypeDefault::transpose(self, dim0, dim1); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::transpose", "int"}).value(); return c10::Dispatcher::singleton().callUnboxedOnly( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self, dim0, dim1); #endif } #ifdef BUILD_NAMEDTENSOR static inline Tensor transpose(const Tensor & self, Dimname dim0, Dimname dim1) { #ifdef USE_STATIC_DISPATCH return TypeDefault::transpose(self, dim0, dim1); #else static auto table = globalATenDispatch().getOpTable("aten::transpose.Dimname(Tensor(a) self, Dimname dim0, Dimname dim1) -> Tensor(a)"); return table->callUnboxed(self, dim0, dim1); #endif } #endif static inline Tensor _mkldnn_transpose(const Tensor & self, int64_t dim0, int64_t dim1) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)))) { default: AT_ERROR("_mkldnn_transpose not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(self))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::_mkldnn_transpose", ""}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self, dim0, dim1); #endif } static inline Tensor & _mkldnn_transpose_(Tensor & self, int64_t dim0, int64_t dim1) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)))) { default: AT_ERROR("_mkldnn_transpose_ not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(self))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::_mkldnn_transpose_", ""}).value(); return c10::Dispatcher::singleton().callUnboxedOnly( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self, dim0, dim1); #endif } static inline Tensor one_hot(const Tensor & self, int64_t num_classes) { #ifdef USE_STATIC_DISPATCH return TypeDefault::one_hot(self, num_classes); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::one_hot", ""}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self, num_classes); #endif } static inline Tensor flip(const Tensor & self, IntArrayRef dims) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)))) { case Backend::CPU: return CPUType::flip(self, dims); break; default: AT_ERROR("flip not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(self))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::flip", ""}).value(); 
return c10::Dispatcher::singleton().callUnboxedOnly( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self, dims); #endif } static inline Tensor roll(const Tensor & self, IntArrayRef shifts, IntArrayRef dims) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)))) { case Backend::CPU: return CPUType::roll(self, shifts, dims); break; default: AT_ERROR("roll not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(self))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::roll", ""}).value(); return c10::Dispatcher::singleton().callUnboxedOnly( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self, shifts, dims); #endif } static inline Tensor rot90(const Tensor & self, int64_t k, IntArrayRef dims) { #ifdef USE_STATIC_DISPATCH return TypeDefault::rot90(self, k, dims); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::rot90", ""}).value(); return c10::Dispatcher::singleton().callUnboxedOnly( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self, k, dims); #endif } static inline Tensor trapz(const Tensor & y, const Tensor & x, int64_t dim) { #ifdef USE_STATIC_DISPATCH return TypeDefault::trapz(y, x, dim); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::trapz", "x"}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(y, x)), y, x, dim); #endif } static inline Tensor trapz(const Tensor & y, double dx, int64_t dim) { #ifdef USE_STATIC_DISPATCH return TypeDefault::trapz(y, dx, dim); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::trapz", "dx"}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(y)), y, dx, dim); #endif } static inline Tensor _trilinear(const Tensor & i1, const Tensor & i2, const Tensor & i3, IntArrayRef expand1, IntArrayRef expand2, IntArrayRef expand3, IntArrayRef sumdim, int64_t unroll_dim) { #ifdef USE_STATIC_DISPATCH return TypeDefault::_trilinear(i1, i2, i3, expand1, expand2, expand3, sumdim, unroll_dim); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::_trilinear", ""}).value(); return c10::Dispatcher::singleton().callUnboxedOnly( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(i1, i2, i3)), i1, i2, i3, expand1, expand2, expand3, sumdim, unroll_dim); #endif } static inline Tensor triplet_margin_loss(const Tensor & anchor, const Tensor & positive, const Tensor & negative, double margin, double p, double eps, bool swap, int64_t reduction) { #ifdef USE_STATIC_DISPATCH return TypeDefault::triplet_margin_loss(anchor, positive, negative, margin, p, eps, swap, reduction); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::triplet_margin_loss", ""}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(anchor, positive, negative)), anchor, positive, negative, margin, p, eps, swap, reduction); #endif } static inline Tensor trunc(const Tensor & self) { #ifdef USE_STATIC_DISPATCH return TypeDefault::trunc(self); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::trunc", ""}).value(); return 
c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self); #endif } static inline Tensor & trunc_(Tensor & self) { #ifdef USE_STATIC_DISPATCH return TypeDefault::trunc_(self); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::trunc_", ""}).value(); return c10::Dispatcher::singleton().callUnboxedOnly( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self); #endif } static inline Tensor & trunc_out(Tensor & out, const Tensor & self) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(out, self)))) { case Backend::CPU: return CPUType::trunc_out(out, self); break; default: AT_ERROR("trunc_out not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(out, self))); } #else static auto table = globalATenDispatch().getOpTable("aten::trunc.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)"); return table->callUnboxed(out, self); #endif } static inline bool _has_compatible_shallow_copy_type(const Tensor & self, const Tensor & from) { #ifdef USE_STATIC_DISPATCH return TypeDefault::_has_compatible_shallow_copy_type(self, from); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::_has_compatible_shallow_copy_type", ""}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self, from)), self, from); #endif } static inline std::tuple _unique(const Tensor & self, bool sorted, bool return_inverse) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)))) { case Backend::CPU: return CPUType::_unique(self, sorted, return_inverse); break; default: AT_ERROR("_unique not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(self))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::_unique", ""}).value(); return c10::Dispatcher::singleton().callUnboxedOnly, const Tensor &, bool, bool>( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self, sorted, return_inverse); #endif } static inline std::tuple unique_dim(const Tensor & self, int64_t dim, bool sorted, bool return_inverse, bool return_counts) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)))) { case Backend::CPU: return CPUType::unique_dim(self, dim, sorted, return_inverse, return_counts); break; default: AT_ERROR("unique_dim not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(self))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::unique_dim", ""}).value(); return c10::Dispatcher::singleton().callUnboxedOnly, const Tensor &, int64_t, bool, bool, bool>( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self, dim, sorted, return_inverse, return_counts); #endif } static inline std::tuple unique_consecutive(const Tensor & self, bool return_inverse, bool return_counts, c10::optional dim) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)))) { case Backend::CPU: return CPUType::unique_consecutive(self, return_inverse, return_counts, dim); break; default: AT_ERROR("unique_consecutive not implemented for ", 
at::toString(at::detail::multi_dispatch_tensor_type_set(self))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::unique_consecutive", ""}).value(); return c10::Dispatcher::singleton().callUnboxedOnly, const Tensor &, bool, bool, c10::optional>( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self, return_inverse, return_counts, dim); #endif } static inline std::tuple unique_dim_consecutive(const Tensor & self, int64_t dim, bool return_inverse, bool return_counts) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)))) { case Backend::CPU: return CPUType::unique_dim_consecutive(self, dim, return_inverse, return_counts); break; default: AT_ERROR("unique_dim_consecutive not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(self))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::unique_dim_consecutive", ""}).value(); return c10::Dispatcher::singleton().callUnboxedOnly, const Tensor &, int64_t, bool, bool>( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self, dim, return_inverse, return_counts); #endif } static inline std::tuple _unique2(const Tensor & self, bool sorted, bool return_inverse, bool return_counts) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)))) { case Backend::CPU: return CPUType::_unique2(self, sorted, return_inverse, return_counts); break; default: AT_ERROR("_unique2 not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(self))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::_unique2", ""}).value(); return c10::Dispatcher::singleton().callUnboxedOnly, const Tensor &, bool, bool, bool>( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self, sorted, return_inverse, return_counts); #endif } static inline Tensor _unsafe_view(const Tensor & self, IntArrayRef size) { #ifdef USE_STATIC_DISPATCH return TypeDefault::_unsafe_view(self, size); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::_unsafe_view", ""}).value(); return c10::Dispatcher::singleton().callUnboxedOnly( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self, size); #endif } static inline Tensor unsqueeze(const Tensor & self, int64_t dim) { #ifdef USE_STATIC_DISPATCH return TypeDefault::unsqueeze(self, dim); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::unsqueeze", ""}).value(); return c10::Dispatcher::singleton().callUnboxedOnly( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self, dim); #endif } static inline Tensor var(const Tensor & self, bool unbiased) { #ifdef USE_STATIC_DISPATCH return TypeDefault::var(self, unbiased); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::var", ""}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self, unbiased); #endif } static inline Tensor var(const Tensor & self, IntArrayRef dim, bool unbiased, bool keepdim) { #ifdef USE_STATIC_DISPATCH return TypeDefault::var(self, dim, unbiased, keepdim); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::var", "dim"}).value(); return 
        c10::Dispatcher::singleton().callUnboxedOnly<Tensor, const Tensor &, IntArrayRef, bool, bool>(
            op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self, dim, unbiased, keepdim);
#endif
}
static inline Tensor & var_out(Tensor & out, const Tensor & self, IntArrayRef dim, bool unbiased, bool keepdim) {
#ifdef USE_STATIC_DISPATCH
    return TypeDefault::var_out(out, self, dim, unbiased, keepdim);
#else
    static auto table = globalATenDispatch().getOpTable("aten::var.out(Tensor self, int[1] dim, bool unbiased=True, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)");
    return table->callUnboxed<Tensor &, Tensor &, const Tensor &, IntArrayRef, bool, bool>(out, self, dim, unbiased, keepdim);
#endif
}
#ifdef BUILD_NAMEDTENSOR
static inline Tensor var(const Tensor & self, DimnameList dim, bool unbiased, bool keepdim) {
#ifdef USE_STATIC_DISPATCH
    return TypeDefault::var(self, dim, unbiased, keepdim);
#else
    static auto table = globalATenDispatch().getOpTable("aten::var.names_dim(Tensor self, Dimname[1] dim, bool unbiased=True, bool keepdim=False) -> Tensor");
    return table->callUnboxed<Tensor, const Tensor &, DimnameList, bool, bool>(self, dim, unbiased, keepdim);
#endif
}
#endif
#ifdef BUILD_NAMEDTENSOR
static inline Tensor & var_out(Tensor & out, const Tensor & self, DimnameList dim, bool unbiased, bool keepdim) {
#ifdef USE_STATIC_DISPATCH
    return TypeDefault::var_out(out, self, dim, unbiased, keepdim);
#else
    static auto table = globalATenDispatch().getOpTable("aten::var.names_out(Tensor self, Dimname[1] dim, bool unbiased=True, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)");
    return table->callUnboxed<Tensor &, Tensor &, const Tensor &, DimnameList, bool, bool>(out, self, dim, unbiased, keepdim);
#endif
}
#endif
static inline std::tuple<Tensor,Tensor> var_mean(const Tensor & self, bool unbiased) {
#ifdef USE_STATIC_DISPATCH
    return TypeDefault::var_mean(self, unbiased);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton()
        .findSchema({"aten::var_mean", ""}).value();
    return c10::Dispatcher::singleton().callUnboxedOnly<std::tuple<Tensor,Tensor>, const Tensor &, bool>(
            op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self, unbiased);
#endif
}
static inline std::tuple<Tensor,Tensor> var_mean(const Tensor & self, IntArrayRef dim, bool unbiased, bool keepdim) {
#ifdef USE_STATIC_DISPATCH
    return TypeDefault::var_mean(self, dim, unbiased, keepdim);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton()
        .findSchema({"aten::var_mean", "dim"}).value();
    return c10::Dispatcher::singleton().callUnboxedOnly<std::tuple<Tensor,Tensor>, const Tensor &, IntArrayRef, bool, bool>(
            op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self, dim, unbiased, keepdim);
#endif
}
#ifdef BUILD_NAMEDTENSOR
static inline std::tuple<Tensor,Tensor> var_mean(const Tensor & self, DimnameList dim, bool unbiased, bool keepdim) {
#ifdef USE_STATIC_DISPATCH
    return TypeDefault::var_mean(self, dim, unbiased, keepdim);
#else
    static auto table = globalATenDispatch().getOpTable("aten::var_mean.names_dim(Tensor self, Dimname[1] dim, bool unbiased=True, bool keepdim=False) -> (Tensor, Tensor)");
    return table->callUnboxed<std::tuple<Tensor,Tensor>, const Tensor &, DimnameList, bool, bool>(self, dim, unbiased, keepdim);
#endif
}
#endif
static inline Tensor where(const Tensor & condition, const Tensor & self, const Tensor & other) {
#ifdef USE_STATIC_DISPATCH
    return TypeDefault::where(condition, self, other);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton()
        .findSchema({"aten::where", "self"}).value();
    return c10::Dispatcher::singleton().callUnboxed<Tensor, const Tensor &, const Tensor &, const Tensor &>(
            op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(condition, self, other)), condition, self, other);
#endif
}
static inline std::vector<Tensor> where(const Tensor & condition) {
#ifdef USE_STATIC_DISPATCH
    return TypeDefault::where(condition);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton()
        .findSchema({"aten::where", ""}).value();
    return c10::Dispatcher::singleton().callUnboxedOnly<std::vector<Tensor>, const Tensor &>(
            op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(condition)), condition);
#endif
}
static inline Tensor _s_where(const Tensor & condition, const Tensor & self, const Tensor & other) {
#ifdef USE_STATIC_DISPATCH
    switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(condition, self, other)))) {
        case Backend::CPU: return CPUType::_s_where(condition, self, other); break;
        default: AT_ERROR("_s_where not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(condition, self, other)));
    }
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton()
        .findSchema({"aten::_s_where", ""}).value();
    return c10::Dispatcher::singleton().callUnboxed<Tensor, const Tensor &, const Tensor &, const Tensor &>(
            op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(condition, self, other)), condition, self, other);
#endif
}
static inline Tensor norm_except_dim(const Tensor & v, int64_t pow, int64_t dim) {
#ifdef USE_STATIC_DISPATCH
    return TypeDefault::norm_except_dim(v, pow, dim);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton()
        .findSchema({"aten::norm_except_dim", ""}).value();
    return c10::Dispatcher::singleton().callUnboxedOnly<Tensor, const Tensor &, int64_t, int64_t>(
            op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(v)), v, pow, dim);
#endif
}
static inline Tensor _weight_norm(const Tensor & v, const Tensor & g, int64_t dim) {
#ifdef USE_STATIC_DISPATCH
    return TypeDefault::_weight_norm(v, g, dim);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton()
        .findSchema({"aten::_weight_norm", ""}).value();
    return c10::Dispatcher::singleton().callUnboxed<Tensor, const Tensor &, const Tensor &, int64_t>(
            op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(v, g)), v, g, dim);
#endif
}
static inline std::tuple<Tensor,Tensor> _weight_norm_cuda_interface(const Tensor & v, const Tensor & g, int64_t dim) {
#ifdef USE_STATIC_DISPATCH
    switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(v, g)))) {
        default: AT_ERROR("_weight_norm_cuda_interface not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(v, g)));
    }
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton()
        .findSchema({"aten::_weight_norm_cuda_interface", ""}).value();
    return c10::Dispatcher::singleton().callUnboxedOnly<std::tuple<Tensor,Tensor>, const Tensor &, const Tensor &, int64_t>(
            op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(v, g)), v, g, dim);
#endif
}
static inline std::tuple<Tensor,Tensor> _weight_norm_cuda_interface_backward(const Tensor & grad_w, const Tensor & saved_v, const Tensor & saved_g, const Tensor & saved_norms, int64_t dim) {
#ifdef USE_STATIC_DISPATCH
    switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(grad_w, saved_v, saved_g, saved_norms)))) {
        default: AT_ERROR("_weight_norm_cuda_interface_backward not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(grad_w, saved_v, saved_g, saved_norms)));
    }
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton()
        .findSchema({"aten::_weight_norm_cuda_interface_backward", ""}).value();
    return c10::Dispatcher::singleton().callUnboxedOnly<std::tuple<Tensor,Tensor>, const Tensor &, const Tensor &, const Tensor &, const Tensor &, int64_t>(
            op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(grad_w, saved_v, saved_g, saved_norms)), grad_w, saved_v, saved_g, saved_norms, dim);
#endif
}
static inline std::tuple<Tensor,Tensor> _weight_norm_differentiable_backward(const Tensor & grad_w, const Tensor & saved_v, const Tensor & saved_g, const Tensor & saved_norms, int64_t dim) {
#ifdef USE_STATIC_DISPATCH
    return TypeDefault::_weight_norm_differentiable_backward(grad_w, saved_v, saved_g, saved_norms, dim);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton()
        .findSchema({"aten::_weight_norm_differentiable_backward", ""}).value();
    return c10::Dispatcher::singleton().callUnboxedOnly<std::tuple<Tensor,Tensor>, const Tensor &, const Tensor &, const Tensor &, const Tensor &, int64_t>(
            op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(grad_w, saved_v, saved_g, saved_norms)), grad_w, saved_v, saved_g, saved_norms, dim);
#endif
}
#ifdef BUILD_NAMEDTENSOR
static inline Tensor zeros(IntArrayRef size, c10::optional<DimnameList> names, const TensorOptions & options) {
#ifdef USE_STATIC_DISPATCH
    return TypeDefault::zeros(size, names, options);
#else
    globalLegacyTypeDispatch().initForTensorTypeSet(at::detail::multi_dispatch_tensor_type_set(options));
    static auto table = globalATenDispatch().getOpTable("aten::zeros.names(int[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor");
    return table->callUnboxed<Tensor, IntArrayRef, c10::optional<DimnameList>, const TensorOptions &>(size, names, options);
#endif
}
#endif
static inline Tensor zeros(IntArrayRef size, const TensorOptions & options) {
#ifdef USE_STATIC_DISPATCH
    return TypeDefault::zeros(size, options);
#else
    globalLegacyTypeDispatch().initForTensorTypeSet(at::detail::multi_dispatch_tensor_type_set(options));
    static auto table = globalATenDispatch().getOpTable("aten::zeros(int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor");
    return table->callUnboxed<Tensor, IntArrayRef, const TensorOptions &>(size, options);
#endif
}
static inline Tensor & zeros_out(Tensor & out, IntArrayRef size) {
#ifdef USE_STATIC_DISPATCH
    return TypeDefault::zeros_out(out, size);
#else
    static auto table = globalATenDispatch().getOpTable("aten::zeros.out(int[] size, *, Tensor(a!) out) -> Tensor(a!)");
out) -> Tensor(a!)"); return table->callUnboxed(out, size); #endif } static inline Tensor zeros_like(const Tensor & self) { #ifdef USE_STATIC_DISPATCH return TypeDefault::zeros_like(self); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::zeros_like", ""}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self); #endif } static inline Tensor zeros_like(const Tensor & self, const TensorOptions & options) { #ifdef USE_STATIC_DISPATCH return TypeDefault::zeros_like(self, options); #else globalLegacyTypeDispatch().initForTensorTypeSet(at::detail::multi_dispatch_tensor_type_set(self, options)); static auto table = globalATenDispatch().getOpTable("aten::zeros_like.dtype(Tensor self, *, ScalarType dtype, Layout layout, Device device, bool pin_memory=False) -> Tensor"); return table->callUnboxed(self, options); #endif } static inline Tensor _standard_gamma_grad(const Tensor & self, const Tensor & output) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self, output)))) { case Backend::CPU: return CPUType::_standard_gamma_grad(self, output); break; default: AT_ERROR("_standard_gamma_grad not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(self, output))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::_standard_gamma_grad", ""}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self, output)), self, output); #endif } static inline Tensor _standard_gamma(const Tensor & self, Generator * generator) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)))) { case Backend::CPU: return CPUType::_standard_gamma(self, generator); break; default: AT_ERROR("_standard_gamma not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(self))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::_standard_gamma", ""}).value(); return c10::Dispatcher::singleton().callUnboxedOnly( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self, generator); #endif } static inline Tensor _dirichlet_grad(const Tensor & x, const Tensor & alpha, const Tensor & total) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(x, alpha, total)))) { case Backend::CPU: return CPUType::_dirichlet_grad(x, alpha, total); break; default: AT_ERROR("_dirichlet_grad not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(x, alpha, total))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::_dirichlet_grad", ""}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(x, alpha, total)), x, alpha, total); #endif } static inline Tensor _sample_dirichlet(const Tensor & self, Generator * generator) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)))) { case Backend::CPU: return CPUType::_sample_dirichlet(self, generator); break; default: AT_ERROR("_sample_dirichlet not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(self))); } #else static c10::OperatorHandle op = 
        c10::Dispatcher::singleton()
        .findSchema({"aten::_sample_dirichlet", ""}).value();
    return c10::Dispatcher::singleton().callUnboxedOnly<Tensor, const Tensor &, Generator *>(
            op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self, generator);
#endif
}
static inline Tensor poisson(const Tensor & self, Generator * generator) {
#ifdef USE_STATIC_DISPATCH
    switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)))) {
        case Backend::CPU: return CPUType::poisson(self, generator); break;
        default: AT_ERROR("poisson not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(self)));
    }
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton()
        .findSchema({"aten::poisson", ""}).value();
    return c10::Dispatcher::singleton().callUnboxedOnly<Tensor, const Tensor &, Generator *>(
            op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self, generator);
#endif
}
static inline Tensor native_norm(const Tensor & self, Scalar p) {
#ifdef USE_STATIC_DISPATCH
    switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)))) {
        case Backend::SparseCPU: return SparseCPUType::native_norm(self, p); break;
        default: AT_ERROR("native_norm not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(self)));
    }
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton()
        .findSchema({"aten::native_norm", ""}).value();
    return c10::Dispatcher::singleton().callUnboxed<Tensor, const Tensor &, Scalar>(
            op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self, p);
#endif
}
static inline Tensor _sparse_sum(const Tensor & self) {
#ifdef USE_STATIC_DISPATCH
    return TypeDefault::_sparse_sum(self);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton()
        .findSchema({"aten::_sparse_sum", ""}).value();
    return c10::Dispatcher::singleton().callUnboxed<Tensor, const Tensor &>(
            op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self);
#endif
}
static inline Tensor _sparse_sum(const Tensor & self, ScalarType dtype) {
#ifdef USE_STATIC_DISPATCH
    return TypeDefault::_sparse_sum(self, dtype);
#else
    static auto table = globalATenDispatch().getOpTable("aten::_sparse_sum.dtype(Tensor self, *, ScalarType dtype) -> Tensor");
    return table->callUnboxed<Tensor, const Tensor &, ScalarType>(self, dtype);
#endif
}
static inline Tensor _sparse_sum(const Tensor & self, IntArrayRef dim) {
#ifdef USE_STATIC_DISPATCH
    return TypeDefault::_sparse_sum(self, dim);
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton()
        .findSchema({"aten::_sparse_sum", "dim"}).value();
    return c10::Dispatcher::singleton().callUnboxedOnly<Tensor, const Tensor &, IntArrayRef>(
            op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self, dim);
#endif
}
static inline Tensor _sparse_sum(const Tensor & self, IntArrayRef dim, ScalarType dtype) {
#ifdef USE_STATIC_DISPATCH
    return TypeDefault::_sparse_sum(self, dim, dtype);
#else
    static auto table = globalATenDispatch().getOpTable("aten::_sparse_sum.dim_dtype(Tensor self, int[1] dim, *, ScalarType dtype) -> Tensor");
    return table->callUnboxed<Tensor, const Tensor &, IntArrayRef, ScalarType>(self, dim, dtype);
#endif
}
static inline Tensor _sparse_sum_backward(const Tensor & grad, const Tensor & self, IntArrayRef dim) {
#ifdef USE_STATIC_DISPATCH
    switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(grad, self)))) {
        case Backend::SparseCPU: return SparseCPUType::_sparse_sum_backward(grad, self, dim); break;
        default: AT_ERROR("_sparse_sum_backward not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(grad, self)));
    }
#else
    static c10::OperatorHandle op =
c10::Dispatcher::singleton() .findSchema({"aten::_sparse_sum_backward", ""}).value(); return c10::Dispatcher::singleton().callUnboxedOnly( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(grad, self)), grad, self, dim); #endif } static inline Tensor norm(const Tensor & self, c10::optional p, ScalarType dtype) { #ifdef USE_STATIC_DISPATCH return TypeDefault::norm(self, p, dtype); #else static auto table = globalATenDispatch().getOpTable("aten::norm.ScalarOpt_dtype(Tensor self, Scalar? p, *, ScalarType dtype) -> Tensor"); return table->callUnboxed, ScalarType>(self, p, dtype); #endif } static inline Tensor norm(const Tensor & self, Scalar p) { #ifdef USE_STATIC_DISPATCH return TypeDefault::norm(self, p); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::norm", "Scalar"}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self, p); #endif } static inline Tensor norm(const Tensor & self, c10::optional p, IntArrayRef dim, bool keepdim, ScalarType dtype) { #ifdef USE_STATIC_DISPATCH return TypeDefault::norm(self, p, dim, keepdim, dtype); #else static auto table = globalATenDispatch().getOpTable("aten::norm.ScalarOpt_dim_dtype(Tensor self, Scalar? p, int[1] dim, bool keepdim, *, ScalarType dtype) -> Tensor"); return table->callUnboxed, IntArrayRef, bool, ScalarType>(self, p, dim, keepdim, dtype); #endif } static inline Tensor norm(const Tensor & self, c10::optional p, IntArrayRef dim, bool keepdim) { #ifdef USE_STATIC_DISPATCH return TypeDefault::norm(self, p, dim, keepdim); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::norm", "ScalarOpt_dim"}).value(); return c10::Dispatcher::singleton().callUnboxedOnly, IntArrayRef, bool>( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self, p, dim, keepdim); #endif } static inline Tensor & norm_out(Tensor & out, const Tensor & self, c10::optional p, IntArrayRef dim, bool keepdim, ScalarType dtype) { #ifdef USE_STATIC_DISPATCH return TypeDefault::norm_out(out, self, p, dim, keepdim, dtype); #else static auto table = globalATenDispatch().getOpTable("aten::norm.dtype_out(Tensor self, Scalar? p, int[1] dim, bool keepdim, *, ScalarType dtype, Tensor(a!) out) -> Tensor(a!)"); return table->callUnboxed, IntArrayRef, bool, ScalarType>(out, self, p, dim, keepdim, dtype); #endif } static inline Tensor & norm_out(Tensor & out, const Tensor & self, c10::optional p, IntArrayRef dim, bool keepdim) { #ifdef USE_STATIC_DISPATCH return TypeDefault::norm_out(out, self, p, dim, keepdim); #else static auto table = globalATenDispatch().getOpTable("aten::norm.out(Tensor self, Scalar? p, int[1] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)"); return table->callUnboxed, IntArrayRef, bool>(out, self, p, dim, keepdim); #endif } #ifdef BUILD_NAMEDTENSOR static inline Tensor norm(const Tensor & self, c10::optional p, DimnameList dim, bool keepdim, ScalarType dtype) { #ifdef USE_STATIC_DISPATCH return TypeDefault::norm(self, p, dim, keepdim, dtype); #else static auto table = globalATenDispatch().getOpTable("aten::norm.names_ScalarOpt_dim_dtype(Tensor self, Scalar? 
p, Dimname[1] dim, bool keepdim, *, ScalarType dtype) -> Tensor"); return table->callUnboxed, DimnameList, bool, ScalarType>(self, p, dim, keepdim, dtype); #endif } #endif #ifdef BUILD_NAMEDTENSOR static inline Tensor norm(const Tensor & self, c10::optional p, DimnameList dim, bool keepdim) { #ifdef USE_STATIC_DISPATCH return TypeDefault::norm(self, p, dim, keepdim); #else static auto table = globalATenDispatch().getOpTable("aten::norm.names_ScalarOpt_dim(Tensor self, Scalar? p, Dimname[1] dim, bool keepdim=False) -> Tensor"); return table->callUnboxed, DimnameList, bool>(self, p, dim, keepdim); #endif } #endif #ifdef BUILD_NAMEDTENSOR static inline Tensor & norm_out(Tensor & out, const Tensor & self, c10::optional p, DimnameList dim, bool keepdim, ScalarType dtype) { #ifdef USE_STATIC_DISPATCH return TypeDefault::norm_out(out, self, p, dim, keepdim, dtype); #else static auto table = globalATenDispatch().getOpTable("aten::norm.names_dtype_out(Tensor self, Scalar? p, Dimname[1] dim, bool keepdim, *, ScalarType dtype, Tensor(a!) out) -> Tensor(a!)"); return table->callUnboxed, DimnameList, bool, ScalarType>(out, self, p, dim, keepdim, dtype); #endif } #endif #ifdef BUILD_NAMEDTENSOR static inline Tensor & norm_out(Tensor & out, const Tensor & self, c10::optional p, DimnameList dim, bool keepdim) { #ifdef USE_STATIC_DISPATCH return TypeDefault::norm_out(out, self, p, dim, keepdim); #else static auto table = globalATenDispatch().getOpTable("aten::norm.names_out(Tensor self, Scalar? p, Dimname[1] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)"); return table->callUnboxed, DimnameList, bool>(out, self, p, dim, keepdim); #endif } #endif static inline Tensor frobenius_norm(const Tensor & self) { #ifdef USE_STATIC_DISPATCH return TypeDefault::frobenius_norm(self); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::frobenius_norm", ""}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self); #endif } static inline Tensor frobenius_norm(const Tensor & self, IntArrayRef dim, bool keepdim) { #ifdef USE_STATIC_DISPATCH return TypeDefault::frobenius_norm(self, dim, keepdim); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::frobenius_norm", "dim"}).value(); return c10::Dispatcher::singleton().callUnboxedOnly( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self, dim, keepdim); #endif } static inline Tensor & frobenius_norm_out(Tensor & out, const Tensor & self, IntArrayRef dim, bool keepdim) { #ifdef USE_STATIC_DISPATCH return TypeDefault::frobenius_norm_out(out, self, dim, keepdim); #else static auto table = globalATenDispatch().getOpTable("aten::frobenius_norm.out(Tensor self, int[1] dim, bool keepdim=False, *, Tensor(a!) 
out) -> Tensor(a!)"); return table->callUnboxed(out, self, dim, keepdim); #endif } static inline Tensor nuclear_norm(const Tensor & self, bool keepdim) { #ifdef USE_STATIC_DISPATCH return TypeDefault::nuclear_norm(self, keepdim); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::nuclear_norm", ""}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self, keepdim); #endif } static inline Tensor & nuclear_norm_out(Tensor & out, const Tensor & self, bool keepdim) { #ifdef USE_STATIC_DISPATCH return TypeDefault::nuclear_norm_out(out, self, keepdim); #else static auto table = globalATenDispatch().getOpTable("aten::nuclear_norm.out(Tensor self, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)"); return table->callUnboxed(out, self, keepdim); #endif } static inline Tensor nuclear_norm(const Tensor & self, IntArrayRef dim, bool keepdim) { #ifdef USE_STATIC_DISPATCH return TypeDefault::nuclear_norm(self, dim, keepdim); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::nuclear_norm", "dim"}).value(); return c10::Dispatcher::singleton().callUnboxedOnly( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self, dim, keepdim); #endif } static inline Tensor & nuclear_norm_out(Tensor & out, const Tensor & self, IntArrayRef dim, bool keepdim) { #ifdef USE_STATIC_DISPATCH return TypeDefault::nuclear_norm_out(out, self, dim, keepdim); #else static auto table = globalATenDispatch().getOpTable("aten::nuclear_norm.dim_out(Tensor self, int[2] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)"); return table->callUnboxed(out, self, dim, keepdim); #endif } static inline Tensor clone(const Tensor & self) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)))) { case Backend::CPU: return CPUType::clone(self); break; case Backend::QuantizedCPU: return QuantizedCPUType::clone(self); break; case Backend::SparseCPU: return SparseCPUType::clone(self); break; default: AT_ERROR("clone not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(self))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::clone", ""}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self); #endif } static inline Tensor & resize_as_(Tensor & self, const Tensor & the_template) { #ifdef USE_STATIC_DISPATCH return TypeDefault::resize_as_(self, the_template); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::resize_as_", ""}).value(); return c10::Dispatcher::singleton().callUnboxedOnly( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self, the_template)), self, the_template); #endif } static inline Tensor & pow_out(Tensor & out, const Tensor & self, Scalar exponent) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(out, self)))) { case Backend::CPU: return CPUType::pow_out(out, self, exponent); break; case Backend::SparseCPU: return SparseCPUType::pow_out(out, self, exponent); break; default: AT_ERROR("pow_out not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(out, self))); } #else static auto table = globalATenDispatch().getOpTable("aten::pow.Tensor_Scalar_out(Tensor self, Scalar exponent, 
*, Tensor(a!) out) -> Tensor(a!)"); return table->callUnboxed(out, self, exponent); #endif } static inline Tensor pow(const Tensor & self, Scalar exponent) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)))) { case Backend::CPU: return CPUType::pow(self, exponent); break; case Backend::SparseCPU: return SparseCPUType::pow(self, exponent); break; default: AT_ERROR("pow not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(self))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::pow", "Tensor_Scalar"}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self, exponent); #endif } static inline Tensor & zero_(Tensor & self) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)))) { case Backend::CPU: return CPUType::zero_(self); break; case Backend::SparseCPU: return SparseCPUType::zero_(self); break; default: AT_ERROR("zero_ not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(self))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::zero_", ""}).value(); return c10::Dispatcher::singleton().callUnboxedOnly( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self); #endif } static inline Tensor & sub_out(Tensor & out, const Tensor & self, const Tensor & other, Scalar alpha) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(out, self, other)))) { case Backend::CPU: return CPUType::sub_out(out, self, other, alpha); break; case Backend::SparseCPU: return SparseCPUType::sub_out(out, self, other, alpha); break; default: AT_ERROR("sub_out not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(out, self, other))); } #else static auto table = globalATenDispatch().getOpTable("aten::sub.out(Tensor self, Tensor other, *, Scalar alpha=1, Tensor(a!) 
out) -> Tensor(a!)"); return table->callUnboxed(out, self, other, alpha); #endif } static inline Tensor sub(const Tensor & self, const Tensor & other, Scalar alpha) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self, other)))) { case Backend::CPU: return CPUType::sub(self, other, alpha); break; case Backend::SparseCPU: return SparseCPUType::sub(self, other, alpha); break; default: AT_ERROR("sub not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(self, other))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::sub", "Tensor"}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self, other)), self, other, alpha); #endif } static inline Tensor sub(const Tensor & self, Scalar other, Scalar alpha) { #ifdef USE_STATIC_DISPATCH return TypeDefault::sub(self, other, alpha); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::sub", "Scalar"}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self, other, alpha); #endif } static inline Tensor rsub(const Tensor & self, const Tensor & other, Scalar alpha) { #ifdef USE_STATIC_DISPATCH return TypeDefault::rsub(self, other, alpha); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::rsub", "Tensor"}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self, other)), self, other, alpha); #endif } static inline Tensor rsub(const Tensor & self, Scalar other, Scalar alpha) { #ifdef USE_STATIC_DISPATCH return TypeDefault::rsub(self, other, alpha); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::rsub", "Scalar"}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self, other, alpha); #endif } static inline Tensor _sparse_addmm(const Tensor & self, const Tensor & sparse, const Tensor & dense, Scalar beta, Scalar alpha) { #ifdef USE_STATIC_DISPATCH return TypeDefault::_sparse_addmm(self, sparse, dense, beta, alpha); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::_sparse_addmm", ""}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self, sparse, dense)), self, sparse, dense, beta, alpha); #endif } static inline Tensor & addmm_out(Tensor & out, const Tensor & self, const Tensor & mat1, const Tensor & mat2, Scalar beta, Scalar alpha) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(out, self, mat1, mat2)))) { case Backend::CPU: return CPUType::addmm_out(out, self, mat1, mat2, beta, alpha); break; case Backend::SparseCPU: return SparseCPUType::addmm_out(out, self, mat1, mat2, beta, alpha); break; default: AT_ERROR("addmm_out not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(out, self, mat1, mat2))); } #else static auto table = globalATenDispatch().getOpTable("aten::addmm.out(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) 
out) -> Tensor(a!)"); return table->callUnboxed(out, self, mat1, mat2, beta, alpha); #endif } static inline Tensor addmm(const Tensor & self, const Tensor & mat1, const Tensor & mat2, Scalar beta, Scalar alpha) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self, mat1, mat2)))) { case Backend::CPU: return CPUType::addmm(self, mat1, mat2, beta, alpha); break; case Backend::SparseCPU: return SparseCPUType::addmm(self, mat1, mat2, beta, alpha); break; default: AT_ERROR("addmm not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(self, mat1, mat2))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::addmm", ""}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self, mat1, mat2)), self, mat1, mat2, beta, alpha); #endif } static inline Tensor sparse_coo_tensor(IntArrayRef size, const TensorOptions & options) { #ifdef USE_STATIC_DISPATCH return TypeDefault::sparse_coo_tensor(size, options); #else globalLegacyTypeDispatch().initForTensorTypeSet(at::detail::multi_dispatch_tensor_type_set(options)); static auto table = globalATenDispatch().getOpTable("aten::sparse_coo_tensor.size(int[] size, *, ScalarType dtype, Layout layout, Device device, bool pin_memory=False) -> Tensor"); return table->callUnboxed(size, options); #endif } static inline Tensor sparse_coo_tensor(const Tensor & indices, const Tensor & values, const TensorOptions & options) { #ifdef USE_STATIC_DISPATCH return TypeDefault::sparse_coo_tensor(indices, values, options); #else globalLegacyTypeDispatch().initForTensorTypeSet(at::detail::multi_dispatch_tensor_type_set(indices, values, options)); static auto table = globalATenDispatch().getOpTable("aten::sparse_coo_tensor.indices(Tensor indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor"); return table->callUnboxed(indices, values, options); #endif } static inline Tensor sparse_coo_tensor(const Tensor & indices, const Tensor & values, IntArrayRef size, const TensorOptions & options) { #ifdef USE_STATIC_DISPATCH return TypeDefault::sparse_coo_tensor(indices, values, size, options); #else globalLegacyTypeDispatch().initForTensorTypeSet(at::detail::multi_dispatch_tensor_type_set(indices, values, options)); static auto table = globalATenDispatch().getOpTable("aten::sparse_coo_tensor.indices_size(Tensor indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor"); return table->callUnboxed(indices, values, size, options); #endif } static inline Tensor _sparse_coo_tensor_unsafe(const Tensor & indices, const Tensor & values, IntArrayRef size, const TensorOptions & options) { #ifdef USE_STATIC_DISPATCH return TypeDefault::_sparse_coo_tensor_unsafe(indices, values, size, options); #else globalLegacyTypeDispatch().initForTensorTypeSet(at::detail::multi_dispatch_tensor_type_set(indices, values, options)); static auto table = globalATenDispatch().getOpTable("aten::_sparse_coo_tensor_unsafe(Tensor indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor"); return table->callUnboxed(indices, values, size, options); #endif } static inline Tensor _sparse_coo_tensor_with_dims(int64_t sparse_dim, int64_t dense_dim, IntArrayRef size, const TensorOptions & options) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(options)))) { case Backend::SparseCPU: return SparseCPUType::_sparse_coo_tensor_with_dims(sparse_dim, dense_dim, size, options); break; default: AT_ERROR("_sparse_coo_tensor_with_dims not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(options))); } #else globalLegacyTypeDispatch().initForTensorTypeSet(at::detail::multi_dispatch_tensor_type_set(options)); static auto table = globalATenDispatch().getOpTable("aten::_sparse_coo_tensor_with_dims(int sparse_dim, int dense_dim, int[] size, *, ScalarType dtype, Layout layout, Device device, bool pin_memory=False) -> Tensor"); return table->callUnboxed(sparse_dim, dense_dim, size, options); #endif } static inline Tensor _sparse_coo_tensor_with_dims_and_tensors(int64_t sparse_dim, int64_t dense_dim, IntArrayRef size, const Tensor & indices, const Tensor & values, const TensorOptions & options) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(indices, values, options)))) { case Backend::SparseCPU: return SparseCPUType::_sparse_coo_tensor_with_dims_and_tensors(sparse_dim, dense_dim, size, indices, values, options); break; default: AT_ERROR("_sparse_coo_tensor_with_dims_and_tensors not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(indices, values, options))); } #else globalLegacyTypeDispatch().initForTensorTypeSet(at::detail::multi_dispatch_tensor_type_set(indices, values, options)); static auto table = globalATenDispatch().getOpTable("aten::_sparse_coo_tensor_with_dims_and_tensors(int sparse_dim, int dense_dim, int[] size, Tensor indices, Tensor values, *, ScalarType dtype, Layout layout, Device device, bool pin_memory=False) -> Tensor"); return table->callUnboxed(sparse_dim, dense_dim, size, indices, values, options); #endif } static inline Tensor to_dense_backward(const Tensor & grad, const Tensor & input) { #ifdef USE_STATIC_DISPATCH return TypeDefault::to_dense_backward(grad, input); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::to_dense_backward", ""}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(grad, input)), grad, input); #endif } static inline Tensor & hspmm_out(Tensor & out, const Tensor & mat1, const Tensor & mat2) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(out, mat1, mat2)))) { case Backend::SparseCPU: return SparseCPUType::hspmm_out(out, mat1, mat2); break; default: AT_ERROR("hspmm_out not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(out, mat1, mat2))); } #else static auto table = globalATenDispatch().getOpTable("aten::hspmm.out(Tensor mat1, Tensor mat2, *, Tensor(a!) 
out) -> Tensor(a!)"); return table->callUnboxed(out, mat1, mat2); #endif } static inline Tensor hspmm(const Tensor & mat1, const Tensor & mat2) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(mat1, mat2)))) { case Backend::SparseCPU: return SparseCPUType::hspmm(mat1, mat2); break; default: AT_ERROR("hspmm not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(mat1, mat2))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::hspmm", ""}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(mat1, mat2)), mat1, mat2); #endif } static inline Tensor & copy_sparse_to_sparse_(Tensor & self, const Tensor & src, bool non_blocking) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self, src)))) { case Backend::SparseCPU: return SparseCPUType::copy_sparse_to_sparse_(self, src, non_blocking); break; default: AT_ERROR("copy_sparse_to_sparse_ not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(self, src))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::copy_sparse_to_sparse_", ""}).value(); return c10::Dispatcher::singleton().callUnboxedOnly( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self, src)), self, src, non_blocking); #endif } static inline int64_t numel(const Tensor & self) { #ifdef USE_STATIC_DISPATCH return TypeDefault::numel(self); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::numel", ""}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self); #endif } static inline std::vector unbind(const Tensor & self, int64_t dim) { #ifdef USE_STATIC_DISPATCH return TypeDefault::unbind(self, dim); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::unbind", "int"}).value(); return c10::Dispatcher::singleton().callUnboxedOnly, const Tensor &, int64_t>( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self, dim); #endif } #ifdef BUILD_NAMEDTENSOR static inline std::vector unbind(const Tensor & self, Dimname dim) { #ifdef USE_STATIC_DISPATCH return TypeDefault::unbind(self, dim); #else static auto table = globalATenDispatch().getOpTable("aten::unbind.Dimname(Tensor(a) self, Dimname dim) -> Tensor(a)[]"); return table->callUnboxed, const Tensor &, Dimname>(self, dim); #endif } #endif static inline Tensor mkldnn_reorder_conv2d_weight(const Tensor & self, IntArrayRef padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)))) { default: AT_ERROR("mkldnn_reorder_conv2d_weight not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(self))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::mkldnn_reorder_conv2d_weight", ""}).value(); return c10::Dispatcher::singleton().callUnboxedOnly( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self, padding, stride, dilation, groups); #endif } static inline Tensor to_mkldnn_backward(const Tensor & grad, const Tensor & input) { #ifdef USE_STATIC_DISPATCH return TypeDefault::to_mkldnn_backward(grad, 
input); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::to_mkldnn_backward", ""}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(grad, input)), grad, input); #endif } static inline Tensor quantize_per_tensor(const Tensor & self, double scale, int64_t zero_point, ScalarType dtype) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)))) { case Backend::CPU: return CPUType::quantize_per_tensor(self, scale, zero_point, dtype); break; default: AT_ERROR("quantize_per_tensor not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(self))); } #else static auto table = globalATenDispatch().getOpTable("aten::quantize_per_tensor(Tensor self, float scale, int zero_point, ScalarType dtype) -> Tensor"); return table->callUnboxed(self, scale, zero_point, dtype); #endif } static inline Tensor quantize_per_channel(const Tensor & self, const Tensor & scales, const Tensor & zero_points, int64_t axis, ScalarType dtype) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self, scales, zero_points)))) { case Backend::CPU: return CPUType::quantize_per_channel(self, scales, zero_points, axis, dtype); break; default: AT_ERROR("quantize_per_channel not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(self, scales, zero_points))); } #else static auto table = globalATenDispatch().getOpTable("aten::quantize_per_channel(Tensor self, Tensor scales, Tensor zero_points, int axis, ScalarType dtype) -> Tensor"); return table->callUnboxed(self, scales, zero_points, axis, dtype); #endif } static inline Tensor dequantize(const Tensor & self) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)))) { case Backend::QuantizedCPU: return QuantizedCPUType::dequantize(self); break; default: AT_ERROR("dequantize not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(self))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::dequantize", ""}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self); #endif } static inline double q_scale(const Tensor & self) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)))) { case Backend::QuantizedCPU: return QuantizedCPUType::q_scale(self); break; default: AT_ERROR("q_scale not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(self))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::q_scale", ""}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self); #endif } static inline int64_t q_zero_point(const Tensor & self) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)))) { case Backend::QuantizedCPU: return QuantizedCPUType::q_zero_point(self); break; default: AT_ERROR("q_zero_point not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(self))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::q_zero_point", 
""}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self); #endif } static inline Tensor q_per_channel_scales(const Tensor & self) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)))) { case Backend::QuantizedCPU: return QuantizedCPUType::q_per_channel_scales(self); break; default: AT_ERROR("q_per_channel_scales not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(self))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::q_per_channel_scales", ""}).value(); return c10::Dispatcher::singleton().callUnboxedOnly( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self); #endif } static inline Tensor q_per_channel_zero_points(const Tensor & self) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)))) { case Backend::QuantizedCPU: return QuantizedCPUType::q_per_channel_zero_points(self); break; default: AT_ERROR("q_per_channel_zero_points not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(self))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::q_per_channel_zero_points", ""}).value(); return c10::Dispatcher::singleton().callUnboxedOnly( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self); #endif } static inline int64_t q_per_channel_axis(const Tensor & self) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)))) { case Backend::QuantizedCPU: return QuantizedCPUType::q_per_channel_axis(self); break; default: AT_ERROR("q_per_channel_axis not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(self))); } #else static auto table = globalATenDispatch().getOpTable("aten::q_per_channel_axis(Tensor self) -> int"); return table->callUnboxed(self); #endif } static inline Tensor int_repr(const Tensor & self) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)))) { case Backend::QuantizedCPU: return QuantizedCPUType::int_repr(self); break; default: AT_ERROR("int_repr not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(self))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::int_repr", ""}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self); #endif } static inline Tensor _make_per_tensor_quantized_tensor(const Tensor & self, double scale, int64_t zero_point) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)))) { case Backend::CPU: return CPUType::_make_per_tensor_quantized_tensor(self, scale, zero_point); break; default: AT_ERROR("_make_per_tensor_quantized_tensor not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(self))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::_make_per_tensor_quantized_tensor", ""}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self, scale, zero_point); #endif } static inline Tensor 
_make_per_channel_quantized_tensor(const Tensor & self, const Tensor & scale, const Tensor & zero_point, int64_t axis) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self, scale, zero_point)))) { case Backend::CPU: return CPUType::_make_per_channel_quantized_tensor(self, scale, zero_point, axis); break; default: AT_ERROR("_make_per_channel_quantized_tensor not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(self, scale, zero_point))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::_make_per_channel_quantized_tensor", ""}).value(); return c10::Dispatcher::singleton().callUnboxedOnly( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self, scale, zero_point)), self, scale, zero_point, axis); #endif } static inline Tensor fake_quantize_per_tensor_affine(const Tensor & self, double scale, int64_t zero_point, int64_t quant_min, int64_t quant_max) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)))) { case Backend::CPU: return CPUType::fake_quantize_per_tensor_affine(self, scale, zero_point, quant_min, quant_max); break; default: AT_ERROR("fake_quantize_per_tensor_affine not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(self))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::fake_quantize_per_tensor_affine", ""}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self, scale, zero_point, quant_min, quant_max); #endif } static inline Tensor fake_quantize_per_tensor_affine_backward(const Tensor & grad, const Tensor & self, double scale, int64_t zero_point, int64_t quant_min, int64_t quant_max) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(grad, self)))) { case Backend::CPU: return CPUType::fake_quantize_per_tensor_affine_backward(grad, self, scale, zero_point, quant_min, quant_max); break; default: AT_ERROR("fake_quantize_per_tensor_affine_backward not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(grad, self))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::fake_quantize_per_tensor_affine_backward", ""}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(grad, self)), grad, self, scale, zero_point, quant_min, quant_max); #endif } static inline Tensor fake_quantize_per_channel_affine(const Tensor & self, const Tensor & scale, const Tensor & zero_point, int64_t axis, int64_t quant_min, int64_t quant_max) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self, scale, zero_point)))) { case Backend::CPU: return CPUType::fake_quantize_per_channel_affine(self, scale, zero_point, axis, quant_min, quant_max); break; default: AT_ERROR("fake_quantize_per_channel_affine not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(self, scale, zero_point))); } #else static auto table = globalATenDispatch().getOpTable("aten::fake_quantize_per_channel_affine(Tensor self, Tensor scale, Tensor zero_point, int axis, int quant_min, int quant_max) -> Tensor"); return table->callUnboxed(self, scale, zero_point, axis, 
quant_min, quant_max); #endif } static inline Tensor fake_quantize_per_channel_affine_backward(const Tensor & grad, const Tensor & self, const Tensor & scale, const Tensor & zero_point, int64_t axis, int64_t quant_min, int64_t quant_max) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(grad, self, scale, zero_point)))) { case Backend::CPU: return CPUType::fake_quantize_per_channel_affine_backward(grad, self, scale, zero_point, axis, quant_min, quant_max); break; default: AT_ERROR("fake_quantize_per_channel_affine_backward not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(grad, self, scale, zero_point))); } #else static auto table = globalATenDispatch().getOpTable("aten::fake_quantize_per_channel_affine_backward(Tensor grad, Tensor self, Tensor scale, Tensor zero_point, int axis, int quant_min, int quant_max) -> Tensor"); return table->callUnboxed(grad, self, scale, zero_point, axis, quant_min, quant_max); #endif } static inline std::vector meshgrid(TensorList tensors) { #ifdef USE_STATIC_DISPATCH return TypeDefault::meshgrid(tensors); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::meshgrid", ""}).value(); return c10::Dispatcher::singleton().callUnboxedOnly, TensorList>( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(tensors)), tensors); #endif } static inline Tensor cartesian_prod(TensorList tensors) { #ifdef USE_STATIC_DISPATCH return TypeDefault::cartesian_prod(tensors); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::cartesian_prod", ""}).value(); return c10::Dispatcher::singleton().callUnboxedOnly( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(tensors)), tensors); #endif } static inline Tensor combinations(const Tensor & self, int64_t r, bool with_replacement) { #ifdef USE_STATIC_DISPATCH return TypeDefault::combinations(self, r, with_replacement); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::combinations", ""}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self, r, with_replacement); #endif } static inline ScalarType result_type(const Tensor & tensor, const Tensor & other) { #ifdef USE_STATIC_DISPATCH return TypeDefault::result_type(tensor, other); #else static auto table = globalATenDispatch().getOpTable("aten::result_type.Tensor(Tensor tensor, Tensor other) -> ScalarType"); return table->callUnboxed(tensor, other); #endif } static inline ScalarType result_type(const Tensor & tensor, Scalar other) { #ifdef USE_STATIC_DISPATCH return TypeDefault::result_type(tensor, other); #else static auto table = globalATenDispatch().getOpTable("aten::result_type.Scalar(Tensor tensor, Scalar other) -> ScalarType"); return table->callUnboxed(tensor, other); #endif } static inline ScalarType result_type(Scalar scalar, const Tensor & tensor) { #ifdef USE_STATIC_DISPATCH return TypeDefault::result_type(scalar, tensor); #else static auto table = globalATenDispatch().getOpTable("aten::result_type.Scalar_Tensor(Scalar scalar, Tensor tensor) -> ScalarType"); return table->callUnboxed(scalar, tensor); #endif } static inline ScalarType result_type(Scalar scalar1, Scalar scalar2) { #ifdef USE_STATIC_DISPATCH return TypeDefault::result_type(scalar1, scalar2); #else static auto table = globalATenDispatch().getOpTable("aten::result_type.Scalar_Scalar(Scalar 
scalar1, Scalar scalar2) -> ScalarType"); return table->callUnboxed(scalar1, scalar2); #endif } static inline bool can_cast(ScalarType from, ScalarType to) { #ifdef USE_STATIC_DISPATCH return TypeDefault::can_cast(from, to); #else static auto table = globalATenDispatch().getOpTable("aten::can_cast(ScalarType from, ScalarType to) -> bool"); return table->callUnboxed(from, to); #endif } static inline ScalarType promote_types(ScalarType type1, ScalarType type2) { #ifdef USE_STATIC_DISPATCH return TypeDefault::promote_types(type1, type2); #else static auto table = globalATenDispatch().getOpTable("aten::promote_types(ScalarType type1, ScalarType type2) -> ScalarType"); return table->callUnboxed(type1, type2); #endif } static inline Scalar _local_scalar_dense(const Tensor & self) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)))) { case Backend::CPU: return CPUType::_local_scalar_dense(self); break; default: AT_ERROR("_local_scalar_dense not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(self))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::_local_scalar_dense", ""}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self); #endif } static inline std::tuple _thnn_fused_lstm_cell(const Tensor & input_gates, const Tensor & hidden_gates, const Tensor & cx, const Tensor & input_bias, const Tensor & hidden_bias) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(input_gates, hidden_gates, cx, input_bias, hidden_bias)))) { default: AT_ERROR("_thnn_fused_lstm_cell not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(input_gates, hidden_gates, cx, input_bias, hidden_bias))); } #else static auto table = globalATenDispatch().getOpTable("aten::_thnn_fused_lstm_cell(Tensor input_gates, Tensor hidden_gates, Tensor cx, Tensor? input_bias=None, Tensor? hidden_bias=None) -> (Tensor, Tensor, Tensor)"); return table->callUnboxed, const Tensor &, const Tensor &, const Tensor &, const Tensor &, const Tensor &>(input_gates, hidden_gates, cx, input_bias, hidden_bias); #endif } static inline std::tuple _thnn_fused_lstm_cell_backward(const Tensor & grad_hy, const Tensor & grad_cy, const Tensor & cx, const Tensor & cy, const Tensor & workspace, bool has_bias) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(grad_hy, grad_cy, cx, cy, workspace)))) { default: AT_ERROR("_thnn_fused_lstm_cell_backward not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(grad_hy, grad_cy, cx, cy, workspace))); } #else static auto table = globalATenDispatch().getOpTable("aten::_thnn_fused_lstm_cell_backward(Tensor? grad_hy, Tensor? 
static inline Scalar _local_scalar_dense(const Tensor & self) {
#ifdef USE_STATIC_DISPATCH
    switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)))) {
        case Backend::CPU: return CPUType::_local_scalar_dense(self); break;
        default: AT_ERROR("_local_scalar_dense not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(self)));
    }
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton()
        .findSchema({"aten::_local_scalar_dense", ""}).value();
    return c10::Dispatcher::singleton().callUnboxed<Scalar, const Tensor &>(
        op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self);
#endif
}
static inline std::tuple<Tensor,Tensor,Tensor> _thnn_fused_lstm_cell(const Tensor & input_gates, const Tensor & hidden_gates, const Tensor & cx, const Tensor & input_bias, const Tensor & hidden_bias) {
#ifdef USE_STATIC_DISPATCH
    switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(input_gates, hidden_gates, cx, input_bias, hidden_bias)))) {
        default: AT_ERROR("_thnn_fused_lstm_cell not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(input_gates, hidden_gates, cx, input_bias, hidden_bias)));
    }
#else
    static auto table = globalATenDispatch().getOpTable("aten::_thnn_fused_lstm_cell(Tensor input_gates, Tensor hidden_gates, Tensor cx, Tensor? input_bias=None, Tensor? hidden_bias=None) -> (Tensor, Tensor, Tensor)");
    return table->callUnboxed<std::tuple<Tensor,Tensor,Tensor>, const Tensor &, const Tensor &, const Tensor &, const Tensor &, const Tensor &>(input_gates, hidden_gates, cx, input_bias, hidden_bias);
#endif
}
static inline std::tuple<Tensor,Tensor,Tensor,Tensor,Tensor> _thnn_fused_lstm_cell_backward(const Tensor & grad_hy, const Tensor & grad_cy, const Tensor & cx, const Tensor & cy, const Tensor & workspace, bool has_bias) {
#ifdef USE_STATIC_DISPATCH
    switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(grad_hy, grad_cy, cx, cy, workspace)))) {
        default: AT_ERROR("_thnn_fused_lstm_cell_backward not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(grad_hy, grad_cy, cx, cy, workspace)));
    }
#else
    static auto table = globalATenDispatch().getOpTable("aten::_thnn_fused_lstm_cell_backward(Tensor? grad_hy, Tensor? grad_cy, Tensor cx, Tensor cy, Tensor workspace, bool has_bias) -> (Tensor, Tensor, Tensor, Tensor, Tensor)");
    return table->callUnboxed<std::tuple<Tensor,Tensor,Tensor,Tensor,Tensor>, const Tensor &, const Tensor &, const Tensor &, const Tensor &, const Tensor &, bool>(grad_hy, grad_cy, cx, cy, workspace, has_bias);
#endif
}
static inline std::tuple<Tensor,Tensor,Tensor,Tensor,Tensor> _thnn_differentiable_lstm_cell_backward(const Tensor & grad_hy, const Tensor & grad_cy, const Tensor & input_gates, const Tensor & hidden_gates, const Tensor & input_bias, const Tensor & hidden_bias, const Tensor & cx, const Tensor & cy) {
#ifdef USE_STATIC_DISPATCH
    return TypeDefault::_thnn_differentiable_lstm_cell_backward(grad_hy, grad_cy, input_gates, hidden_gates, input_bias, hidden_bias, cx, cy);
#else
    static auto table = globalATenDispatch().getOpTable("aten::_thnn_differentiable_lstm_cell_backward(Tensor? grad_hy, Tensor? grad_cy, Tensor input_gates, Tensor hidden_gates, Tensor? input_bias, Tensor? hidden_bias, Tensor cx, Tensor cy) -> (Tensor, Tensor, Tensor, Tensor, Tensor)");
    return table->callUnboxed<std::tuple<Tensor,Tensor,Tensor,Tensor,Tensor>, const Tensor &, const Tensor &, const Tensor &, const Tensor &, const Tensor &, const Tensor &, const Tensor &, const Tensor &>(grad_hy, grad_cy, input_gates, hidden_gates, input_bias, hidden_bias, cx, cy);
#endif
}
static inline std::tuple<Tensor,Tensor> _thnn_fused_gru_cell(const Tensor & input_gates, const Tensor & hidden_gates, const Tensor & hx, const Tensor & input_bias, const Tensor & hidden_bias) {
#ifdef USE_STATIC_DISPATCH
    switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(input_gates, hidden_gates, hx, input_bias, hidden_bias)))) {
        default: AT_ERROR("_thnn_fused_gru_cell not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(input_gates, hidden_gates, hx, input_bias, hidden_bias)));
    }
#else
    static auto table = globalATenDispatch().getOpTable("aten::_thnn_fused_gru_cell(Tensor input_gates, Tensor hidden_gates, Tensor hx, Tensor? input_bias=None, Tensor? hidden_bias=None) -> (Tensor, Tensor)");
    return table->callUnboxed<std::tuple<Tensor,Tensor>, const Tensor &, const Tensor &, const Tensor &, const Tensor &, const Tensor &>(input_gates, hidden_gates, hx, input_bias, hidden_bias);
#endif
}
static inline std::tuple<Tensor,Tensor,Tensor,Tensor,Tensor> _thnn_fused_gru_cell_backward(const Tensor & grad_hy, const Tensor & workspace, bool has_bias) {
#ifdef USE_STATIC_DISPATCH
    switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(grad_hy, workspace)))) {
        default: AT_ERROR("_thnn_fused_gru_cell_backward not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(grad_hy, workspace)));
    }
#else
    static c10::OperatorHandle op = c10::Dispatcher::singleton()
        .findSchema({"aten::_thnn_fused_gru_cell_backward", ""}).value();
    return c10::Dispatcher::singleton().callUnboxedOnly<std::tuple<Tensor,Tensor,Tensor,Tensor,Tensor>, const Tensor &, const Tensor &, bool>(
        op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(grad_hy, workspace)), grad_hy, workspace, has_bias);
#endif
}
static inline std::tuple<Tensor,Tensor,Tensor,Tensor,Tensor> _thnn_differentiable_gru_cell_backward(const Tensor & grad_hy, const Tensor & input_gates, const Tensor & hidden_gates, const Tensor & hx, const Tensor & input_bias, const Tensor & hidden_bias) {
#ifdef USE_STATIC_DISPATCH
    return TypeDefault::_thnn_differentiable_gru_cell_backward(grad_hy, input_gates, hidden_gates, hx, input_bias, hidden_bias);
#else
    static auto table = globalATenDispatch().getOpTable("aten::_thnn_differentiable_gru_cell_backward(Tensor grad_hy, Tensor input_gates, Tensor hidden_gates, Tensor hx, Tensor? input_bias, Tensor?
hidden_bias) -> (Tensor, Tensor, Tensor, Tensor, Tensor)"); return table->callUnboxed, const Tensor &, const Tensor &, const Tensor &, const Tensor &, const Tensor &, const Tensor &>(grad_hy, input_gates, hidden_gates, hx, input_bias, hidden_bias); #endif } static inline std::tuple lstm(const Tensor & input, TensorList hx, TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first) { #ifdef USE_STATIC_DISPATCH return TypeDefault::lstm(input, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::lstm", "input"}).value(); return c10::Dispatcher::singleton().callUnboxedOnly, const Tensor &, TensorList, TensorList, bool, int64_t, double, bool, bool, bool>( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(input, hx, params)), input, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first); #endif } static inline std::tuple lstm(const Tensor & data, const Tensor & batch_sizes, TensorList hx, TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional) { #ifdef USE_STATIC_DISPATCH return TypeDefault::lstm(data, batch_sizes, hx, params, has_biases, num_layers, dropout, train, bidirectional); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::lstm", "data"}).value(); return c10::Dispatcher::singleton().callUnboxedOnly, const Tensor &, const Tensor &, TensorList, TensorList, bool, int64_t, double, bool, bool>( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(data, batch_sizes, hx, params)), data, batch_sizes, hx, params, has_biases, num_layers, dropout, train, bidirectional); #endif } static inline std::tuple gru(const Tensor & input, const Tensor & hx, TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first) { #ifdef USE_STATIC_DISPATCH return TypeDefault::gru(input, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::gru", "input"}).value(); return c10::Dispatcher::singleton().callUnboxedOnly, const Tensor &, const Tensor &, TensorList, bool, int64_t, double, bool, bool, bool>( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(input, hx, params)), input, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first); #endif } static inline std::tuple gru(const Tensor & data, const Tensor & batch_sizes, const Tensor & hx, TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional) { #ifdef USE_STATIC_DISPATCH return TypeDefault::gru(data, batch_sizes, hx, params, has_biases, num_layers, dropout, train, bidirectional); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::gru", "data"}).value(); return c10::Dispatcher::singleton().callUnboxedOnly, const Tensor &, const Tensor &, const Tensor &, TensorList, bool, int64_t, double, bool, bool>( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(data, batch_sizes, hx, params)), data, batch_sizes, hx, params, has_biases, num_layers, dropout, train, bidirectional); #endif } static inline std::tuple rnn_tanh(const Tensor & input, const Tensor & hx, TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool 
bidirectional, bool batch_first) { #ifdef USE_STATIC_DISPATCH return TypeDefault::rnn_tanh(input, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::rnn_tanh", "input"}).value(); return c10::Dispatcher::singleton().callUnboxedOnly, const Tensor &, const Tensor &, TensorList, bool, int64_t, double, bool, bool, bool>( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(input, hx, params)), input, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first); #endif } static inline std::tuple rnn_tanh(const Tensor & data, const Tensor & batch_sizes, const Tensor & hx, TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional) { #ifdef USE_STATIC_DISPATCH return TypeDefault::rnn_tanh(data, batch_sizes, hx, params, has_biases, num_layers, dropout, train, bidirectional); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::rnn_tanh", "data"}).value(); return c10::Dispatcher::singleton().callUnboxedOnly, const Tensor &, const Tensor &, const Tensor &, TensorList, bool, int64_t, double, bool, bool>( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(data, batch_sizes, hx, params)), data, batch_sizes, hx, params, has_biases, num_layers, dropout, train, bidirectional); #endif } static inline std::tuple rnn_relu(const Tensor & input, const Tensor & hx, TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first) { #ifdef USE_STATIC_DISPATCH return TypeDefault::rnn_relu(input, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::rnn_relu", "input"}).value(); return c10::Dispatcher::singleton().callUnboxedOnly, const Tensor &, const Tensor &, TensorList, bool, int64_t, double, bool, bool, bool>( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(input, hx, params)), input, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first); #endif } static inline std::tuple rnn_relu(const Tensor & data, const Tensor & batch_sizes, const Tensor & hx, TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional) { #ifdef USE_STATIC_DISPATCH return TypeDefault::rnn_relu(data, batch_sizes, hx, params, has_biases, num_layers, dropout, train, bidirectional); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::rnn_relu", "data"}).value(); return c10::Dispatcher::singleton().callUnboxedOnly, const Tensor &, const Tensor &, const Tensor &, TensorList, bool, int64_t, double, bool, bool>( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(data, batch_sizes, hx, params)), data, batch_sizes, hx, params, has_biases, num_layers, dropout, train, bidirectional); #endif } static inline std::tuple lstm_cell(const Tensor & input, TensorList hx, const Tensor & w_ih, const Tensor & w_hh, const Tensor & b_ih, const Tensor & b_hh) { #ifdef USE_STATIC_DISPATCH return TypeDefault::lstm_cell(input, hx, w_ih, w_hh, b_ih, b_hh); #else static auto table = globalATenDispatch().getOpTable("aten::lstm_cell(Tensor input, Tensor[] hx, Tensor w_ih, Tensor w_hh, Tensor? b_ih=None, Tensor? 
b_hh=None) -> (Tensor, Tensor)"); return table->callUnboxed, const Tensor &, TensorList, const Tensor &, const Tensor &, const Tensor &, const Tensor &>(input, hx, w_ih, w_hh, b_ih, b_hh); #endif } static inline Tensor gru_cell(const Tensor & input, const Tensor & hx, const Tensor & w_ih, const Tensor & w_hh, const Tensor & b_ih, const Tensor & b_hh) { #ifdef USE_STATIC_DISPATCH return TypeDefault::gru_cell(input, hx, w_ih, w_hh, b_ih, b_hh); #else static auto table = globalATenDispatch().getOpTable("aten::gru_cell(Tensor input, Tensor hx, Tensor w_ih, Tensor w_hh, Tensor? b_ih=None, Tensor? b_hh=None) -> Tensor"); return table->callUnboxed(input, hx, w_ih, w_hh, b_ih, b_hh); #endif } static inline Tensor rnn_tanh_cell(const Tensor & input, const Tensor & hx, const Tensor & w_ih, const Tensor & w_hh, const Tensor & b_ih, const Tensor & b_hh) { #ifdef USE_STATIC_DISPATCH return TypeDefault::rnn_tanh_cell(input, hx, w_ih, w_hh, b_ih, b_hh); #else static auto table = globalATenDispatch().getOpTable("aten::rnn_tanh_cell(Tensor input, Tensor hx, Tensor w_ih, Tensor w_hh, Tensor? b_ih=None, Tensor? b_hh=None) -> Tensor"); return table->callUnboxed(input, hx, w_ih, w_hh, b_ih, b_hh); #endif } static inline Tensor rnn_relu_cell(const Tensor & input, const Tensor & hx, const Tensor & w_ih, const Tensor & w_hh, const Tensor & b_ih, const Tensor & b_hh) { #ifdef USE_STATIC_DISPATCH return TypeDefault::rnn_relu_cell(input, hx, w_ih, w_hh, b_ih, b_hh); #else static auto table = globalATenDispatch().getOpTable("aten::rnn_relu_cell(Tensor input, Tensor hx, Tensor w_ih, Tensor w_hh, Tensor? b_ih=None, Tensor? b_hh=None) -> Tensor"); return table->callUnboxed(input, hx, w_ih, w_hh, b_ih, b_hh); #endif } static inline std::tuple quantized_lstm(const Tensor & input, TensorList hx, TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first, c10::optional dtype, bool use_dynamic) { #ifdef USE_STATIC_DISPATCH return TypeDefault::quantized_lstm(input, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first, dtype, use_dynamic); #else static auto table = globalATenDispatch().getOpTable("aten::quantized_lstm(Tensor input, Tensor[] hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first, *, ScalarType? 
dtype=None, bool use_dynamic=False) -> (Tensor, Tensor, Tensor)"); return table->callUnboxed, const Tensor &, TensorList, TensorList, bool, int64_t, double, bool, bool, bool, c10::optional, bool>(input, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first, dtype, use_dynamic); #endif } static inline std::tuple quantized_gru(const Tensor & input, const Tensor & hx, TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first) { #ifdef USE_STATIC_DISPATCH return TypeDefault::quantized_gru(input, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::quantized_gru", "input"}).value(); return c10::Dispatcher::singleton().callUnboxedOnly, const Tensor &, const Tensor &, TensorList, bool, int64_t, double, bool, bool, bool>( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(input, hx, params)), input, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first); #endif } static inline std::tuple quantized_gru(const Tensor & data, const Tensor & batch_sizes, const Tensor & hx, TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional) { #ifdef USE_STATIC_DISPATCH return TypeDefault::quantized_gru(data, batch_sizes, hx, params, has_biases, num_layers, dropout, train, bidirectional); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::quantized_gru", "data"}).value(); return c10::Dispatcher::singleton().callUnboxedOnly, const Tensor &, const Tensor &, const Tensor &, TensorList, bool, int64_t, double, bool, bool>( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(data, batch_sizes, hx, params)), data, batch_sizes, hx, params, has_biases, num_layers, dropout, train, bidirectional); #endif } static inline std::tuple quantized_lstm_cell(const Tensor & input, TensorList hx, const Tensor & w_ih, const Tensor & w_hh, const Tensor & b_ih, const Tensor & b_hh, const Tensor & packed_ih, const Tensor & packed_hh, const Tensor & col_offsets_ih, const Tensor & col_offsets_hh, Scalar scale_ih, Scalar scale_hh, Scalar zero_point_ih, Scalar zero_point_hh) { #ifdef USE_STATIC_DISPATCH return TypeDefault::quantized_lstm_cell(input, hx, w_ih, w_hh, b_ih, b_hh, packed_ih, packed_hh, col_offsets_ih, col_offsets_hh, scale_ih, scale_hh, zero_point_ih, zero_point_hh); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::quantized_lstm_cell", ""}).value(); return c10::Dispatcher::singleton().callUnboxedOnly, const Tensor &, TensorList, const Tensor &, const Tensor &, const Tensor &, const Tensor &, const Tensor &, const Tensor &, const Tensor &, const Tensor &, Scalar, Scalar, Scalar, Scalar>( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(input, hx, w_ih, w_hh, b_ih, b_hh, packed_ih, packed_hh, col_offsets_ih, col_offsets_hh)), input, hx, w_ih, w_hh, b_ih, b_hh, packed_ih, packed_hh, col_offsets_ih, col_offsets_hh, scale_ih, scale_hh, zero_point_ih, zero_point_hh); #endif } static inline Tensor quantized_gru_cell(const Tensor & input, const Tensor & hx, const Tensor & w_ih, const Tensor & w_hh, const Tensor & b_ih, const Tensor & b_hh, const Tensor & packed_ih, const Tensor & packed_hh, const Tensor & col_offsets_ih, const Tensor & col_offsets_hh, Scalar scale_ih, Scalar scale_hh, Scalar zero_point_ih, Scalar zero_point_hh) { #ifdef 
USE_STATIC_DISPATCH return TypeDefault::quantized_gru_cell(input, hx, w_ih, w_hh, b_ih, b_hh, packed_ih, packed_hh, col_offsets_ih, col_offsets_hh, scale_ih, scale_hh, zero_point_ih, zero_point_hh); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::quantized_gru_cell", ""}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(input, hx, w_ih, w_hh, b_ih, b_hh, packed_ih, packed_hh, col_offsets_ih, col_offsets_hh)), input, hx, w_ih, w_hh, b_ih, b_hh, packed_ih, packed_hh, col_offsets_ih, col_offsets_hh, scale_ih, scale_hh, zero_point_ih, zero_point_hh); #endif } static inline Tensor quantized_rnn_relu_cell(const Tensor & input, const Tensor & hx, const Tensor & w_ih, const Tensor & w_hh, const Tensor & b_ih, const Tensor & b_hh, const Tensor & packed_ih, const Tensor & packed_hh, const Tensor & col_offsets_ih, const Tensor & col_offsets_hh, Scalar scale_ih, Scalar scale_hh, Scalar zero_point_ih, Scalar zero_point_hh) { #ifdef USE_STATIC_DISPATCH return TypeDefault::quantized_rnn_relu_cell(input, hx, w_ih, w_hh, b_ih, b_hh, packed_ih, packed_hh, col_offsets_ih, col_offsets_hh, scale_ih, scale_hh, zero_point_ih, zero_point_hh); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::quantized_rnn_relu_cell", ""}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(input, hx, w_ih, w_hh, b_ih, b_hh, packed_ih, packed_hh, col_offsets_ih, col_offsets_hh)), input, hx, w_ih, w_hh, b_ih, b_hh, packed_ih, packed_hh, col_offsets_ih, col_offsets_hh, scale_ih, scale_hh, zero_point_ih, zero_point_hh); #endif } static inline Tensor quantized_rnn_tanh_cell(const Tensor & input, const Tensor & hx, const Tensor & w_ih, const Tensor & w_hh, const Tensor & b_ih, const Tensor & b_hh, const Tensor & packed_ih, const Tensor & packed_hh, const Tensor & col_offsets_ih, const Tensor & col_offsets_hh, Scalar scale_ih, Scalar scale_hh, Scalar zero_point_ih, Scalar zero_point_hh) { #ifdef USE_STATIC_DISPATCH return TypeDefault::quantized_rnn_tanh_cell(input, hx, w_ih, w_hh, b_ih, b_hh, packed_ih, packed_hh, col_offsets_ih, col_offsets_hh, scale_ih, scale_hh, zero_point_ih, zero_point_hh); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::quantized_rnn_tanh_cell", ""}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(input, hx, w_ih, w_hh, b_ih, b_hh, packed_ih, packed_hh, col_offsets_ih, col_offsets_hh)), input, hx, w_ih, w_hh, b_ih, b_hh, packed_ih, packed_hh, col_offsets_ih, col_offsets_hh, scale_ih, scale_hh, zero_point_ih, zero_point_hh); #endif } static inline std::tuple _pack_padded_sequence(const Tensor & input, const Tensor & lengths, bool batch_first) { #ifdef USE_STATIC_DISPATCH return TypeDefault::_pack_padded_sequence(input, lengths, batch_first); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::_pack_padded_sequence", ""}).value(); return c10::Dispatcher::singleton().callUnboxedOnly, const Tensor &, const Tensor &, bool>( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(input, lengths)), input, lengths, batch_first); #endif } static inline Tensor _pack_padded_sequence_backward(const Tensor & grad, IntArrayRef input_size, const Tensor & batch_sizes, bool batch_first) { #ifdef USE_STATIC_DISPATCH return 
TypeDefault::_pack_padded_sequence_backward(grad, input_size, batch_sizes, batch_first); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::_pack_padded_sequence_backward", ""}).value(); return c10::Dispatcher::singleton().callUnboxedOnly( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(grad, batch_sizes)), grad, input_size, batch_sizes, batch_first); #endif } static inline std::tuple _pad_packed_sequence(const Tensor & data, const Tensor & batch_sizes, bool batch_first, Scalar padding_value, int64_t total_length) { #ifdef USE_STATIC_DISPATCH return TypeDefault::_pad_packed_sequence(data, batch_sizes, batch_first, padding_value, total_length); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::_pad_packed_sequence", ""}).value(); return c10::Dispatcher::singleton().callUnboxedOnly, const Tensor &, const Tensor &, bool, Scalar, int64_t>( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(data, batch_sizes)), data, batch_sizes, batch_first, padding_value, total_length); #endif } static inline Tensor masked_fill(const Tensor & self, const Tensor & mask, Scalar value) { #ifdef USE_STATIC_DISPATCH return TypeDefault::masked_fill(self, mask, value); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::masked_fill", "Scalar"}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self, mask)), self, mask, value); #endif } static inline Tensor masked_fill(const Tensor & self, const Tensor & mask, const Tensor & value) { #ifdef USE_STATIC_DISPATCH return TypeDefault::masked_fill(self, mask, value); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::masked_fill", "Tensor"}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self, mask, value)), self, mask, value); #endif } static inline Tensor masked_scatter(const Tensor & self, const Tensor & mask, const Tensor & source) { #ifdef USE_STATIC_DISPATCH return TypeDefault::masked_scatter(self, mask, source); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::masked_scatter", ""}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self, mask, source)), self, mask, source); #endif } static inline Tensor index_add(const Tensor & self, int64_t dim, const Tensor & index, const Tensor & source) { #ifdef USE_STATIC_DISPATCH return TypeDefault::index_add(self, dim, index, source); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::index_add", ""}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self, index, source)), self, dim, index, source); #endif } #ifdef BUILD_NAMEDTENSOR static inline Tensor index_add(const Tensor & self, Dimname dim, const Tensor & index, const Tensor & source) { #ifdef USE_STATIC_DISPATCH return TypeDefault::index_add(self, dim, index, source); #else static auto table = globalATenDispatch().getOpTable("aten::index_add.dimname(Tensor self, Dimname dim, Tensor index, Tensor source) -> Tensor"); return table->callUnboxed(self, dim, index, source); #endif } #endif static inline Tensor index_fill(const Tensor & self, int64_t dim, const Tensor & index, Scalar value) { #ifdef USE_STATIC_DISPATCH return 
TypeDefault::index_fill(self, dim, index, value); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::index_fill", "Scalar"}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self, index)), self, dim, index, value); #endif } static inline Tensor index_fill(const Tensor & self, int64_t dim, const Tensor & index, const Tensor & value) { #ifdef USE_STATIC_DISPATCH return TypeDefault::index_fill(self, dim, index, value); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::index_fill", "Tensor"}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self, index, value)), self, dim, index, value); #endif } #ifdef BUILD_NAMEDTENSOR static inline Tensor index_fill(const Tensor & self, Dimname dim, const Tensor & index, Scalar value) { #ifdef USE_STATIC_DISPATCH return TypeDefault::index_fill(self, dim, index, value); #else static auto table = globalATenDispatch().getOpTable("aten::index_fill.dimname_Scalar(Tensor self, Dimname dim, Tensor index, Scalar value) -> Tensor"); return table->callUnboxed(self, dim, index, value); #endif } #endif #ifdef BUILD_NAMEDTENSOR static inline Tensor index_fill(const Tensor & self, Dimname dim, const Tensor & index, const Tensor & value) { #ifdef USE_STATIC_DISPATCH return TypeDefault::index_fill(self, dim, index, value); #else static auto table = globalATenDispatch().getOpTable("aten::index_fill.dimname_Tensor(Tensor self, Dimname dim, Tensor index, Tensor value) -> Tensor"); return table->callUnboxed(self, dim, index, value); #endif } #endif static inline Tensor scatter(const Tensor & self, int64_t dim, const Tensor & index, const Tensor & src) { #ifdef USE_STATIC_DISPATCH return TypeDefault::scatter(self, dim, index, src); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::scatter", "src"}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self, index, src)), self, dim, index, src); #endif } static inline Tensor scatter(const Tensor & self, int64_t dim, const Tensor & index, Scalar value) { #ifdef USE_STATIC_DISPATCH return TypeDefault::scatter(self, dim, index, value); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::scatter", "value"}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self, index)), self, dim, index, value); #endif } #ifdef BUILD_NAMEDTENSOR static inline Tensor scatter(const Tensor & self, Dimname dim, const Tensor & index, const Tensor & src) { #ifdef USE_STATIC_DISPATCH return TypeDefault::scatter(self, dim, index, src); #else static auto table = globalATenDispatch().getOpTable("aten::scatter.dimname_src(Tensor self, Dimname dim, Tensor index, Tensor src) -> Tensor"); return table->callUnboxed(self, dim, index, src); #endif } #endif #ifdef BUILD_NAMEDTENSOR static inline Tensor scatter(const Tensor & self, Dimname dim, const Tensor & index, Scalar value) { #ifdef USE_STATIC_DISPATCH return TypeDefault::scatter(self, dim, index, value); #else static auto table = globalATenDispatch().getOpTable("aten::scatter.dimname_value(Tensor self, Dimname dim, Tensor index, Scalar value) -> Tensor"); return table->callUnboxed(self, dim, index, value); #endif } #endif static inline Tensor scatter_add(const Tensor & 
self, int64_t dim, const Tensor & index, const Tensor & src) { #ifdef USE_STATIC_DISPATCH return TypeDefault::scatter_add(self, dim, index, src); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::scatter_add", ""}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self, index, src)), self, dim, index, src); #endif } #ifdef BUILD_NAMEDTENSOR static inline Tensor scatter_add(const Tensor & self, Dimname dim, const Tensor & index, const Tensor & src) { #ifdef USE_STATIC_DISPATCH return TypeDefault::scatter_add(self, dim, index, src); #else static auto table = globalATenDispatch().getOpTable("aten::scatter_add.dimname(Tensor self, Dimname dim, Tensor index, Tensor src) -> Tensor"); return table->callUnboxed(self, dim, index, src); #endif } #endif static inline Tensor __and__(const Tensor & self, Scalar other) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)))) { case Backend::CPU: return CPUType::__and__(self, other); break; default: AT_ERROR("__and__ not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(self))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::__and__", "Scalar"}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self, other); #endif } static inline Tensor __and__(const Tensor & self, const Tensor & other) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self, other)))) { case Backend::CPU: return CPUType::__and__(self, other); break; default: AT_ERROR("__and__ not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(self, other))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::__and__", "Tensor"}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self, other)), self, other); #endif } static inline Tensor __or__(const Tensor & self, Scalar other) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)))) { case Backend::CPU: return CPUType::__or__(self, other); break; default: AT_ERROR("__or__ not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(self))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::__or__", "Scalar"}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self, other); #endif } static inline Tensor __or__(const Tensor & self, const Tensor & other) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self, other)))) { case Backend::CPU: return CPUType::__or__(self, other); break; default: AT_ERROR("__or__ not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(self, other))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::__or__", "Tensor"}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self, other)), self, other); #endif } static inline Tensor __xor__(const Tensor & self, Scalar other) { 
#ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)))) { case Backend::CPU: return CPUType::__xor__(self, other); break; default: AT_ERROR("__xor__ not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(self))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::__xor__", "Scalar"}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self, other); #endif } static inline Tensor __xor__(const Tensor & self, const Tensor & other) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self, other)))) { case Backend::CPU: return CPUType::__xor__(self, other); break; default: AT_ERROR("__xor__ not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(self, other))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::__xor__", "Tensor"}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self, other)), self, other); #endif } static inline Tensor __lshift__(const Tensor & self, Scalar other) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)))) { case Backend::CPU: return CPUType::__lshift__(self, other); break; default: AT_ERROR("__lshift__ not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(self))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::__lshift__", "Scalar"}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self, other); #endif } static inline Tensor __lshift__(const Tensor & self, const Tensor & other) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self, other)))) { case Backend::CPU: return CPUType::__lshift__(self, other); break; default: AT_ERROR("__lshift__ not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(self, other))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::__lshift__", "Tensor"}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self, other)), self, other); #endif } static inline Tensor __rshift__(const Tensor & self, Scalar other) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)))) { case Backend::CPU: return CPUType::__rshift__(self, other); break; default: AT_ERROR("__rshift__ not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(self))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::__rshift__", "Scalar"}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self, other); #endif } static inline Tensor __rshift__(const Tensor & self, const Tensor & other) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self, other)))) { case Backend::CPU: return CPUType::__rshift__(self, other); break; default: AT_ERROR("__rshift__ 
not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(self, other))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::__rshift__", "Tensor"}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self, other)), self, other); #endif } static inline Tensor & addbmm_out(Tensor & out, const Tensor & self, const Tensor & batch1, const Tensor & batch2, Scalar beta, Scalar alpha) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(out, self, batch1, batch2)))) { case Backend::CPU: return CPUType::addbmm_out(out, self, batch1, batch2, beta, alpha); break; default: AT_ERROR("addbmm_out not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(out, self, batch1, batch2))); } #else static auto table = globalATenDispatch().getOpTable("aten::addbmm.out(Tensor self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)"); return table->callUnboxed(out, self, batch1, batch2, beta, alpha); #endif } static inline Tensor addbmm(const Tensor & self, const Tensor & batch1, const Tensor & batch2, Scalar beta, Scalar alpha) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self, batch1, batch2)))) { case Backend::CPU: return CPUType::addbmm(self, batch1, batch2, beta, alpha); break; default: AT_ERROR("addbmm not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(self, batch1, batch2))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::addbmm", ""}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self, batch1, batch2)), self, batch1, batch2, beta, alpha); #endif } static inline Tensor & diag_out(Tensor & out, const Tensor & self, int64_t diagonal) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(out, self)))) { case Backend::CPU: return CPUType::diag_out(out, self, diagonal); break; default: AT_ERROR("diag_out not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(out, self))); } #else static auto table = globalATenDispatch().getOpTable("aten::diag.out(Tensor self, int diagonal=0, *, Tensor(a!) out) -> Tensor(a!)"); return table->callUnboxed(out, self, diagonal); #endif } static inline Tensor diag(const Tensor & self, int64_t diagonal) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)))) { case Backend::CPU: return CPUType::diag(self, diagonal); break; default: AT_ERROR("diag not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(self))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::diag", ""}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self, diagonal); #endif } static inline Tensor & cross_out(Tensor & out, const Tensor & self, const Tensor & other, c10::optional dim) { #ifdef USE_STATIC_DISPATCH return TypeDefault::cross_out(out, self, other, dim); #else static auto table = globalATenDispatch().getOpTable("aten::cross.out(Tensor self, Tensor other, int? dim=None, *, Tensor(a!) 
out) -> Tensor(a!)"); return table->callUnboxed>(out, self, other, dim); #endif } static inline Tensor cross(const Tensor & self, const Tensor & other, c10::optional dim) { #ifdef USE_STATIC_DISPATCH return TypeDefault::cross(self, other, dim); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::cross", ""}).value(); return c10::Dispatcher::singleton().callUnboxed>( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self, other)), self, other, dim); #endif } static inline Tensor & triu_out(Tensor & out, const Tensor & self, int64_t diagonal) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(out, self)))) { case Backend::CPU: return CPUType::triu_out(out, self, diagonal); break; default: AT_ERROR("triu_out not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(out, self))); } #else static auto table = globalATenDispatch().getOpTable("aten::triu.out(Tensor self, int diagonal=0, *, Tensor(a!) out) -> Tensor(a!)"); return table->callUnboxed(out, self, diagonal); #endif } static inline Tensor triu(const Tensor & self, int64_t diagonal) { #ifdef USE_STATIC_DISPATCH return TypeDefault::triu(self, diagonal); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::triu", ""}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self, diagonal); #endif } static inline Tensor & tril_out(Tensor & out, const Tensor & self, int64_t diagonal) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(out, self)))) { case Backend::CPU: return CPUType::tril_out(out, self, diagonal); break; default: AT_ERROR("tril_out not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(out, self))); } #else static auto table = globalATenDispatch().getOpTable("aten::tril.out(Tensor self, int diagonal=0, *, Tensor(a!) out) -> Tensor(a!)"); return table->callUnboxed(out, self, diagonal); #endif } static inline Tensor tril(const Tensor & self, int64_t diagonal) { #ifdef USE_STATIC_DISPATCH return TypeDefault::tril(self, diagonal); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::tril", ""}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self, diagonal); #endif } static inline Tensor tril_indices(int64_t row, int64_t col, int64_t offset, const TensorOptions & options) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(options)))) { case Backend::CPU: return CPUType::tril_indices(row, col, offset, options); break; default: AT_ERROR("tril_indices not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(options))); } #else globalLegacyTypeDispatch().initForTensorTypeSet(at::detail::multi_dispatch_tensor_type_set(options)); static auto table = globalATenDispatch().getOpTable("aten::tril_indices(int row, int col, int offset=0, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor"); return table->callUnboxed(row, col, offset, options); #endif } static inline Tensor triu_indices(int64_t row, int64_t col, int64_t offset, const TensorOptions & options) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(options)))) { case Backend::CPU: return CPUType::triu_indices(row, col, offset, options); break; default: AT_ERROR("triu_indices not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(options))); } #else globalLegacyTypeDispatch().initForTensorTypeSet(at::detail::multi_dispatch_tensor_type_set(options)); static auto table = globalATenDispatch().getOpTable("aten::triu_indices(int row, int col, int offset=0, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor"); return table->callUnboxed(row, col, offset, options); #endif } static inline Tensor trace(const Tensor & self) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)))) { case Backend::CPU: return CPUType::trace(self); break; default: AT_ERROR("trace not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(self))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::trace", ""}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self); #endif } static inline Tensor & ne_out(Tensor & out, const Tensor & self, Scalar other) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(out, self)))) { case Backend::CPU: return CPUType::ne_out(out, self, other); break; case Backend::QuantizedCPU: return QuantizedCPUType::ne_out(out, self, other); break; default: AT_ERROR("ne_out not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(out, self))); } #else static auto table = globalATenDispatch().getOpTable("aten::ne.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)"); return table->callUnboxed(out, self, other); #endif } static inline Tensor ne(const Tensor & self, Scalar other) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)))) { case Backend::CPU: return CPUType::ne(self, other); break; case Backend::QuantizedCPU: return QuantizedCPUType::ne(self, other); break; default: AT_ERROR("ne not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(self))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::ne", "Scalar"}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self, other); #endif } static inline Tensor & ne_out(Tensor & out, const Tensor & self, const Tensor & other) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(out, self, other)))) { case Backend::CPU: return CPUType::ne_out(out, self, other); break; case Backend::QuantizedCPU: return QuantizedCPUType::ne_out(out, self, other); break; default: AT_ERROR("ne_out not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(out, self, other))); } #else static auto table = globalATenDispatch().getOpTable("aten::ne.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) 
out) -> Tensor(a!)"); return table->callUnboxed(out, self, other); #endif } static inline Tensor ne(const Tensor & self, const Tensor & other) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self, other)))) { case Backend::CPU: return CPUType::ne(self, other); break; case Backend::QuantizedCPU: return QuantizedCPUType::ne(self, other); break; default: AT_ERROR("ne not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(self, other))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::ne", "Tensor"}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self, other)), self, other); #endif } static inline Tensor & eq_out(Tensor & out, const Tensor & self, Scalar other) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(out, self)))) { case Backend::CPU: return CPUType::eq_out(out, self, other); break; case Backend::QuantizedCPU: return QuantizedCPUType::eq_out(out, self, other); break; default: AT_ERROR("eq_out not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(out, self))); } #else static auto table = globalATenDispatch().getOpTable("aten::eq.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)"); return table->callUnboxed(out, self, other); #endif } static inline Tensor eq(const Tensor & self, Scalar other) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)))) { case Backend::CPU: return CPUType::eq(self, other); break; case Backend::QuantizedCPU: return QuantizedCPUType::eq(self, other); break; default: AT_ERROR("eq not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(self))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::eq", "Scalar"}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self, other); #endif } static inline Tensor & eq_out(Tensor & out, const Tensor & self, const Tensor & other) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(out, self, other)))) { case Backend::CPU: return CPUType::eq_out(out, self, other); break; case Backend::QuantizedCPU: return QuantizedCPUType::eq_out(out, self, other); break; default: AT_ERROR("eq_out not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(out, self, other))); } #else static auto table = globalATenDispatch().getOpTable("aten::eq.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) 
out) -> Tensor(a!)"); return table->callUnboxed(out, self, other); #endif } static inline Tensor eq(const Tensor & self, const Tensor & other) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self, other)))) { case Backend::CPU: return CPUType::eq(self, other); break; case Backend::QuantizedCPU: return QuantizedCPUType::eq(self, other); break; default: AT_ERROR("eq not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(self, other))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::eq", "Tensor"}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self, other)), self, other); #endif } static inline Tensor & ge_out(Tensor & out, const Tensor & self, Scalar other) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(out, self)))) { case Backend::CPU: return CPUType::ge_out(out, self, other); break; case Backend::QuantizedCPU: return QuantizedCPUType::ge_out(out, self, other); break; default: AT_ERROR("ge_out not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(out, self))); } #else static auto table = globalATenDispatch().getOpTable("aten::ge.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)"); return table->callUnboxed(out, self, other); #endif } static inline Tensor ge(const Tensor & self, Scalar other) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)))) { case Backend::CPU: return CPUType::ge(self, other); break; case Backend::QuantizedCPU: return QuantizedCPUType::ge(self, other); break; default: AT_ERROR("ge not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(self))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::ge", "Scalar"}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self, other); #endif } static inline Tensor & ge_out(Tensor & out, const Tensor & self, const Tensor & other) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(out, self, other)))) { case Backend::CPU: return CPUType::ge_out(out, self, other); break; case Backend::QuantizedCPU: return QuantizedCPUType::ge_out(out, self, other); break; default: AT_ERROR("ge_out not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(out, self, other))); } #else static auto table = globalATenDispatch().getOpTable("aten::ge.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) 
out) -> Tensor(a!)"); return table->callUnboxed(out, self, other); #endif } static inline Tensor ge(const Tensor & self, const Tensor & other) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self, other)))) { case Backend::CPU: return CPUType::ge(self, other); break; case Backend::QuantizedCPU: return QuantizedCPUType::ge(self, other); break; default: AT_ERROR("ge not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(self, other))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::ge", "Tensor"}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self, other)), self, other); #endif } static inline Tensor & le_out(Tensor & out, const Tensor & self, Scalar other) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(out, self)))) { case Backend::CPU: return CPUType::le_out(out, self, other); break; case Backend::QuantizedCPU: return QuantizedCPUType::le_out(out, self, other); break; default: AT_ERROR("le_out not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(out, self))); } #else static auto table = globalATenDispatch().getOpTable("aten::le.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)"); return table->callUnboxed(out, self, other); #endif } static inline Tensor le(const Tensor & self, Scalar other) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)))) { case Backend::CPU: return CPUType::le(self, other); break; case Backend::QuantizedCPU: return QuantizedCPUType::le(self, other); break; default: AT_ERROR("le not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(self))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::le", "Scalar"}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self, other); #endif } static inline Tensor & le_out(Tensor & out, const Tensor & self, const Tensor & other) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(out, self, other)))) { case Backend::CPU: return CPUType::le_out(out, self, other); break; case Backend::QuantizedCPU: return QuantizedCPUType::le_out(out, self, other); break; default: AT_ERROR("le_out not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(out, self, other))); } #else static auto table = globalATenDispatch().getOpTable("aten::le.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) 
out) -> Tensor(a!)"); return table->callUnboxed(out, self, other); #endif } static inline Tensor le(const Tensor & self, const Tensor & other) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self, other)))) { case Backend::CPU: return CPUType::le(self, other); break; case Backend::QuantizedCPU: return QuantizedCPUType::le(self, other); break; default: AT_ERROR("le not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(self, other))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::le", "Tensor"}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self, other)), self, other); #endif } static inline Tensor & gt_out(Tensor & out, const Tensor & self, Scalar other) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(out, self)))) { case Backend::CPU: return CPUType::gt_out(out, self, other); break; case Backend::QuantizedCPU: return QuantizedCPUType::gt_out(out, self, other); break; default: AT_ERROR("gt_out not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(out, self))); } #else static auto table = globalATenDispatch().getOpTable("aten::gt.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)"); return table->callUnboxed(out, self, other); #endif } static inline Tensor gt(const Tensor & self, Scalar other) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)))) { case Backend::CPU: return CPUType::gt(self, other); break; case Backend::QuantizedCPU: return QuantizedCPUType::gt(self, other); break; default: AT_ERROR("gt not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(self))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::gt", "Scalar"}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self, other); #endif } static inline Tensor & gt_out(Tensor & out, const Tensor & self, const Tensor & other) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(out, self, other)))) { case Backend::CPU: return CPUType::gt_out(out, self, other); break; case Backend::QuantizedCPU: return QuantizedCPUType::gt_out(out, self, other); break; default: AT_ERROR("gt_out not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(out, self, other))); } #else static auto table = globalATenDispatch().getOpTable("aten::gt.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) 
out) -> Tensor(a!)"); return table->callUnboxed(out, self, other); #endif } static inline Tensor gt(const Tensor & self, const Tensor & other) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self, other)))) { case Backend::CPU: return CPUType::gt(self, other); break; case Backend::QuantizedCPU: return QuantizedCPUType::gt(self, other); break; default: AT_ERROR("gt not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(self, other))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::gt", "Tensor"}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self, other)), self, other); #endif } static inline Tensor & lt_out(Tensor & out, const Tensor & self, Scalar other) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(out, self)))) { case Backend::CPU: return CPUType::lt_out(out, self, other); break; case Backend::QuantizedCPU: return QuantizedCPUType::lt_out(out, self, other); break; default: AT_ERROR("lt_out not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(out, self))); } #else static auto table = globalATenDispatch().getOpTable("aten::lt.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)"); return table->callUnboxed(out, self, other); #endif } static inline Tensor lt(const Tensor & self, Scalar other) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)))) { case Backend::CPU: return CPUType::lt(self, other); break; case Backend::QuantizedCPU: return QuantizedCPUType::lt(self, other); break; default: AT_ERROR("lt not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(self))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::lt", "Scalar"}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self, other); #endif } static inline Tensor & lt_out(Tensor & out, const Tensor & self, const Tensor & other) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(out, self, other)))) { case Backend::CPU: return CPUType::lt_out(out, self, other); break; case Backend::QuantizedCPU: return QuantizedCPUType::lt_out(out, self, other); break; default: AT_ERROR("lt_out not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(out, self, other))); } #else static auto table = globalATenDispatch().getOpTable("aten::lt.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) 
out) -> Tensor(a!)"); return table->callUnboxed(out, self, other); #endif } static inline Tensor lt(const Tensor & self, const Tensor & other) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self, other)))) { case Backend::CPU: return CPUType::lt(self, other); break; case Backend::QuantizedCPU: return QuantizedCPUType::lt(self, other); break; default: AT_ERROR("lt not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(self, other))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::lt", "Tensor"}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self, other)), self, other); #endif } static inline Tensor & take_out(Tensor & out, const Tensor & self, const Tensor & index) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(out, self, index)))) { case Backend::CPU: return CPUType::take_out(out, self, index); break; default: AT_ERROR("take_out not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(out, self, index))); } #else static auto table = globalATenDispatch().getOpTable("aten::take.out(Tensor self, Tensor index, *, Tensor(a!) out) -> Tensor(a!)"); return table->callUnboxed(out, self, index); #endif } static inline Tensor take(const Tensor & self, const Tensor & index) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self, index)))) { case Backend::CPU: return CPUType::take(self, index); break; default: AT_ERROR("take not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(self, index))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::take", ""}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self, index)), self, index); #endif } static inline Tensor & index_select_out(Tensor & out, const Tensor & self, int64_t dim, const Tensor & index) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(out, self, index)))) { case Backend::CPU: return CPUType::index_select_out(out, self, dim, index); break; default: AT_ERROR("index_select_out not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(out, self, index))); } #else static auto table = globalATenDispatch().getOpTable("aten::index_select.out(Tensor self, int dim, Tensor index, *, Tensor(a!) 
out) -> Tensor(a!)"); return table->callUnboxed(out, self, dim, index); #endif } static inline Tensor index_select(const Tensor & self, int64_t dim, const Tensor & index) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self, index)))) { case Backend::CPU: return CPUType::index_select(self, dim, index); break; case Backend::SparseCPU: return SparseCPUType::index_select(self, dim, index); break; default: AT_ERROR("index_select not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(self, index))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::index_select", ""}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self, index)), self, dim, index); #endif } #ifdef BUILD_NAMEDTENSOR static inline Tensor & index_select_out(Tensor & out, const Tensor & self, Dimname dim, const Tensor & index) { #ifdef USE_STATIC_DISPATCH return TypeDefault::index_select_out(out, self, dim, index); #else static auto table = globalATenDispatch().getOpTable("aten::index_select.dimname_out(Tensor self, Dimname dim, Tensor index, *, Tensor(a!) out) -> Tensor(a!)"); return table->callUnboxed(out, self, dim, index); #endif } #endif #ifdef BUILD_NAMEDTENSOR static inline Tensor index_select(const Tensor & self, Dimname dim, const Tensor & index) { #ifdef USE_STATIC_DISPATCH return TypeDefault::index_select(self, dim, index); #else static auto table = globalATenDispatch().getOpTable("aten::index_select.dimname(Tensor self, Dimname dim, Tensor index) -> Tensor"); return table->callUnboxed(self, dim, index); #endif } #endif static inline Tensor & masked_select_out(Tensor & out, const Tensor & self, const Tensor & mask) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(out, self, mask)))) { case Backend::CPU: return CPUType::masked_select_out(out, self, mask); break; default: AT_ERROR("masked_select_out not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(out, self, mask))); } #else static auto table = globalATenDispatch().getOpTable("aten::masked_select.out(Tensor self, Tensor mask, *, Tensor(a!) 
out) -> Tensor(a!)"); return table->callUnboxed(out, self, mask); #endif } static inline Tensor masked_select(const Tensor & self, const Tensor & mask) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self, mask)))) { case Backend::CPU: return CPUType::masked_select(self, mask); break; default: AT_ERROR("masked_select not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(self, mask))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::masked_select", ""}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self, mask)), self, mask); #endif } static inline Tensor & nonzero_out(Tensor & out, const Tensor & self) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(out, self)))) { case Backend::CPU: return CPUType::nonzero_out(out, self); break; default: AT_ERROR("nonzero_out not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(out, self))); } #else static auto table = globalATenDispatch().getOpTable("aten::nonzero.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)"); return table->callUnboxed(out, self); #endif } static inline Tensor nonzero(const Tensor & self) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)))) { case Backend::CPU: return CPUType::nonzero(self); break; default: AT_ERROR("nonzero not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(self))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::nonzero", ""}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self); #endif } static inline std::vector nonzero_numpy(const Tensor & self) { #ifdef USE_STATIC_DISPATCH return TypeDefault::nonzero_numpy(self); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::nonzero_numpy", ""}).value(); return c10::Dispatcher::singleton().callUnboxedOnly, const Tensor &>( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self); #endif } static inline Tensor & gather_out(Tensor & out, const Tensor & self, int64_t dim, const Tensor & index, bool sparse_grad) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(out, self, index)))) { case Backend::CPU: return CPUType::gather_out(out, self, dim, index, sparse_grad); break; default: AT_ERROR("gather_out not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(out, self, index))); } #else static auto table = globalATenDispatch().getOpTable("aten::gather.out(Tensor self, int dim, Tensor index, *, bool sparse_grad=False, Tensor(a!) 
out) -> Tensor(a!)"); return table->callUnboxed(out, self, dim, index, sparse_grad); #endif } static inline Tensor gather(const Tensor & self, int64_t dim, const Tensor & index, bool sparse_grad) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self, index)))) { case Backend::CPU: return CPUType::gather(self, dim, index, sparse_grad); break; default: AT_ERROR("gather not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(self, index))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::gather", ""}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self, index)), self, dim, index, sparse_grad); #endif } #ifdef BUILD_NAMEDTENSOR static inline Tensor & gather_out(Tensor & out, const Tensor & self, Dimname dim, const Tensor & index, bool sparse_grad) { #ifdef USE_STATIC_DISPATCH return TypeDefault::gather_out(out, self, dim, index, sparse_grad); #else static auto table = globalATenDispatch().getOpTable("aten::gather.dimname_out(Tensor self, Dimname dim, Tensor index, *, bool sparse_grad=False, Tensor(a!) out) -> Tensor(a!)"); return table->callUnboxed(out, self, dim, index, sparse_grad); #endif } #endif #ifdef BUILD_NAMEDTENSOR static inline Tensor gather(const Tensor & self, Dimname dim, const Tensor & index, bool sparse_grad) { #ifdef USE_STATIC_DISPATCH return TypeDefault::gather(self, dim, index, sparse_grad); #else static auto table = globalATenDispatch().getOpTable("aten::gather.dimname(Tensor self, Dimname dim, Tensor index, *, bool sparse_grad=False) -> Tensor"); return table->callUnboxed(self, dim, index, sparse_grad); #endif } #endif static inline Tensor _gather_sparse_backward(const Tensor & self, int64_t dim, const Tensor & index, const Tensor & grad) { #ifdef USE_STATIC_DISPATCH return TypeDefault::_gather_sparse_backward(self, dim, index, grad); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::_gather_sparse_backward", ""}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self, index, grad)), self, dim, index, grad); #endif } static inline Tensor & addcmul_out(Tensor & out, const Tensor & self, const Tensor & tensor1, const Tensor & tensor2, Scalar value) { #ifdef USE_STATIC_DISPATCH return TypeDefault::addcmul_out(out, self, tensor1, tensor2, value); #else static auto table = globalATenDispatch().getOpTable("aten::addcmul.out(Tensor self, Tensor tensor1, Tensor tensor2, *, Scalar value=1, Tensor(a!) 
out) -> Tensor(a!)"); return table->callUnboxed(out, self, tensor1, tensor2, value); #endif } static inline Tensor addcmul(const Tensor & self, const Tensor & tensor1, const Tensor & tensor2, Scalar value) { #ifdef USE_STATIC_DISPATCH return TypeDefault::addcmul(self, tensor1, tensor2, value); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::addcmul", ""}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self, tensor1, tensor2)), self, tensor1, tensor2, value); #endif } static inline Tensor & addcdiv_out(Tensor & out, const Tensor & self, const Tensor & tensor1, const Tensor & tensor2, Scalar value) { #ifdef USE_STATIC_DISPATCH return TypeDefault::addcdiv_out(out, self, tensor1, tensor2, value); #else static auto table = globalATenDispatch().getOpTable("aten::addcdiv.out(Tensor self, Tensor tensor1, Tensor tensor2, *, Scalar value=1, Tensor(a!) out) -> Tensor(a!)"); return table->callUnboxed(out, self, tensor1, tensor2, value); #endif } static inline Tensor addcdiv(const Tensor & self, const Tensor & tensor1, const Tensor & tensor2, Scalar value) { #ifdef USE_STATIC_DISPATCH return TypeDefault::addcdiv(self, tensor1, tensor2, value); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::addcdiv", ""}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self, tensor1, tensor2)), self, tensor1, tensor2, value); #endif } static inline std::tuple lstsq_out(Tensor & X, Tensor & qr, const Tensor & self, const Tensor & A) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(X, qr, self, A)))) { case Backend::CPU: return CPUType::lstsq_out(X, qr, self, A); break; default: AT_ERROR("lstsq_out not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(X, qr, self, A))); } #else static auto table = globalATenDispatch().getOpTable("aten::lstsq.X(Tensor self, Tensor A, *, Tensor(a!) X, Tensor(b!) qr) -> (Tensor(a!) solution, Tensor(b!) QR)"); return table->callUnboxed, Tensor &, Tensor &, const Tensor &, const Tensor &>(X, qr, self, A); #endif } static inline std::tuple lstsq(const Tensor & self, const Tensor & A) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self, A)))) { case Backend::CPU: return CPUType::lstsq(self, A); break; default: AT_ERROR("lstsq not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(self, A))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::lstsq", ""}).value(); return c10::Dispatcher::singleton().callUnboxedOnly, const Tensor &, const Tensor &>( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self, A)), self, A); #endif } static inline std::tuple triangular_solve_out(Tensor & X, Tensor & M, const Tensor & self, const Tensor & A, bool upper, bool transpose, bool unitriangular) { #ifdef USE_STATIC_DISPATCH return TypeDefault::triangular_solve_out(X, M, self, A, upper, transpose, unitriangular); #else static auto table = globalATenDispatch().getOpTable("aten::triangular_solve.X(Tensor self, Tensor A, bool upper=True, bool transpose=False, bool unitriangular=False, *, Tensor(a!) X, Tensor(b!) M) -> (Tensor(a!) solution, Tensor(b!) 
cloned_coefficient)"); return table->callUnboxed, Tensor &, Tensor &, const Tensor &, const Tensor &, bool, bool, bool>(X, M, self, A, upper, transpose, unitriangular); #endif } static inline std::tuple triangular_solve(const Tensor & self, const Tensor & A, bool upper, bool transpose, bool unitriangular) { #ifdef USE_STATIC_DISPATCH return TypeDefault::triangular_solve(self, A, upper, transpose, unitriangular); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::triangular_solve", ""}).value(); return c10::Dispatcher::singleton().callUnboxedOnly, const Tensor &, const Tensor &, bool, bool, bool>( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self, A)), self, A, upper, transpose, unitriangular); #endif } static inline std::tuple _triangular_solve_helper(const Tensor & self, const Tensor & A, bool upper, bool transpose, bool unitriangular) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self, A)))) { case Backend::CPU: return CPUType::_triangular_solve_helper(self, A, upper, transpose, unitriangular); break; default: AT_ERROR("_triangular_solve_helper not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(self, A))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::_triangular_solve_helper", ""}).value(); return c10::Dispatcher::singleton().callUnboxedOnly, const Tensor &, const Tensor &, bool, bool, bool>( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self, A)), self, A, upper, transpose, unitriangular); #endif } static inline std::tuple symeig_out(Tensor & e, Tensor & V, const Tensor & self, bool eigenvectors, bool upper) { #ifdef USE_STATIC_DISPATCH return TypeDefault::symeig_out(e, V, self, eigenvectors, upper); #else static auto table = globalATenDispatch().getOpTable("aten::symeig.e(Tensor self, bool eigenvectors=False, bool upper=True, *, Tensor(a!) e, Tensor(b!) V) -> (Tensor(a!) eigenvalues, Tensor(b!) 
eigenvectors)"); return table->callUnboxed, Tensor &, Tensor &, const Tensor &, bool, bool>(e, V, self, eigenvectors, upper); #endif } static inline std::tuple symeig(const Tensor & self, bool eigenvectors, bool upper) { #ifdef USE_STATIC_DISPATCH return TypeDefault::symeig(self, eigenvectors, upper); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::symeig", ""}).value(); return c10::Dispatcher::singleton().callUnboxedOnly, const Tensor &, bool, bool>( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self, eigenvectors, upper); #endif } static inline std::tuple _symeig_helper(const Tensor & self, bool eigenvectors, bool upper) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)))) { case Backend::CPU: return CPUType::_symeig_helper(self, eigenvectors, upper); break; default: AT_ERROR("_symeig_helper not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(self))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::_symeig_helper", ""}).value(); return c10::Dispatcher::singleton().callUnboxedOnly, const Tensor &, bool, bool>( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self, eigenvectors, upper); #endif } static inline std::tuple eig_out(Tensor & e, Tensor & v, const Tensor & self, bool eigenvectors) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(e, v, self)))) { case Backend::CPU: return CPUType::eig_out(e, v, self, eigenvectors); break; default: AT_ERROR("eig_out not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(e, v, self))); } #else static auto table = globalATenDispatch().getOpTable("aten::eig.e(Tensor self, bool eigenvectors=False, *, Tensor(a!) e, Tensor(b!) v) -> (Tensor(a!) eigenvalues, Tensor(b!) eigenvectors)"); return table->callUnboxed, Tensor &, Tensor &, const Tensor &, bool>(e, v, self, eigenvectors); #endif } static inline std::tuple eig(const Tensor & self, bool eigenvectors) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)))) { case Backend::CPU: return CPUType::eig(self, eigenvectors); break; default: AT_ERROR("eig not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(self))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::eig", ""}).value(); return c10::Dispatcher::singleton().callUnboxedOnly, const Tensor &, bool>( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self, eigenvectors); #endif } static inline std::tuple svd_out(Tensor & U, Tensor & S, Tensor & V, const Tensor & self, bool some, bool compute_uv) { #ifdef USE_STATIC_DISPATCH return TypeDefault::svd_out(U, S, V, self, some, compute_uv); #else static auto table = globalATenDispatch().getOpTable("aten::svd.U(Tensor self, bool some=True, bool compute_uv=True, *, Tensor(a!) U, Tensor(b!) S, Tensor(c!) V) -> (Tensor(a!) U, Tensor(b!) S, Tensor(c!) 
V)"); return table->callUnboxed, Tensor &, Tensor &, Tensor &, const Tensor &, bool, bool>(U, S, V, self, some, compute_uv); #endif } static inline std::tuple svd(const Tensor & self, bool some, bool compute_uv) { #ifdef USE_STATIC_DISPATCH return TypeDefault::svd(self, some, compute_uv); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::svd", ""}).value(); return c10::Dispatcher::singleton().callUnboxedOnly, const Tensor &, bool, bool>( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self, some, compute_uv); #endif } static inline std::tuple _svd_helper(const Tensor & self, bool some, bool compute_uv) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)))) { case Backend::CPU: return CPUType::_svd_helper(self, some, compute_uv); break; default: AT_ERROR("_svd_helper not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(self))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::_svd_helper", ""}).value(); return c10::Dispatcher::singleton().callUnboxedOnly, const Tensor &, bool, bool>( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self, some, compute_uv); #endif } static inline Tensor & cholesky_out(Tensor & out, const Tensor & self, bool upper) { #ifdef USE_STATIC_DISPATCH return TypeDefault::cholesky_out(out, self, upper); #else static auto table = globalATenDispatch().getOpTable("aten::cholesky.out(Tensor self, bool upper=False, *, Tensor(a!) out) -> Tensor(a!)"); return table->callUnboxed(out, self, upper); #endif } static inline Tensor cholesky(const Tensor & self, bool upper) { #ifdef USE_STATIC_DISPATCH return TypeDefault::cholesky(self, upper); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::cholesky", ""}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self, upper); #endif } static inline Tensor _cholesky_helper(const Tensor & self, bool upper) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)))) { case Backend::CPU: return CPUType::_cholesky_helper(self, upper); break; default: AT_ERROR("_cholesky_helper not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(self))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::_cholesky_helper", ""}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self, upper); #endif } static inline Tensor & cholesky_solve_out(Tensor & out, const Tensor & self, const Tensor & input2, bool upper) { #ifdef USE_STATIC_DISPATCH return TypeDefault::cholesky_solve_out(out, self, input2, upper); #else static auto table = globalATenDispatch().getOpTable("aten::cholesky_solve.out(Tensor self, Tensor input2, bool upper=False, *, Tensor(a!) 
out) -> Tensor(a!)"); return table->callUnboxed(out, self, input2, upper); #endif } static inline Tensor cholesky_solve(const Tensor & self, const Tensor & input2, bool upper) { #ifdef USE_STATIC_DISPATCH return TypeDefault::cholesky_solve(self, input2, upper); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::cholesky_solve", ""}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self, input2)), self, input2, upper); #endif } static inline Tensor _cholesky_solve_helper(const Tensor & self, const Tensor & A, bool upper) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self, A)))) { case Backend::CPU: return CPUType::_cholesky_solve_helper(self, A, upper); break; default: AT_ERROR("_cholesky_solve_helper not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(self, A))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::_cholesky_solve_helper", ""}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self, A)), self, A, upper); #endif } static inline std::tuple solve(const Tensor & self, const Tensor & A) { #ifdef USE_STATIC_DISPATCH return TypeDefault::solve(self, A); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::solve", ""}).value(); return c10::Dispatcher::singleton().callUnboxedOnly, const Tensor &, const Tensor &>( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self, A)), self, A); #endif } static inline std::tuple solve_out(Tensor & solution, Tensor & lu, const Tensor & self, const Tensor & A) { #ifdef USE_STATIC_DISPATCH return TypeDefault::solve_out(solution, lu, self, A); #else static auto table = globalATenDispatch().getOpTable("aten::solve.solution(Tensor self, Tensor A, *, Tensor(a!) solution, Tensor(b!) lu) -> (Tensor(a!) solution, Tensor(b!) LU)"); return table->callUnboxed, Tensor &, Tensor &, const Tensor &, const Tensor &>(solution, lu, self, A); #endif } static inline std::tuple _solve_helper(const Tensor & self, const Tensor & A) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self, A)))) { case Backend::CPU: return CPUType::_solve_helper(self, A); break; default: AT_ERROR("_solve_helper not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(self, A))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::_solve_helper", ""}).value(); return c10::Dispatcher::singleton().callUnboxedOnly, const Tensor &, const Tensor &>( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self, A)), self, A); #endif } static inline Tensor & cholesky_inverse_out(Tensor & out, const Tensor & self, bool upper) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(out, self)))) { case Backend::CPU: return CPUType::cholesky_inverse_out(out, self, upper); break; default: AT_ERROR("cholesky_inverse_out not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(out, self))); } #else static auto table = globalATenDispatch().getOpTable("aten::cholesky_inverse.out(Tensor self, bool upper=False, *, Tensor(a!) 
out) -> Tensor(a!)"); return table->callUnboxed(out, self, upper); #endif } static inline Tensor cholesky_inverse(const Tensor & self, bool upper) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)))) { case Backend::CPU: return CPUType::cholesky_inverse(self, upper); break; default: AT_ERROR("cholesky_inverse not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(self))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::cholesky_inverse", ""}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self, upper); #endif } static inline std::tuple qr_out(Tensor & Q, Tensor & R, const Tensor & self, bool some) { #ifdef USE_STATIC_DISPATCH return TypeDefault::qr_out(Q, R, self, some); #else static auto table = globalATenDispatch().getOpTable("aten::qr.Q(Tensor self, bool some=True, *, Tensor(a!) Q, Tensor(b!) R) -> (Tensor(a!) Q, Tensor(b!) R)"); return table->callUnboxed, Tensor &, Tensor &, const Tensor &, bool>(Q, R, self, some); #endif } static inline std::tuple qr(const Tensor & self, bool some) { #ifdef USE_STATIC_DISPATCH return TypeDefault::qr(self, some); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::qr", ""}).value(); return c10::Dispatcher::singleton().callUnboxedOnly, const Tensor &, bool>( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self, some); #endif } static inline std::tuple _qr_helper(const Tensor & self, bool some) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)))) { case Backend::CPU: return CPUType::_qr_helper(self, some); break; default: AT_ERROR("_qr_helper not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(self))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::_qr_helper", ""}).value(); return c10::Dispatcher::singleton().callUnboxedOnly, const Tensor &, bool>( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self, some); #endif } static inline std::tuple geqrf_out(Tensor & a, Tensor & tau, const Tensor & self) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(a, tau, self)))) { case Backend::CPU: return CPUType::geqrf_out(a, tau, self); break; default: AT_ERROR("geqrf_out not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(a, tau, self))); } #else static auto table = globalATenDispatch().getOpTable("aten::geqrf.a(Tensor self, *, Tensor(a!) a, Tensor(b!) tau) -> (Tensor(a!) a, Tensor(b!) 
tau)"); return table->callUnboxed, Tensor &, Tensor &, const Tensor &>(a, tau, self); #endif } static inline std::tuple geqrf(const Tensor & self) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)))) { case Backend::CPU: return CPUType::geqrf(self); break; default: AT_ERROR("geqrf not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(self))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::geqrf", ""}).value(); return c10::Dispatcher::singleton().callUnboxedOnly, const Tensor &>( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self); #endif } static inline Tensor & orgqr_out(Tensor & out, const Tensor & self, const Tensor & input2) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(out, self, input2)))) { case Backend::CPU: return CPUType::orgqr_out(out, self, input2); break; default: AT_ERROR("orgqr_out not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(out, self, input2))); } #else static auto table = globalATenDispatch().getOpTable("aten::orgqr.out(Tensor self, Tensor input2, *, Tensor(a!) out) -> Tensor(a!)"); return table->callUnboxed(out, self, input2); #endif } static inline Tensor orgqr(const Tensor & self, const Tensor & input2) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self, input2)))) { case Backend::CPU: return CPUType::orgqr(self, input2); break; default: AT_ERROR("orgqr not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(self, input2))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::orgqr", ""}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self, input2)), self, input2); #endif } static inline Tensor & ormqr_out(Tensor & out, const Tensor & self, const Tensor & input2, const Tensor & input3, bool left, bool transpose) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(out, self, input2, input3)))) { case Backend::CPU: return CPUType::ormqr_out(out, self, input2, input3, left, transpose); break; default: AT_ERROR("ormqr_out not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(out, self, input2, input3))); } #else static auto table = globalATenDispatch().getOpTable("aten::ormqr.out(Tensor self, Tensor input2, Tensor input3, bool left=True, bool transpose=False, *, Tensor(a!) 
out) -> Tensor(a!)"); return table->callUnboxed(out, self, input2, input3, left, transpose); #endif } static inline Tensor ormqr(const Tensor & self, const Tensor & input2, const Tensor & input3, bool left, bool transpose) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self, input2, input3)))) { case Backend::CPU: return CPUType::ormqr(self, input2, input3, left, transpose); break; default: AT_ERROR("ormqr not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(self, input2, input3))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::ormqr", ""}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self, input2, input3)), self, input2, input3, left, transpose); #endif } static inline std::tuple _lu_with_info(const Tensor & self, bool pivot, bool check_errors) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)))) { case Backend::CPU: return CPUType::_lu_with_info(self, pivot, check_errors); break; default: AT_ERROR("_lu_with_info not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(self))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::_lu_with_info", ""}).value(); return c10::Dispatcher::singleton().callUnboxedOnly, const Tensor &, bool, bool>( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self, pivot, check_errors); #endif } static inline Tensor & lu_solve_out(Tensor & out, const Tensor & self, const Tensor & LU_data, const Tensor & LU_pivots) { #ifdef USE_STATIC_DISPATCH return TypeDefault::lu_solve_out(out, self, LU_data, LU_pivots); #else static auto table = globalATenDispatch().getOpTable("aten::lu_solve.out(Tensor self, Tensor LU_data, Tensor LU_pivots, *, Tensor(a!) 
out) -> Tensor(a!)"); return table->callUnboxed(out, self, LU_data, LU_pivots); #endif } static inline Tensor lu_solve(const Tensor & self, const Tensor & LU_data, const Tensor & LU_pivots) { #ifdef USE_STATIC_DISPATCH return TypeDefault::lu_solve(self, LU_data, LU_pivots); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::lu_solve", ""}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self, LU_data, LU_pivots)), self, LU_data, LU_pivots); #endif } static inline Tensor _lu_solve_helper(const Tensor & self, const Tensor & LU_data, const Tensor & LU_pivots) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self, LU_data, LU_pivots)))) { case Backend::CPU: return CPUType::_lu_solve_helper(self, LU_data, LU_pivots); break; default: AT_ERROR("_lu_solve_helper not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(self, LU_data, LU_pivots))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::_lu_solve_helper", ""}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self, LU_data, LU_pivots)), self, LU_data, LU_pivots); #endif } static inline Tensor & multinomial_out(Tensor & out, const Tensor & self, int64_t num_samples, bool replacement, Generator * generator) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(out, self)))) { case Backend::CPU: return CPUType::multinomial_out(out, self, num_samples, replacement, generator); break; default: AT_ERROR("multinomial_out not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(out, self))); } #else static auto table = globalATenDispatch().getOpTable("aten::multinomial.out(Tensor self, int num_samples, bool replacement=False, *, Generator? generator=None, Tensor(a!) 
out) -> Tensor(a!)"); return table->callUnboxed(out, self, num_samples, replacement, generator); #endif } static inline Tensor multinomial(const Tensor & self, int64_t num_samples, bool replacement, Generator * generator) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)))) { case Backend::CPU: return CPUType::multinomial(self, num_samples, replacement, generator); break; default: AT_ERROR("multinomial not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(self))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::multinomial", ""}).value(); return c10::Dispatcher::singleton().callUnboxedOnly( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self, num_samples, replacement, generator); #endif } static inline std::tuple _multinomial_alias_setup(const Tensor & probs) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(probs)))) { case Backend::CPU: return CPUType::_multinomial_alias_setup(probs); break; default: AT_ERROR("_multinomial_alias_setup not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(probs))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::_multinomial_alias_setup", ""}).value(); return c10::Dispatcher::singleton().callUnboxedOnly, const Tensor &>( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(probs)), probs); #endif } static inline Tensor _multinomial_alias_draw(const Tensor & J, const Tensor & q, int64_t num_samples, Generator * generator) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(J, q)))) { case Backend::CPU: return CPUType::_multinomial_alias_draw(J, q, num_samples, generator); break; default: AT_ERROR("_multinomial_alias_draw not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(J, q))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::_multinomial_alias_draw", ""}).value(); return c10::Dispatcher::singleton().callUnboxedOnly( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(J, q)), J, q, num_samples, generator); #endif } static inline Tensor & lgamma_out(Tensor & out, const Tensor & self) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(out, self)))) { case Backend::CPU: return CPUType::lgamma_out(out, self); break; default: AT_ERROR("lgamma_out not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(out, self))); } #else static auto table = globalATenDispatch().getOpTable("aten::lgamma.out(Tensor self, *, Tensor(a!) 
out) -> Tensor(a!)"); return table->callUnboxed(out, self); #endif } static inline Tensor lgamma(const Tensor & self) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)))) { case Backend::CPU: return CPUType::lgamma(self); break; default: AT_ERROR("lgamma not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(self))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::lgamma", ""}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self); #endif } static inline Tensor & digamma_out(Tensor & out, const Tensor & self) { #ifdef USE_STATIC_DISPATCH return TypeDefault::digamma_out(out, self); #else static auto table = globalATenDispatch().getOpTable("aten::digamma.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)"); return table->callUnboxed(out, self); #endif } static inline Tensor digamma(const Tensor & self) { #ifdef USE_STATIC_DISPATCH return TypeDefault::digamma(self); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::digamma", ""}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self); #endif } static inline Tensor & polygamma_out(Tensor & out, int64_t n, const Tensor & self) { #ifdef USE_STATIC_DISPATCH return TypeDefault::polygamma_out(out, n, self); #else static auto table = globalATenDispatch().getOpTable("aten::polygamma.out(int n, Tensor self, *, Tensor(a!) out) -> Tensor(a!)"); return table->callUnboxed(out, n, self); #endif } static inline Tensor polygamma(int64_t n, const Tensor & self) { #ifdef USE_STATIC_DISPATCH return TypeDefault::polygamma(n, self); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::polygamma", ""}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), n, self); #endif } static inline Tensor erfinv(const Tensor & self) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)))) { case Backend::CPU: return CPUType::erfinv(self); break; default: AT_ERROR("erfinv not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(self))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::erfinv", ""}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self); #endif } static inline Tensor & erfinv_out(Tensor & out, const Tensor & self) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(out, self)))) { case Backend::CPU: return CPUType::erfinv_out(out, self); break; default: AT_ERROR("erfinv_out not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(out, self))); } #else static auto table = globalATenDispatch().getOpTable("aten::erfinv.out(Tensor self, *, Tensor(a!) 
out) -> Tensor(a!)"); return table->callUnboxed(out, self); #endif } static inline Tensor sign(const Tensor & self) { #ifdef USE_STATIC_DISPATCH return TypeDefault::sign(self); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::sign", ""}).value(); return c10::Dispatcher::singleton().callUnboxedOnly( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self); #endif } static inline Tensor & sign_out(Tensor & out, const Tensor & self) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(out, self)))) { case Backend::CPU: return CPUType::sign_out(out, self); break; default: AT_ERROR("sign_out not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(out, self))); } #else static auto table = globalATenDispatch().getOpTable("aten::sign.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)"); return table->callUnboxed(out, self); #endif } static inline Tensor dist(const Tensor & self, const Tensor & other, Scalar p) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self, other)))) { case Backend::CPU: return CPUType::dist(self, other, p); break; default: AT_ERROR("dist not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(self, other))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::dist", ""}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self, other)), self, other, p); #endif } static inline Tensor & atan2_out(Tensor & out, const Tensor & self, const Tensor & other) { #ifdef USE_STATIC_DISPATCH return TypeDefault::atan2_out(out, self, other); #else static auto table = globalATenDispatch().getOpTable("aten::atan2.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)"); return table->callUnboxed(out, self, other); #endif } static inline Tensor atan2(const Tensor & self, const Tensor & other) { #ifdef USE_STATIC_DISPATCH return TypeDefault::atan2(self, other); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::atan2", ""}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self, other)), self, other); #endif } static inline Tensor & lerp_out(Tensor & out, const Tensor & self, const Tensor & end, Scalar weight) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(out, self, end)))) { case Backend::CPU: return CPUType::lerp_out(out, self, end, weight); break; default: AT_ERROR("lerp_out not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(out, self, end))); } #else static auto table = globalATenDispatch().getOpTable("aten::lerp.Scalar_out(Tensor self, Tensor end, Scalar weight, *, Tensor(a!) 
out) -> Tensor(a!)"); return table->callUnboxed(out, self, end, weight); #endif } static inline Tensor & lerp_out(Tensor & out, const Tensor & self, const Tensor & end, const Tensor & weight) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(out, self, end, weight)))) { case Backend::CPU: return CPUType::lerp_out(out, self, end, weight); break; default: AT_ERROR("lerp_out not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(out, self, end, weight))); } #else static auto table = globalATenDispatch().getOpTable("aten::lerp.Tensor_out(Tensor self, Tensor end, Tensor weight, *, Tensor(a!) out) -> Tensor(a!)"); return table->callUnboxed(out, self, end, weight); #endif } static inline Tensor lerp(const Tensor & self, const Tensor & end, Scalar weight) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self, end)))) { case Backend::CPU: return CPUType::lerp(self, end, weight); break; default: AT_ERROR("lerp not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(self, end))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::lerp", "Scalar"}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self, end)), self, end, weight); #endif } static inline Tensor lerp(const Tensor & self, const Tensor & end, const Tensor & weight) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self, end, weight)))) { case Backend::CPU: return CPUType::lerp(self, end, weight); break; default: AT_ERROR("lerp not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(self, end, weight))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::lerp", "Tensor"}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self, end, weight)), self, end, weight); #endif } static inline Tensor & histc_out(Tensor & out, const Tensor & self, int64_t bins, Scalar min, Scalar max) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(out, self)))) { case Backend::CPU: return CPUType::histc_out(out, self, bins, min, max); break; default: AT_ERROR("histc_out not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(out, self))); } #else static auto table = globalATenDispatch().getOpTable("aten::histc.out(Tensor self, int bins=100, Scalar min=0, Scalar max=0, *, Tensor(a!) 
out) -> Tensor(a!)"); return table->callUnboxed(out, self, bins, min, max); #endif } static inline Tensor histc(const Tensor & self, int64_t bins, Scalar min, Scalar max) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)))) { case Backend::CPU: return CPUType::histc(self, bins, min, max); break; default: AT_ERROR("histc not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(self))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::histc", ""}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self, bins, min, max); #endif } static inline Tensor & fmod_out(Tensor & out, const Tensor & self, Scalar other) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(out, self)))) { case Backend::CPU: return CPUType::fmod_out(out, self, other); break; default: AT_ERROR("fmod_out not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(out, self))); } #else static auto table = globalATenDispatch().getOpTable("aten::fmod.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)"); return table->callUnboxed(out, self, other); #endif } static inline Tensor fmod(const Tensor & self, Scalar other) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)))) { case Backend::CPU: return CPUType::fmod(self, other); break; default: AT_ERROR("fmod not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(self))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::fmod", "Scalar"}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self, other); #endif } static inline Tensor & fmod_out(Tensor & out, const Tensor & self, const Tensor & other) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(out, self, other)))) { case Backend::CPU: return CPUType::fmod_out(out, self, other); break; default: AT_ERROR("fmod_out not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(out, self, other))); } #else static auto table = globalATenDispatch().getOpTable("aten::fmod.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) 
out) -> Tensor(a!)"); return table->callUnboxed(out, self, other); #endif } static inline Tensor fmod(const Tensor & self, const Tensor & other) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self, other)))) { case Backend::CPU: return CPUType::fmod(self, other); break; default: AT_ERROR("fmod not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(self, other))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::fmod", "Tensor"}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self, other)), self, other); #endif } static inline Tensor & remainder_out(Tensor & out, const Tensor & self, Scalar other) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(out, self)))) { case Backend::CPU: return CPUType::remainder_out(out, self, other); break; default: AT_ERROR("remainder_out not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(out, self))); } #else static auto table = globalATenDispatch().getOpTable("aten::remainder.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)"); return table->callUnboxed(out, self, other); #endif } static inline Tensor remainder(const Tensor & self, Scalar other) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)))) { case Backend::CPU: return CPUType::remainder(self, other); break; default: AT_ERROR("remainder not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(self))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::remainder", "Scalar"}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self, other); #endif } static inline Tensor & remainder_out(Tensor & out, const Tensor & self, const Tensor & other) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(out, self, other)))) { case Backend::CPU: return CPUType::remainder_out(out, self, other); break; default: AT_ERROR("remainder_out not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(out, self, other))); } #else static auto table = globalATenDispatch().getOpTable("aten::remainder.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) 
out) -> Tensor(a!)"); return table->callUnboxed(out, self, other); #endif } static inline Tensor remainder(const Tensor & self, const Tensor & other) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self, other)))) { case Backend::CPU: return CPUType::remainder(self, other); break; default: AT_ERROR("remainder not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(self, other))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::remainder", "Tensor"}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self, other)), self, other); #endif } static inline Tensor & min_out(Tensor & out, const Tensor & self, const Tensor & other) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(out, self, other)))) { case Backend::CPU: return CPUType::min_out(out, self, other); break; default: AT_ERROR("min_out not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(out, self, other))); } #else static auto table = globalATenDispatch().getOpTable("aten::min.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)"); return table->callUnboxed(out, self, other); #endif } static inline Tensor min(const Tensor & self, const Tensor & other) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self, other)))) { case Backend::CPU: return CPUType::min(self, other); break; default: AT_ERROR("min not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(self, other))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::min", "other"}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self, other)), self, other); #endif } static inline Tensor min(const Tensor & self) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)))) { case Backend::CPU: return CPUType::min(self); break; case Backend::QuantizedCPU: return QuantizedCPUType::min(self); break; default: AT_ERROR("min not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(self))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::min", ""}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self); #endif } static inline Tensor & max_out(Tensor & out, const Tensor & self, const Tensor & other) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(out, self, other)))) { case Backend::CPU: return CPUType::max_out(out, self, other); break; default: AT_ERROR("max_out not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(out, self, other))); } #else static auto table = globalATenDispatch().getOpTable("aten::max.out(Tensor self, Tensor other, *, Tensor(a!) 
out) -> Tensor(a!)"); return table->callUnboxed(out, self, other); #endif } static inline Tensor max(const Tensor & self, const Tensor & other) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self, other)))) { case Backend::CPU: return CPUType::max(self, other); break; default: AT_ERROR("max not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(self, other))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::max", "other"}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self, other)), self, other); #endif } static inline Tensor max(const Tensor & self) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)))) { case Backend::CPU: return CPUType::max(self); break; case Backend::QuantizedCPU: return QuantizedCPUType::max(self); break; default: AT_ERROR("max not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(self))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::max", ""}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self); #endif } static inline Tensor median(const Tensor & self) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)))) { case Backend::CPU: return CPUType::median(self); break; default: AT_ERROR("median not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(self))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::median", ""}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self); #endif } static inline std::tuple sort_out(Tensor & values, Tensor & indices, const Tensor & self, int64_t dim, bool descending) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(values, indices, self)))) { case Backend::CPU: return CPUType::sort_out(values, indices, self, dim, descending); break; default: AT_ERROR("sort_out not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(values, indices, self))); } #else static auto table = globalATenDispatch().getOpTable("aten::sort.values(Tensor self, int dim=-1, bool descending=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) 
indices)"); return table->callUnboxed, Tensor &, Tensor &, const Tensor &, int64_t, bool>(values, indices, self, dim, descending); #endif } static inline std::tuple sort(const Tensor & self, int64_t dim, bool descending) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)))) { case Backend::CPU: return CPUType::sort(self, dim, descending); break; case Backend::QuantizedCPU: return QuantizedCPUType::sort(self, dim, descending); break; default: AT_ERROR("sort not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(self))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::sort", ""}).value(); return c10::Dispatcher::singleton().callUnboxedOnly, const Tensor &, int64_t, bool>( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self, dim, descending); #endif } #ifdef BUILD_NAMEDTENSOR static inline std::tuple sort_out(Tensor & values, Tensor & indices, const Tensor & self, Dimname dim, bool descending) { #ifdef USE_STATIC_DISPATCH return TypeDefault::sort_out(values, indices, self, dim, descending); #else static auto table = globalATenDispatch().getOpTable("aten::sort.dimname_values(Tensor self, Dimname dim, bool descending=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)"); return table->callUnboxed, Tensor &, Tensor &, const Tensor &, Dimname, bool>(values, indices, self, dim, descending); #endif } #endif #ifdef BUILD_NAMEDTENSOR static inline std::tuple sort(const Tensor & self, Dimname dim, bool descending) { #ifdef USE_STATIC_DISPATCH return TypeDefault::sort(self, dim, descending); #else static auto table = globalATenDispatch().getOpTable("aten::sort.dimname(Tensor self, Dimname dim, bool descending=False) -> (Tensor values, Tensor indices)"); return table->callUnboxed, const Tensor &, Dimname, bool>(self, dim, descending); #endif } #endif static inline Tensor argsort(const Tensor & self, int64_t dim, bool descending) { #ifdef USE_STATIC_DISPATCH return TypeDefault::argsort(self, dim, descending); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::argsort", ""}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self, dim, descending); #endif } #ifdef BUILD_NAMEDTENSOR static inline Tensor argsort(const Tensor & self, Dimname dim, bool descending) { #ifdef USE_STATIC_DISPATCH return TypeDefault::argsort(self, dim, descending); #else static auto table = globalATenDispatch().getOpTable("aten::argsort.dimname(Tensor self, Dimname dim, bool descending=False) -> Tensor"); return table->callUnboxed(self, dim, descending); #endif } #endif static inline std::tuple topk_out(Tensor & values, Tensor & indices, const Tensor & self, int64_t k, int64_t dim, bool largest, bool sorted) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(values, indices, self)))) { case Backend::CPU: return CPUType::topk_out(values, indices, self, k, dim, largest, sorted); break; default: AT_ERROR("topk_out not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(values, indices, self))); } #else static auto table = globalATenDispatch().getOpTable("aten::topk.values(Tensor self, int k, int dim=-1, bool largest=True, bool sorted=True, *, Tensor(a!) values, Tensor(b!) indices) ->(Tensor(a!) values, Tensor(b!) 
indices)"); return table->callUnboxed, Tensor &, Tensor &, const Tensor &, int64_t, int64_t, bool, bool>(values, indices, self, k, dim, largest, sorted); #endif } static inline std::tuple topk(const Tensor & self, int64_t k, int64_t dim, bool largest, bool sorted) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)))) { case Backend::CPU: return CPUType::topk(self, k, dim, largest, sorted); break; case Backend::QuantizedCPU: return QuantizedCPUType::topk(self, k, dim, largest, sorted); break; default: AT_ERROR("topk not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(self))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::topk", ""}).value(); return c10::Dispatcher::singleton().callUnboxedOnly, const Tensor &, int64_t, int64_t, bool, bool>( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self, k, dim, largest, sorted); #endif } static inline Tensor all(const Tensor & self) { #ifdef USE_STATIC_DISPATCH return TypeDefault::all(self); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::all", ""}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self); #endif } static inline Tensor any(const Tensor & self) { #ifdef USE_STATIC_DISPATCH return TypeDefault::any(self); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::any", ""}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self); #endif } static inline Tensor & renorm_out(Tensor & out, const Tensor & self, Scalar p, int64_t dim, Scalar maxnorm) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(out, self)))) { case Backend::CPU: return CPUType::renorm_out(out, self, p, dim, maxnorm); break; default: AT_ERROR("renorm_out not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(out, self))); } #else static auto table = globalATenDispatch().getOpTable("aten::renorm.out(Tensor self, Scalar p, int dim, Scalar maxnorm, *, Tensor(a!) 
out) -> Tensor(a!)"); return table->callUnboxed(out, self, p, dim, maxnorm); #endif } static inline Tensor renorm(const Tensor & self, Scalar p, int64_t dim, Scalar maxnorm) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)))) { case Backend::CPU: return CPUType::renorm(self, p, dim, maxnorm); break; default: AT_ERROR("renorm not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(self))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::renorm", ""}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self, p, dim, maxnorm); #endif } static inline bool equal(const Tensor & self, const Tensor & other) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self, other)))) { case Backend::CPU: return CPUType::equal(self, other); break; case Backend::QuantizedCPU: return QuantizedCPUType::equal(self, other); break; default: AT_ERROR("equal not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(self, other))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::equal", ""}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self, other)), self, other); #endif } static inline Tensor & pow_out(Tensor & out, const Tensor & self, const Tensor & exponent) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(out, self, exponent)))) { case Backend::CPU: return CPUType::pow_out(out, self, exponent); break; default: AT_ERROR("pow_out not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(out, self, exponent))); } #else static auto table = globalATenDispatch().getOpTable("aten::pow.Tensor_Tensor_out(Tensor self, Tensor exponent, *, Tensor(a!) out) -> Tensor(a!)"); return table->callUnboxed(out, self, exponent); #endif } static inline Tensor pow(const Tensor & self, const Tensor & exponent) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self, exponent)))) { case Backend::CPU: return CPUType::pow(self, exponent); break; default: AT_ERROR("pow not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(self, exponent))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::pow", "Tensor_Tensor"}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self, exponent)), self, exponent); #endif } static inline Tensor & pow_out(Tensor & out, Scalar self, const Tensor & exponent) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(out, exponent)))) { case Backend::CPU: return CPUType::pow_out(out, self, exponent); break; default: AT_ERROR("pow_out not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(out, exponent))); } #else static auto table = globalATenDispatch().getOpTable("aten::pow.Scalar_out(Scalar self, Tensor exponent, *, Tensor(a!) 
out) -> Tensor(a!)"); return table->callUnboxed(out, self, exponent); #endif } static inline Tensor pow(Scalar self, const Tensor & exponent) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(exponent)))) { case Backend::CPU: return CPUType::pow(self, exponent); break; default: AT_ERROR("pow not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(exponent))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::pow", "Scalar"}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(exponent)), self, exponent); #endif } static inline Tensor & normal_out(Tensor & out, const Tensor & mean, double std, Generator * generator) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(out, mean)))) { case Backend::CPU: return CPUType::normal_out(out, mean, std, generator); break; default: AT_ERROR("normal_out not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(out, mean))); } #else static auto table = globalATenDispatch().getOpTable("aten::normal.Tensor_float_out(Tensor mean, float std=1, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)"); return table->callUnboxed(out, mean, std, generator); #endif } static inline Tensor normal(const Tensor & mean, double std, Generator * generator) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(mean)))) { case Backend::CPU: return CPUType::normal(mean, std, generator); break; default: AT_ERROR("normal not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(mean))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::normal", "Tensor_float"}).value(); return c10::Dispatcher::singleton().callUnboxedOnly( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(mean)), mean, std, generator); #endif } static inline Tensor & normal_out(Tensor & out, double mean, const Tensor & std, Generator * generator) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(out, std)))) { case Backend::CPU: return CPUType::normal_out(out, mean, std, generator); break; default: AT_ERROR("normal_out not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(out, std))); } #else static auto table = globalATenDispatch().getOpTable("aten::normal.float_Tensor_out(float mean, Tensor std, *, Generator? generator=None, Tensor(a!) 
out) -> Tensor(a!)"); return table->callUnboxed(out, mean, std, generator); #endif } static inline Tensor normal(double mean, const Tensor & std, Generator * generator) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(std)))) { case Backend::CPU: return CPUType::normal(mean, std, generator); break; default: AT_ERROR("normal not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(std))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::normal", "float_Tensor"}).value(); return c10::Dispatcher::singleton().callUnboxedOnly( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(std)), mean, std, generator); #endif } static inline Tensor & normal_out(Tensor & out, const Tensor & mean, const Tensor & std, Generator * generator) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(out, mean, std)))) { case Backend::CPU: return CPUType::normal_out(out, mean, std, generator); break; default: AT_ERROR("normal_out not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(out, mean, std))); } #else static auto table = globalATenDispatch().getOpTable("aten::normal.Tensor_Tensor_out(Tensor mean, Tensor std, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)"); return table->callUnboxed(out, mean, std, generator); #endif } static inline Tensor normal(const Tensor & mean, const Tensor & std, Generator * generator) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(mean, std)))) { case Backend::CPU: return CPUType::normal(mean, std, generator); break; default: AT_ERROR("normal not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(mean, std))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::normal", "Tensor_Tensor"}).value(); return c10::Dispatcher::singleton().callUnboxedOnly( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(mean, std)), mean, std, generator); #endif } static inline Tensor normal(double mean, double std, IntArrayRef size, Generator * generator, const TensorOptions & options) { #ifdef USE_STATIC_DISPATCH return TypeDefault::normal(mean, std, size, generator, options); #else globalLegacyTypeDispatch().initForTensorTypeSet(at::detail::multi_dispatch_tensor_type_set(options)); static auto table = globalATenDispatch().getOpTable("aten::normal.float_float(float mean, float std, int[] size, *, Generator? generator=None, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor"); return table->callUnboxed(mean, std, size, generator, options); #endif } static inline Tensor & normal_out(Tensor & out, double mean, double std, IntArrayRef size, Generator * generator) { #ifdef USE_STATIC_DISPATCH return TypeDefault::normal_out(out, mean, std, size, generator); #else static auto table = globalATenDispatch().getOpTable("aten::normal.float_float_out(float mean, float std, int[] size, *, Generator? generator=None, Tensor(a!) 
out) -> Tensor(a!)"); return table->callUnboxed(out, mean, std, size, generator); #endif } static inline Tensor alias(const Tensor & self) { #ifdef USE_STATIC_DISPATCH return TypeDefault::alias(self); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::alias", ""}).value(); return c10::Dispatcher::singleton().callUnboxedOnly( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self); #endif } static inline Tensor _addr(const Tensor & self, const Tensor & vec1, const Tensor & vec2, Scalar beta, Scalar alpha) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self, vec1, vec2)))) { case Backend::CPU: return CPUType::_addr(self, vec1, vec2, beta, alpha); break; default: AT_ERROR("_addr not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(self, vec1, vec2))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::_addr", ""}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self, vec1, vec2)), self, vec1, vec2, beta, alpha); #endif } static inline Tensor & _addr_(Tensor & self, const Tensor & vec1, const Tensor & vec2, Scalar beta, Scalar alpha) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self, vec1, vec2)))) { case Backend::CPU: return CPUType::_addr_(self, vec1, vec2, beta, alpha); break; default: AT_ERROR("_addr_ not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(self, vec1, vec2))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::_addr_", ""}).value(); return c10::Dispatcher::singleton().callUnboxedOnly( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self, vec1, vec2)), self, vec1, vec2, beta, alpha); #endif } static inline Tensor & _addr_out(Tensor & out, const Tensor & self, const Tensor & vec1, const Tensor & vec2, Scalar beta, Scalar alpha) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(out, self, vec1, vec2)))) { case Backend::CPU: return CPUType::_addr_out(out, self, vec1, vec2, beta, alpha); break; default: AT_ERROR("_addr_out not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(out, self, vec1, vec2))); } #else static auto table = globalATenDispatch().getOpTable("aten::_addr.out(Tensor self, Tensor vec1, Tensor vec2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) 
out) -> Tensor(a!)"); return table->callUnboxed(out, self, vec1, vec2, beta, alpha); #endif } static inline Tensor & _index_copy_(Tensor & self, int64_t dim, const Tensor & index, const Tensor & source) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self, index, source)))) { case Backend::CPU: return CPUType::_index_copy_(self, dim, index, source); break; default: AT_ERROR("_index_copy_ not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(self, index, source))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::_index_copy_", ""}).value(); return c10::Dispatcher::singleton().callUnboxedOnly( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self, index, source)), self, dim, index, source); #endif } static inline Tensor _cumsum(const Tensor & self, int64_t dim) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)))) { case Backend::CPU: return CPUType::_cumsum(self, dim); break; default: AT_ERROR("_cumsum not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(self))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::_cumsum", ""}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self, dim); #endif } static inline Tensor & _cumsum_out(Tensor & out, const Tensor & self, int64_t dim) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(out, self)))) { case Backend::CPU: return CPUType::_cumsum_out(out, self, dim); break; default: AT_ERROR("_cumsum_out not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(out, self))); } #else static auto table = globalATenDispatch().getOpTable("aten::_cumsum.out(Tensor self, int dim, *, Tensor(a!) out) -> Tensor(a!)"); return table->callUnboxed(out, self, dim); #endif } static inline Tensor _cumprod(const Tensor & self, int64_t dim) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)))) { case Backend::CPU: return CPUType::_cumprod(self, dim); break; default: AT_ERROR("_cumprod not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(self))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::_cumprod", ""}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self, dim); #endif } static inline Tensor & _cumprod_out(Tensor & out, const Tensor & self, int64_t dim) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(out, self)))) { case Backend::CPU: return CPUType::_cumprod_out(out, self, dim); break; default: AT_ERROR("_cumprod_out not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(out, self))); } #else static auto table = globalATenDispatch().getOpTable("aten::_cumprod.out(Tensor self, int dim, *, Tensor(a!) 
out) -> Tensor(a!)"); return table->callUnboxed(out, self, dim); #endif } static inline Tensor _var(const Tensor & self, bool unbiased) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)))) { case Backend::CPU: return CPUType::_var(self, unbiased); break; default: AT_ERROR("_var not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(self))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::_var", ""}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self, unbiased); #endif } static inline Tensor _std(const Tensor & self, bool unbiased) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)))) { case Backend::CPU: return CPUType::_std(self, unbiased); break; default: AT_ERROR("_std not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(self))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::_std", ""}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self, unbiased); #endif } static inline Tensor _cat(TensorList tensors, int64_t dim) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(tensors)))) { case Backend::CPU: return CPUType::_cat(tensors, dim); break; default: AT_ERROR("_cat not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(tensors))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::_cat", ""}).value(); return c10::Dispatcher::singleton().callUnboxedOnly( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(tensors)), tensors, dim); #endif } static inline Tensor & _cat_out(Tensor & out, TensorList tensors, int64_t dim) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(out, tensors)))) { case Backend::CPU: return CPUType::_cat_out(out, tensors, dim); break; default: AT_ERROR("_cat_out not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(out, tensors))); } #else static auto table = globalATenDispatch().getOpTable("aten::_cat.out(Tensor[] tensors, int dim=0, *, Tensor(a!) 
out) -> Tensor(a!)"); return table->callUnboxed(out, tensors, dim); #endif } static inline std::tuple _mode(const Tensor & self, int64_t dim, bool keepdim) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)))) { case Backend::CPU: return CPUType::_mode(self, dim, keepdim); break; default: AT_ERROR("_mode not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(self))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::_mode", ""}).value(); return c10::Dispatcher::singleton().callUnboxedOnly, const Tensor &, int64_t, bool>( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self, dim, keepdim); #endif } static inline std::tuple _mode_out(Tensor & values, Tensor & indices, const Tensor & self, int64_t dim, bool keepdim) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(values, indices, self)))) { case Backend::CPU: return CPUType::_mode_out(values, indices, self, dim, keepdim); break; default: AT_ERROR("_mode_out not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(values, indices, self))); } #else static auto table = globalATenDispatch().getOpTable("aten::_mode.values(Tensor self, int dim=-1, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!), Tensor(b!))"); return table->callUnboxed, Tensor &, Tensor &, const Tensor &, int64_t, bool>(values, indices, self, dim, keepdim); #endif } static inline std::tuple _max(const Tensor & self, int64_t dim, bool keepdim) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)))) { case Backend::CPU: return CPUType::_max(self, dim, keepdim); break; default: AT_ERROR("_max not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(self))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::_max", ""}).value(); return c10::Dispatcher::singleton().callUnboxedOnly, const Tensor &, int64_t, bool>( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self, dim, keepdim); #endif } static inline std::tuple _max_out(Tensor & max, Tensor & max_indices, const Tensor & self, int64_t dim, bool keepdim) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(max, max_indices, self)))) { case Backend::CPU: return CPUType::_max_out(max, max_indices, self, dim, keepdim); break; default: AT_ERROR("_max_out not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(max, max_indices, self))); } #else static auto table = globalATenDispatch().getOpTable("aten::_max.max(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) max, Tensor(b!) 
static inline std::tuple<Tensor &,Tensor &> _max_out(Tensor & max, Tensor & max_indices, const Tensor & self, int64_t dim, bool keepdim) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(max, max_indices, self)))) { case Backend::CPU: return CPUType::_max_out(max, max_indices, self, dim, keepdim); break; default: AT_ERROR("_max_out not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(max, max_indices, self))); } #else static auto table = globalATenDispatch().getOpTable("aten::_max.max(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) max, Tensor(b!) max_indices) -> (Tensor(a!), Tensor(b!))"); return table->callUnboxed<std::tuple<Tensor &,Tensor &>, Tensor &, Tensor &, const Tensor &, int64_t, bool>(max, max_indices, self, dim, keepdim); #endif }
static inline std::tuple<Tensor,Tensor> _min(const Tensor & self, int64_t dim, bool keepdim) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)))) { case Backend::CPU: return CPUType::_min(self, dim, keepdim); break; default: AT_ERROR("_min not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(self))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::_min", ""}).value(); return c10::Dispatcher::singleton().callUnboxedOnly<std::tuple<Tensor,Tensor>, const Tensor &, int64_t, bool>( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self, dim, keepdim); #endif }
static inline std::tuple<Tensor &,Tensor &> _min_out(Tensor & min, Tensor & min_indices, const Tensor & self, int64_t dim, bool keepdim) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(min, min_indices, self)))) { case Backend::CPU: return CPUType::_min_out(min, min_indices, self, dim, keepdim); break; default: AT_ERROR("_min_out not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(min, min_indices, self))); } #else static auto table = globalATenDispatch().getOpTable("aten::_min.min(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) min, Tensor(b!) min_indices) -> (Tensor(a!), Tensor(b!))"); return table->callUnboxed<std::tuple<Tensor &,Tensor &>, Tensor &, Tensor &, const Tensor &, int64_t, bool>(min, min_indices, self, dim, keepdim); #endif }
static inline Tensor & binary_cross_entropy_out(Tensor & out, const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(out, self, target, weight)))) { case Backend::CPU: return CPUType::binary_cross_entropy_out(out, self, target, weight, reduction); break; default: AT_ERROR("binary_cross_entropy_out not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(out, self, target, weight))); } #else static auto table = globalATenDispatch().getOpTable("aten::binary_cross_entropy.out(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, *, Tensor(a!) out) -> Tensor(a!)"); return table->callUnboxed<Tensor &, Tensor &, const Tensor &, const Tensor &, const Tensor &, int64_t>(out, self, target, weight, reduction); #endif }
static inline Tensor binary_cross_entropy(const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self, target, weight)))) { case Backend::CPU: return CPUType::binary_cross_entropy(self, target, weight, reduction); break; default: AT_ERROR("binary_cross_entropy not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(self, target, weight))); } #else static auto table = globalATenDispatch().getOpTable("aten::binary_cross_entropy(Tensor self, Tensor target, Tensor?
weight=None, int reduction=Mean) -> Tensor"); return table->callUnboxed(self, target, weight, reduction); #endif } static inline Tensor & binary_cross_entropy_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(grad_input, grad_output, self, target, weight)))) { case Backend::CPU: return CPUType::binary_cross_entropy_backward_out(grad_input, grad_output, self, target, weight, reduction); break; default: AT_ERROR("binary_cross_entropy_backward_out not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(grad_input, grad_output, self, target, weight))); } #else static auto table = globalATenDispatch().getOpTable("aten::binary_cross_entropy_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, *, Tensor(a!) grad_input) -> Tensor(a!)"); return table->callUnboxed(grad_input, grad_output, self, target, weight, reduction); #endif } static inline Tensor binary_cross_entropy_backward(const Tensor & grad_output, const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(grad_output, self, target, weight)))) { case Backend::CPU: return CPUType::binary_cross_entropy_backward(grad_output, self, target, weight, reduction); break; default: AT_ERROR("binary_cross_entropy_backward not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(grad_output, self, target, weight))); } #else static auto table = globalATenDispatch().getOpTable("aten::binary_cross_entropy_backward(Tensor grad_output, Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean) -> Tensor"); return table->callUnboxed(grad_output, self, target, weight, reduction); #endif } static inline Tensor & mse_loss_out(Tensor & out, const Tensor & self, const Tensor & target, int64_t reduction) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(out, self, target)))) { case Backend::CPU: return CPUType::mse_loss_out(out, self, target, reduction); break; default: AT_ERROR("mse_loss_out not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(out, self, target))); } #else static auto table = globalATenDispatch().getOpTable("aten::mse_loss.out(Tensor self, Tensor target, int reduction=Mean, *, Tensor(a!) 
out) -> Tensor(a!)"); return table->callUnboxed(out, self, target, reduction); #endif } static inline Tensor mse_loss(const Tensor & self, const Tensor & target, int64_t reduction) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self, target)))) { case Backend::CPU: return CPUType::mse_loss(self, target, reduction); break; default: AT_ERROR("mse_loss not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(self, target))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::mse_loss", ""}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self, target)), self, target, reduction); #endif } static inline Tensor & mse_loss_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, const Tensor & target, int64_t reduction) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(grad_input, grad_output, self, target)))) { case Backend::CPU: return CPUType::mse_loss_backward_out(grad_input, grad_output, self, target, reduction); break; default: AT_ERROR("mse_loss_backward_out not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(grad_input, grad_output, self, target))); } #else static auto table = globalATenDispatch().getOpTable("aten::mse_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, int reduction, *, Tensor(a!) grad_input) -> Tensor(a!)"); return table->callUnboxed(grad_input, grad_output, self, target, reduction); #endif } static inline Tensor mse_loss_backward(const Tensor & grad_output, const Tensor & self, const Tensor & target, int64_t reduction) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(grad_output, self, target)))) { case Backend::CPU: return CPUType::mse_loss_backward(grad_output, self, target, reduction); break; default: AT_ERROR("mse_loss_backward not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(grad_output, self, target))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::mse_loss_backward", ""}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(grad_output, self, target)), grad_output, self, target, reduction); #endif } static inline Tensor & l1_loss_out(Tensor & out, const Tensor & self, const Tensor & target, int64_t reduction) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(out, self, target)))) { case Backend::CPU: return CPUType::l1_loss_out(out, self, target, reduction); break; default: AT_ERROR("l1_loss_out not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(out, self, target))); } #else static auto table = globalATenDispatch().getOpTable("aten::l1_loss.out(Tensor self, Tensor target, int reduction=Mean, *, Tensor(a!) 
out) -> Tensor(a!)"); return table->callUnboxed(out, self, target, reduction); #endif } static inline Tensor l1_loss(const Tensor & self, const Tensor & target, int64_t reduction) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self, target)))) { case Backend::CPU: return CPUType::l1_loss(self, target, reduction); break; default: AT_ERROR("l1_loss not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(self, target))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::l1_loss", ""}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self, target)), self, target, reduction); #endif } static inline Tensor & l1_loss_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, const Tensor & target, int64_t reduction) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(grad_input, grad_output, self, target)))) { case Backend::CPU: return CPUType::l1_loss_backward_out(grad_input, grad_output, self, target, reduction); break; default: AT_ERROR("l1_loss_backward_out not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(grad_input, grad_output, self, target))); } #else static auto table = globalATenDispatch().getOpTable("aten::l1_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, int reduction, *, Tensor(a!) grad_input) -> Tensor(a!)"); return table->callUnboxed(grad_input, grad_output, self, target, reduction); #endif } static inline Tensor l1_loss_backward(const Tensor & grad_output, const Tensor & self, const Tensor & target, int64_t reduction) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(grad_output, self, target)))) { case Backend::CPU: return CPUType::l1_loss_backward(grad_output, self, target, reduction); break; default: AT_ERROR("l1_loss_backward not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(grad_output, self, target))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::l1_loss_backward", ""}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(grad_output, self, target)), grad_output, self, target, reduction); #endif } static inline Tensor & multi_margin_loss_out(Tensor & out, const Tensor & self, const Tensor & target, Scalar p, Scalar margin, const Tensor & weight, int64_t reduction) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(out, self, target, weight)))) { case Backend::CPU: return CPUType::multi_margin_loss_out(out, self, target, p, margin, weight, reduction); break; default: AT_ERROR("multi_margin_loss_out not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(out, self, target, weight))); } #else static auto table = globalATenDispatch().getOpTable("aten::multi_margin_loss.out(Tensor self, Tensor target, Scalar p=1, Scalar margin=1, Tensor? weight=None, int reduction=Mean, *, Tensor(a!) 
out) -> Tensor(a!)"); return table->callUnboxed(out, self, target, p, margin, weight, reduction); #endif } static inline Tensor multi_margin_loss(const Tensor & self, const Tensor & target, Scalar p, Scalar margin, const Tensor & weight, int64_t reduction) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self, target, weight)))) { case Backend::CPU: return CPUType::multi_margin_loss(self, target, p, margin, weight, reduction); break; default: AT_ERROR("multi_margin_loss not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(self, target, weight))); } #else static auto table = globalATenDispatch().getOpTable("aten::multi_margin_loss(Tensor self, Tensor target, Scalar p=1, Scalar margin=1, Tensor? weight=None, int reduction=Mean) -> Tensor"); return table->callUnboxed(self, target, p, margin, weight, reduction); #endif } static inline Tensor & multi_margin_loss_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, const Tensor & target, Scalar p, Scalar margin, const Tensor & weight, int64_t reduction) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(grad_input, grad_output, self, target, weight)))) { case Backend::CPU: return CPUType::multi_margin_loss_backward_out(grad_input, grad_output, self, target, p, margin, weight, reduction); break; default: AT_ERROR("multi_margin_loss_backward_out not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(grad_input, grad_output, self, target, weight))); } #else static auto table = globalATenDispatch().getOpTable("aten::multi_margin_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, Scalar p, Scalar margin, Tensor? weight=None, int reduction=Mean, *, Tensor(a!) grad_input) -> Tensor(a!)"); return table->callUnboxed(grad_input, grad_output, self, target, p, margin, weight, reduction); #endif } static inline Tensor multi_margin_loss_backward(const Tensor & grad_output, const Tensor & self, const Tensor & target, Scalar p, Scalar margin, const Tensor & weight, int64_t reduction) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(grad_output, self, target, weight)))) { case Backend::CPU: return CPUType::multi_margin_loss_backward(grad_output, self, target, p, margin, weight, reduction); break; default: AT_ERROR("multi_margin_loss_backward not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(grad_output, self, target, weight))); } #else static auto table = globalATenDispatch().getOpTable("aten::multi_margin_loss_backward(Tensor grad_output, Tensor self, Tensor target, Scalar p, Scalar margin, Tensor? weight=None, int reduction=Mean) -> Tensor"); return table->callUnboxed(grad_output, self, target, p, margin, weight, reduction); #endif } static inline Tensor & multilabel_margin_loss_out(Tensor & out, const Tensor & self, const Tensor & target, int64_t reduction) { #ifdef USE_STATIC_DISPATCH return TypeDefault::multilabel_margin_loss_out(out, self, target, reduction); #else static auto table = globalATenDispatch().getOpTable("aten::multilabel_margin_loss.out(Tensor self, Tensor target, int reduction=Mean, *, Tensor(a!) 
out) -> Tensor(a!)"); return table->callUnboxed(out, self, target, reduction); #endif } static inline Tensor multilabel_margin_loss(const Tensor & self, const Tensor & target, int64_t reduction) { #ifdef USE_STATIC_DISPATCH return TypeDefault::multilabel_margin_loss(self, target, reduction); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::multilabel_margin_loss", ""}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self, target)), self, target, reduction); #endif } static inline std::tuple multilabel_margin_loss_forward_out(Tensor & output, Tensor & is_target, const Tensor & self, const Tensor & target, int64_t reduction) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(output, is_target, self, target)))) { case Backend::CPU: return CPUType::multilabel_margin_loss_forward_out(output, is_target, self, target, reduction); break; default: AT_ERROR("multilabel_margin_loss_forward_out not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(output, is_target, self, target))); } #else static auto table = globalATenDispatch().getOpTable("aten::multilabel_margin_loss_forward.output(Tensor self, Tensor target, int reduction, *, Tensor(a!) output, Tensor(b!) is_target) -> (Tensor(a!), Tensor(b!))"); return table->callUnboxed, Tensor &, Tensor &, const Tensor &, const Tensor &, int64_t>(output, is_target, self, target, reduction); #endif } static inline std::tuple multilabel_margin_loss_forward(const Tensor & self, const Tensor & target, int64_t reduction) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self, target)))) { case Backend::CPU: return CPUType::multilabel_margin_loss_forward(self, target, reduction); break; default: AT_ERROR("multilabel_margin_loss_forward not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(self, target))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::multilabel_margin_loss_forward", ""}).value(); return c10::Dispatcher::singleton().callUnboxedOnly, const Tensor &, const Tensor &, int64_t>( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self, target)), self, target, reduction); #endif } static inline Tensor & multilabel_margin_loss_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, const Tensor & target, int64_t reduction, const Tensor & is_target) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(grad_input, grad_output, self, target, is_target)))) { case Backend::CPU: return CPUType::multilabel_margin_loss_backward_out(grad_input, grad_output, self, target, reduction, is_target); break; default: AT_ERROR("multilabel_margin_loss_backward_out not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(grad_input, grad_output, self, target, is_target))); } #else static auto table = globalATenDispatch().getOpTable("aten::multilabel_margin_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, int reduction, Tensor is_target, *, Tensor(a!) 
grad_input) -> Tensor(a!)"); return table->callUnboxed(grad_input, grad_output, self, target, reduction, is_target); #endif } static inline Tensor multilabel_margin_loss_backward(const Tensor & grad_output, const Tensor & self, const Tensor & target, int64_t reduction, const Tensor & is_target) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(grad_output, self, target, is_target)))) { case Backend::CPU: return CPUType::multilabel_margin_loss_backward(grad_output, self, target, reduction, is_target); break; default: AT_ERROR("multilabel_margin_loss_backward not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(grad_output, self, target, is_target))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::multilabel_margin_loss_backward", ""}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(grad_output, self, target, is_target)), grad_output, self, target, reduction, is_target); #endif } static inline Tensor & nll_loss_out(Tensor & out, const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction, int64_t ignore_index) { #ifdef USE_STATIC_DISPATCH return TypeDefault::nll_loss_out(out, self, target, weight, reduction, ignore_index); #else static auto table = globalATenDispatch().getOpTable("aten::nll_loss.out(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, int ignore_index=-100, *, Tensor(a!) out) -> Tensor(a!)"); return table->callUnboxed(out, self, target, weight, reduction, ignore_index); #endif } static inline Tensor nll_loss(const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction, int64_t ignore_index) { #ifdef USE_STATIC_DISPATCH return TypeDefault::nll_loss(self, target, weight, reduction, ignore_index); #else static auto table = globalATenDispatch().getOpTable("aten::nll_loss(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, int ignore_index=-100) -> Tensor"); return table->callUnboxed(self, target, weight, reduction, ignore_index); #endif } static inline std::tuple nll_loss_forward_out(Tensor & output, Tensor & total_weight, const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction, int64_t ignore_index) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(output, total_weight, self, target, weight)))) { case Backend::CPU: return CPUType::nll_loss_forward_out(output, total_weight, self, target, weight, reduction, ignore_index); break; default: AT_ERROR("nll_loss_forward_out not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(output, total_weight, self, target, weight))); } #else static auto table = globalATenDispatch().getOpTable("aten::nll_loss_forward.output(Tensor self, Tensor target, Tensor? weight, int reduction, int ignore_index, *, Tensor(a!) output, Tensor(b!) 
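// Editorial sketch (not generated): nll_loss expects log-probabilities plus integer class
// targets; an undefined Tensor passed for the optional `weight` means no per-class
// rescaling. The helper name is hypothetical and the reduction value 1 corresponds to
// at::Reduction::Mean in this ATen version.
static inline Tensor example_nll_loss_mean(const Tensor & log_probs, const Tensor & target) {
  return nll_loss(log_probs, target, /*weight=*/Tensor(), /*reduction=*/1, /*ignore_index=*/-100);
}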
static inline std::tuple<Tensor &,Tensor &> nll_loss_forward_out(Tensor & output, Tensor & total_weight, const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction, int64_t ignore_index) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(output, total_weight, self, target, weight)))) { case Backend::CPU: return CPUType::nll_loss_forward_out(output, total_weight, self, target, weight, reduction, ignore_index); break; default: AT_ERROR("nll_loss_forward_out not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(output, total_weight, self, target, weight))); } #else static auto table = globalATenDispatch().getOpTable("aten::nll_loss_forward.output(Tensor self, Tensor target, Tensor? weight, int reduction, int ignore_index, *, Tensor(a!) output, Tensor(b!) total_weight) -> (Tensor(a!), Tensor(b!))"); return table->callUnboxed<std::tuple<Tensor &,Tensor &>, Tensor &, Tensor &, const Tensor &, const Tensor &, const Tensor &, int64_t, int64_t>(output, total_weight, self, target, weight, reduction, ignore_index); #endif }
static inline std::tuple<Tensor,Tensor> nll_loss_forward(const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction, int64_t ignore_index) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self, target, weight)))) { case Backend::CPU: return CPUType::nll_loss_forward(self, target, weight, reduction, ignore_index); break; default: AT_ERROR("nll_loss_forward not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(self, target, weight))); } #else static auto table = globalATenDispatch().getOpTable("aten::nll_loss_forward(Tensor self, Tensor target, Tensor? weight, int reduction, int ignore_index) -> (Tensor output, Tensor total_weight)"); return table->callUnboxed<std::tuple<Tensor,Tensor>, const Tensor &, const Tensor &, const Tensor &, int64_t, int64_t>(self, target, weight, reduction, ignore_index); #endif }
static inline Tensor & nll_loss_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction, int64_t ignore_index, const Tensor & total_weight) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(grad_input, grad_output, self, target, weight, total_weight)))) { case Backend::CPU: return CPUType::nll_loss_backward_out(grad_input, grad_output, self, target, weight, reduction, ignore_index, total_weight); break; default: AT_ERROR("nll_loss_backward_out not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(grad_input, grad_output, self, target, weight, total_weight))); } #else static auto table = globalATenDispatch().getOpTable("aten::nll_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, Tensor? weight, int reduction, int ignore_index, Tensor total_weight, *, Tensor(a!) grad_input) -> Tensor(a!)"); return table->callUnboxed<Tensor &, Tensor &, const Tensor &, const Tensor &, const Tensor &, const Tensor &, int64_t, int64_t, const Tensor &>(grad_input, grad_output, self, target, weight, reduction, ignore_index, total_weight); #endif }
static inline Tensor nll_loss_backward(const Tensor & grad_output, const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction, int64_t ignore_index, const Tensor & total_weight) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(grad_output, self, target, weight, total_weight)))) { case Backend::CPU: return CPUType::nll_loss_backward(grad_output, self, target, weight, reduction, ignore_index, total_weight); break; default: AT_ERROR("nll_loss_backward not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(grad_output, self, target, weight, total_weight))); } #else static auto table = globalATenDispatch().getOpTable("aten::nll_loss_backward(Tensor grad_output, Tensor self, Tensor target, Tensor?
weight, int reduction, int ignore_index, Tensor total_weight) -> Tensor"); return table->callUnboxed<Tensor, const Tensor &, const Tensor &, const Tensor &, const Tensor &, int64_t, int64_t, const Tensor &>(grad_output, self, target, weight, reduction, ignore_index, total_weight); #endif }
static inline Tensor & nll_loss2d_out(Tensor & out, const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction, int64_t ignore_index) { #ifdef USE_STATIC_DISPATCH return TypeDefault::nll_loss2d_out(out, self, target, weight, reduction, ignore_index); #else static auto table = globalATenDispatch().getOpTable("aten::nll_loss2d.out(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, int ignore_index=-100, *, Tensor(a!) out) -> Tensor(a!)"); return table->callUnboxed<Tensor &, Tensor &, const Tensor &, const Tensor &, const Tensor &, int64_t, int64_t>(out, self, target, weight, reduction, ignore_index); #endif }
static inline Tensor nll_loss2d(const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction, int64_t ignore_index) { #ifdef USE_STATIC_DISPATCH return TypeDefault::nll_loss2d(self, target, weight, reduction, ignore_index); #else static auto table = globalATenDispatch().getOpTable("aten::nll_loss2d(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, int ignore_index=-100) -> Tensor"); return table->callUnboxed<Tensor, const Tensor &, const Tensor &, const Tensor &, int64_t, int64_t>(self, target, weight, reduction, ignore_index); #endif }
static inline std::tuple<Tensor &,Tensor &> nll_loss2d_forward_out(Tensor & output, Tensor & total_weight, const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction, int64_t ignore_index) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(output, total_weight, self, target, weight)))) { case Backend::CPU: return CPUType::nll_loss2d_forward_out(output, total_weight, self, target, weight, reduction, ignore_index); break; default: AT_ERROR("nll_loss2d_forward_out not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(output, total_weight, self, target, weight))); } #else static auto table = globalATenDispatch().getOpTable("aten::nll_loss2d_forward.output(Tensor self, Tensor target, Tensor? weight, int reduction, int ignore_index, *, Tensor(a!) output, Tensor(b!) total_weight) -> (Tensor(a!), Tensor(b!))"); return table->callUnboxed<std::tuple<Tensor &,Tensor &>, Tensor &, Tensor &, const Tensor &, const Tensor &, const Tensor &, int64_t, int64_t>(output, total_weight, self, target, weight, reduction, ignore_index); #endif }
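// Editorial sketch (not generated): nll_loss2d is the spatial variant used for dense
// prediction, with (N, C, H, W) log-probabilities and (N, H, W) integer targets. The
// helper name is hypothetical; reduction value 2 corresponds to at::Reduction::Sum here.
static inline Tensor example_nll_loss2d_sum(const Tensor & log_probs, const Tensor & target) {
  return nll_loss2d(log_probs, target, /*weight=*/Tensor(), /*reduction=*/2, /*ignore_index=*/-100);
}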
static inline std::tuple<Tensor,Tensor> nll_loss2d_forward(const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction, int64_t ignore_index) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self, target, weight)))) { case Backend::CPU: return CPUType::nll_loss2d_forward(self, target, weight, reduction, ignore_index); break; default: AT_ERROR("nll_loss2d_forward not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(self, target, weight))); } #else static auto table = globalATenDispatch().getOpTable("aten::nll_loss2d_forward(Tensor self, Tensor target, Tensor? weight, int reduction, int ignore_index) -> (Tensor output, Tensor total_weight)"); return table->callUnboxed<std::tuple<Tensor,Tensor>, const Tensor &, const Tensor &, const Tensor &, int64_t, int64_t>(self, target, weight, reduction, ignore_index); #endif }
static inline Tensor & nll_loss2d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction, int64_t ignore_index, const Tensor & total_weight) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(grad_input, grad_output, self, target, weight, total_weight)))) { case Backend::CPU: return CPUType::nll_loss2d_backward_out(grad_input, grad_output, self, target, weight, reduction, ignore_index, total_weight); break; default: AT_ERROR("nll_loss2d_backward_out not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(grad_input, grad_output, self, target, weight, total_weight))); } #else static auto table = globalATenDispatch().getOpTable("aten::nll_loss2d_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, Tensor? weight, int reduction, int ignore_index, Tensor total_weight, *, Tensor(a!) grad_input) -> Tensor(a!)"); return table->callUnboxed<Tensor &, Tensor &, const Tensor &, const Tensor &, const Tensor &, const Tensor &, int64_t, int64_t, const Tensor &>(grad_input, grad_output, self, target, weight, reduction, ignore_index, total_weight); #endif }
static inline Tensor nll_loss2d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction, int64_t ignore_index, const Tensor & total_weight) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(grad_output, self, target, weight, total_weight)))) { case Backend::CPU: return CPUType::nll_loss2d_backward(grad_output, self, target, weight, reduction, ignore_index, total_weight); break; default: AT_ERROR("nll_loss2d_backward not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(grad_output, self, target, weight, total_weight))); } #else static auto table = globalATenDispatch().getOpTable("aten::nll_loss2d_backward(Tensor grad_output, Tensor self, Tensor target, Tensor? weight, int reduction, int ignore_index, Tensor total_weight) -> Tensor"); return table->callUnboxed<Tensor, const Tensor &, const Tensor &, const Tensor &, const Tensor &, int64_t, int64_t, const Tensor &>(grad_output, self, target, weight, reduction, ignore_index, total_weight); #endif }
static inline Tensor & smooth_l1_loss_out(Tensor & out, const Tensor & self, const Tensor & target, int64_t reduction) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(out, self, target)))) { case Backend::CPU: return CPUType::smooth_l1_loss_out(out, self, target, reduction); break; default: AT_ERROR("smooth_l1_loss_out not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(out, self, target))); } #else static auto table = globalATenDispatch().getOpTable("aten::smooth_l1_loss.out(Tensor self, Tensor target, int reduction=Mean, *, Tensor(a!)
out) -> Tensor(a!)"); return table->callUnboxed(out, self, target, reduction); #endif } static inline Tensor smooth_l1_loss(const Tensor & self, const Tensor & target, int64_t reduction) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self, target)))) { case Backend::CPU: return CPUType::smooth_l1_loss(self, target, reduction); break; default: AT_ERROR("smooth_l1_loss not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(self, target))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::smooth_l1_loss", ""}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self, target)), self, target, reduction); #endif } static inline Tensor & smooth_l1_loss_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, const Tensor & target, int64_t reduction) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(grad_input, grad_output, self, target)))) { case Backend::CPU: return CPUType::smooth_l1_loss_backward_out(grad_input, grad_output, self, target, reduction); break; default: AT_ERROR("smooth_l1_loss_backward_out not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(grad_input, grad_output, self, target))); } #else static auto table = globalATenDispatch().getOpTable("aten::smooth_l1_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, int reduction, *, Tensor(a!) grad_input) -> Tensor(a!)"); return table->callUnboxed(grad_input, grad_output, self, target, reduction); #endif } static inline Tensor smooth_l1_loss_backward(const Tensor & grad_output, const Tensor & self, const Tensor & target, int64_t reduction) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(grad_output, self, target)))) { case Backend::CPU: return CPUType::smooth_l1_loss_backward(grad_output, self, target, reduction); break; default: AT_ERROR("smooth_l1_loss_backward not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(grad_output, self, target))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::smooth_l1_loss_backward", ""}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(grad_output, self, target)), grad_output, self, target, reduction); #endif } static inline Tensor & soft_margin_loss_out(Tensor & out, const Tensor & self, const Tensor & target, int64_t reduction) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(out, self, target)))) { case Backend::CPU: return CPUType::soft_margin_loss_out(out, self, target, reduction); break; default: AT_ERROR("soft_margin_loss_out not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(out, self, target))); } #else static auto table = globalATenDispatch().getOpTable("aten::soft_margin_loss.out(Tensor self, Tensor target, int reduction=Mean, *, Tensor(a!) 
out) -> Tensor(a!)"); return table->callUnboxed(out, self, target, reduction); #endif } static inline Tensor soft_margin_loss(const Tensor & self, const Tensor & target, int64_t reduction) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self, target)))) { case Backend::CPU: return CPUType::soft_margin_loss(self, target, reduction); break; default: AT_ERROR("soft_margin_loss not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(self, target))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::soft_margin_loss", ""}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self, target)), self, target, reduction); #endif } static inline Tensor & soft_margin_loss_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, const Tensor & target, int64_t reduction) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(grad_input, grad_output, self, target)))) { case Backend::CPU: return CPUType::soft_margin_loss_backward_out(grad_input, grad_output, self, target, reduction); break; default: AT_ERROR("soft_margin_loss_backward_out not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(grad_input, grad_output, self, target))); } #else static auto table = globalATenDispatch().getOpTable("aten::soft_margin_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, int reduction, *, Tensor(a!) grad_input) -> Tensor(a!)"); return table->callUnboxed(grad_input, grad_output, self, target, reduction); #endif } static inline Tensor soft_margin_loss_backward(const Tensor & grad_output, const Tensor & self, const Tensor & target, int64_t reduction) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(grad_output, self, target)))) { case Backend::CPU: return CPUType::soft_margin_loss_backward(grad_output, self, target, reduction); break; default: AT_ERROR("soft_margin_loss_backward not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(grad_output, self, target))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::soft_margin_loss_backward", ""}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(grad_output, self, target)), grad_output, self, target, reduction); #endif } static inline Tensor & elu_out(Tensor & out, const Tensor & self, Scalar alpha, Scalar scale, Scalar input_scale) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(out, self)))) { case Backend::CPU: return CPUType::elu_out(out, self, alpha, scale, input_scale); break; default: AT_ERROR("elu_out not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(out, self))); } #else static auto table = globalATenDispatch().getOpTable("aten::elu.out(Tensor self, Scalar alpha=1, Scalar scale=1, Scalar input_scale=1, *, Tensor(a!) 
out) -> Tensor(a!)"); return table->callUnboxed(out, self, alpha, scale, input_scale); #endif } static inline Tensor elu(const Tensor & self, Scalar alpha, Scalar scale, Scalar input_scale) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)))) { case Backend::CPU: return CPUType::elu(self, alpha, scale, input_scale); break; default: AT_ERROR("elu not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(self))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::elu", ""}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self, alpha, scale, input_scale); #endif } static inline Tensor & elu_backward_out(Tensor & grad_input, const Tensor & grad_output, Scalar alpha, Scalar scale, Scalar input_scale, const Tensor & output) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(grad_input, grad_output, output)))) { case Backend::CPU: return CPUType::elu_backward_out(grad_input, grad_output, alpha, scale, input_scale, output); break; default: AT_ERROR("elu_backward_out not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(grad_input, grad_output, output))); } #else static auto table = globalATenDispatch().getOpTable("aten::elu_backward.grad_input(Tensor grad_output, Scalar alpha, Scalar scale, Scalar input_scale, Tensor output, *, Tensor(a!) grad_input) -> Tensor(a!)"); return table->callUnboxed(grad_input, grad_output, alpha, scale, input_scale, output); #endif } static inline Tensor elu_backward(const Tensor & grad_output, Scalar alpha, Scalar scale, Scalar input_scale, const Tensor & output) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(grad_output, output)))) { case Backend::CPU: return CPUType::elu_backward(grad_output, alpha, scale, input_scale, output); break; default: AT_ERROR("elu_backward not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(grad_output, output))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::elu_backward", ""}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(grad_output, output)), grad_output, alpha, scale, input_scale, output); #endif } static inline Tensor & elu_(Tensor & self, Scalar alpha, Scalar scale, Scalar input_scale) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)))) { case Backend::CPU: return CPUType::elu_(self, alpha, scale, input_scale); break; default: AT_ERROR("elu_ not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(self))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::elu_", ""}).value(); return c10::Dispatcher::singleton().callUnboxedOnly( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self, alpha, scale, input_scale); #endif } static inline Tensor & glu_out(Tensor & out, const Tensor & self, int64_t dim) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(out, self)))) { case Backend::CPU: return CPUType::glu_out(out, self, dim); break; default: AT_ERROR("glu_out not 
implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(out, self))); } #else static auto table = globalATenDispatch().getOpTable("aten::glu.out(Tensor self, int dim=-1, *, Tensor(a!) out) -> Tensor(a!)"); return table->callUnboxed(out, self, dim); #endif } static inline Tensor glu(const Tensor & self, int64_t dim) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)))) { case Backend::CPU: return CPUType::glu(self, dim); break; default: AT_ERROR("glu not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(self))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::glu", ""}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self, dim); #endif } static inline Tensor & glu_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, int64_t dim) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(grad_input, grad_output, self)))) { case Backend::CPU: return CPUType::glu_backward_out(grad_input, grad_output, self, dim); break; default: AT_ERROR("glu_backward_out not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(grad_input, grad_output, self))); } #else static auto table = globalATenDispatch().getOpTable("aten::glu_backward.grad_input(Tensor grad_output, Tensor self, int dim, *, Tensor(a!) grad_input) -> Tensor(a!)"); return table->callUnboxed(grad_input, grad_output, self, dim); #endif } static inline Tensor glu_backward(const Tensor & grad_output, const Tensor & self, int64_t dim) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(grad_output, self)))) { case Backend::CPU: return CPUType::glu_backward(grad_output, self, dim); break; default: AT_ERROR("glu_backward not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(grad_output, self))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::glu_backward", ""}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(grad_output, self)), grad_output, self, dim); #endif } static inline Tensor & hardtanh_out(Tensor & out, const Tensor & self, Scalar min_val, Scalar max_val) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(out, self)))) { case Backend::CPU: return CPUType::hardtanh_out(out, self, min_val, max_val); break; default: AT_ERROR("hardtanh_out not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(out, self))); } #else static auto table = globalATenDispatch().getOpTable("aten::hardtanh.out(Tensor self, Scalar min_val=-1, Scalar max_val=1, *, Tensor(a!) 
out) -> Tensor(a!)"); return table->callUnboxed(out, self, min_val, max_val); #endif } static inline Tensor hardtanh(const Tensor & self, Scalar min_val, Scalar max_val) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)))) { case Backend::CPU: return CPUType::hardtanh(self, min_val, max_val); break; default: AT_ERROR("hardtanh not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(self))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::hardtanh", ""}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self, min_val, max_val); #endif } static inline Tensor & hardtanh_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, Scalar min_val, Scalar max_val) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(grad_input, grad_output, self)))) { case Backend::CPU: return CPUType::hardtanh_backward_out(grad_input, grad_output, self, min_val, max_val); break; default: AT_ERROR("hardtanh_backward_out not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(grad_input, grad_output, self))); } #else static auto table = globalATenDispatch().getOpTable("aten::hardtanh_backward.grad_input(Tensor grad_output, Tensor self, Scalar min_val, Scalar max_val, *, Tensor(a!) grad_input) -> Tensor(a!)"); return table->callUnboxed(grad_input, grad_output, self, min_val, max_val); #endif } static inline Tensor hardtanh_backward(const Tensor & grad_output, const Tensor & self, Scalar min_val, Scalar max_val) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(grad_output, self)))) { case Backend::CPU: return CPUType::hardtanh_backward(grad_output, self, min_val, max_val); break; default: AT_ERROR("hardtanh_backward not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(grad_output, self))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::hardtanh_backward", ""}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(grad_output, self)), grad_output, self, min_val, max_val); #endif } static inline Tensor & hardtanh_(Tensor & self, Scalar min_val, Scalar max_val) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)))) { case Backend::CPU: return CPUType::hardtanh_(self, min_val, max_val); break; default: AT_ERROR("hardtanh_ not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(self))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::hardtanh_", ""}).value(); return c10::Dispatcher::singleton().callUnboxedOnly( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self, min_val, max_val); #endif } static inline Tensor & leaky_relu_out(Tensor & out, const Tensor & self, Scalar negative_slope) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(out, self)))) { case Backend::CPU: return CPUType::leaky_relu_out(out, self, negative_slope); break; default: AT_ERROR("leaky_relu_out not implemented for ", 
at::toString(at::detail::multi_dispatch_tensor_type_set(out, self))); } #else static auto table = globalATenDispatch().getOpTable("aten::leaky_relu.out(Tensor self, Scalar negative_slope=0.01, *, Tensor(a!) out) -> Tensor(a!)"); return table->callUnboxed(out, self, negative_slope); #endif } static inline Tensor leaky_relu(const Tensor & self, Scalar negative_slope) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)))) { case Backend::CPU: return CPUType::leaky_relu(self, negative_slope); break; default: AT_ERROR("leaky_relu not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(self))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::leaky_relu", ""}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self, negative_slope); #endif } static inline Tensor & leaky_relu_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, Scalar negative_slope) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(grad_input, grad_output, self)))) { case Backend::CPU: return CPUType::leaky_relu_backward_out(grad_input, grad_output, self, negative_slope); break; default: AT_ERROR("leaky_relu_backward_out not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(grad_input, grad_output, self))); } #else static auto table = globalATenDispatch().getOpTable("aten::leaky_relu_backward.grad_input(Tensor grad_output, Tensor self, Scalar negative_slope, *, Tensor(a!) grad_input) -> Tensor(a!)"); return table->callUnboxed(grad_input, grad_output, self, negative_slope); #endif } static inline Tensor leaky_relu_backward(const Tensor & grad_output, const Tensor & self, Scalar negative_slope) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(grad_output, self)))) { case Backend::CPU: return CPUType::leaky_relu_backward(grad_output, self, negative_slope); break; default: AT_ERROR("leaky_relu_backward not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(grad_output, self))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::leaky_relu_backward", ""}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(grad_output, self)), grad_output, self, negative_slope); #endif } static inline Tensor & leaky_relu_(Tensor & self, Scalar negative_slope) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)))) { case Backend::CPU: return CPUType::leaky_relu_(self, negative_slope); break; default: AT_ERROR("leaky_relu_ not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(self))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::leaky_relu_", ""}).value(); return c10::Dispatcher::singleton().callUnboxedOnly( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self, negative_slope); #endif } static inline Tensor & log_sigmoid_out(Tensor & out, const Tensor & self) { #ifdef USE_STATIC_DISPATCH return TypeDefault::log_sigmoid_out(out, self); #else static auto table = globalATenDispatch().getOpTable("aten::log_sigmoid.out(Tensor self, *, 
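// Usage sketch (illustrative only; assumes <ATen/ATen.h>): leaky_relu keeps
// positive values and multiplies negative ones by negative_slope.
//
//   at::Tensor x = at::randn({4, 8});
//   at::Tensor y = at::leaky_relu(x, /*negative_slope=*/0.01);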
Tensor(a!) out) -> Tensor(a!)"); return table->callUnboxed<Tensor &, Tensor &, const Tensor &>(out, self); #endif }
static inline Tensor log_sigmoid(const Tensor & self) { #ifdef USE_STATIC_DISPATCH return TypeDefault::log_sigmoid(self); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::log_sigmoid", ""}).value(); return c10::Dispatcher::singleton().callUnboxed<Tensor, const Tensor &>( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self); #endif }
static inline std::tuple<Tensor &,Tensor &> log_sigmoid_forward_out(Tensor & output, Tensor & buffer, const Tensor & self) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(output, buffer, self)))) { case Backend::CPU: return CPUType::log_sigmoid_forward_out(output, buffer, self); break; default: AT_ERROR("log_sigmoid_forward_out not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(output, buffer, self))); } #else static auto table = globalATenDispatch().getOpTable("aten::log_sigmoid_forward.output(Tensor self, *, Tensor(a!) output, Tensor(b!) buffer) -> (Tensor(a!), Tensor(b!))"); return table->callUnboxed<std::tuple<Tensor &,Tensor &>, Tensor &, Tensor &, const Tensor &>(output, buffer, self); #endif }
static inline std::tuple<Tensor,Tensor> log_sigmoid_forward(const Tensor & self) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)))) { case Backend::CPU: return CPUType::log_sigmoid_forward(self); break; default: AT_ERROR("log_sigmoid_forward not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(self))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::log_sigmoid_forward", ""}).value(); return c10::Dispatcher::singleton().callUnboxedOnly<std::tuple<Tensor,Tensor>, const Tensor &>( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self); #endif }
static inline Tensor & log_sigmoid_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, const Tensor & buffer) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(grad_input, grad_output, self, buffer)))) { case Backend::CPU: return CPUType::log_sigmoid_backward_out(grad_input, grad_output, self, buffer); break; default: AT_ERROR("log_sigmoid_backward_out not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(grad_input, grad_output, self, buffer))); } #else static auto table = globalATenDispatch().getOpTable("aten::log_sigmoid_backward.grad_input(Tensor grad_output, Tensor self, Tensor buffer, *, Tensor(a!) 
grad_input) -> Tensor(a!)"); return table->callUnboxed(grad_input, grad_output, self, buffer); #endif } static inline Tensor log_sigmoid_backward(const Tensor & grad_output, const Tensor & self, const Tensor & buffer) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(grad_output, self, buffer)))) { case Backend::CPU: return CPUType::log_sigmoid_backward(grad_output, self, buffer); break; default: AT_ERROR("log_sigmoid_backward not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(grad_output, self, buffer))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::log_sigmoid_backward", ""}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(grad_output, self, buffer)), grad_output, self, buffer); #endif } static inline Tensor & rrelu_with_noise_out(Tensor & out, const Tensor & self, const Tensor & noise, Scalar lower, Scalar upper, bool training, Generator * generator) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(out, self, noise)))) { case Backend::CPU: return CPUType::rrelu_with_noise_out(out, self, noise, lower, upper, training, generator); break; default: AT_ERROR("rrelu_with_noise_out not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(out, self, noise))); } #else static auto table = globalATenDispatch().getOpTable("aten::rrelu_with_noise.out(Tensor self, Tensor noise, Scalar lower=0.125, Scalar upper=0.3333333333333333, bool training=False, Generator? generator=None, *, Tensor(a!) out) -> Tensor(a!)"); return table->callUnboxed(out, self, noise, lower, upper, training, generator); #endif } static inline Tensor rrelu_with_noise(const Tensor & self, const Tensor & noise, Scalar lower, Scalar upper, bool training, Generator * generator) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self, noise)))) { case Backend::CPU: return CPUType::rrelu_with_noise(self, noise, lower, upper, training, generator); break; default: AT_ERROR("rrelu_with_noise not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(self, noise))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::rrelu_with_noise", ""}).value(); return c10::Dispatcher::singleton().callUnboxedOnly( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self, noise)), self, noise, lower, upper, training, generator); #endif } static inline Tensor & rrelu_with_noise_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, const Tensor & noise, Scalar lower, Scalar upper, bool training) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(grad_input, grad_output, self, noise)))) { case Backend::CPU: return CPUType::rrelu_with_noise_backward_out(grad_input, grad_output, self, noise, lower, upper, training); break; default: AT_ERROR("rrelu_with_noise_backward_out not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(grad_input, grad_output, self, noise))); } #else static auto table = globalATenDispatch().getOpTable("aten::rrelu_with_noise_backward.grad_input(Tensor grad_output, Tensor self, Tensor noise, Scalar lower, Scalar upper, bool training, *, Tensor(a!) 
grad_input) -> Tensor(a!)"); return table->callUnboxed(grad_input, grad_output, self, noise, lower, upper, training); #endif } static inline Tensor rrelu_with_noise_backward(const Tensor & grad_output, const Tensor & self, const Tensor & noise, Scalar lower, Scalar upper, bool training) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(grad_output, self, noise)))) { case Backend::CPU: return CPUType::rrelu_with_noise_backward(grad_output, self, noise, lower, upper, training); break; default: AT_ERROR("rrelu_with_noise_backward not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(grad_output, self, noise))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::rrelu_with_noise_backward", ""}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(grad_output, self, noise)), grad_output, self, noise, lower, upper, training); #endif } static inline Tensor & rrelu_with_noise_(Tensor & self, const Tensor & noise, Scalar lower, Scalar upper, bool training, Generator * generator) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self, noise)))) { case Backend::CPU: return CPUType::rrelu_with_noise_(self, noise, lower, upper, training, generator); break; default: AT_ERROR("rrelu_with_noise_ not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(self, noise))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::rrelu_with_noise_", ""}).value(); return c10::Dispatcher::singleton().callUnboxedOnly( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self, noise)), self, noise, lower, upper, training, generator); #endif } static inline Tensor & softplus_out(Tensor & out, const Tensor & self, Scalar beta, Scalar threshold) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(out, self)))) { case Backend::CPU: return CPUType::softplus_out(out, self, beta, threshold); break; default: AT_ERROR("softplus_out not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(out, self))); } #else static auto table = globalATenDispatch().getOpTable("aten::softplus.out(Tensor self, Scalar beta=1, Scalar threshold=20, *, Tensor(a!) 
out) -> Tensor(a!)"); return table->callUnboxed(out, self, beta, threshold); #endif } static inline Tensor softplus(const Tensor & self, Scalar beta, Scalar threshold) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)))) { case Backend::CPU: return CPUType::softplus(self, beta, threshold); break; default: AT_ERROR("softplus not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(self))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::softplus", ""}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self, beta, threshold); #endif } static inline Tensor & softplus_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, Scalar beta, Scalar threshold, const Tensor & output) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(grad_input, grad_output, self, output)))) { case Backend::CPU: return CPUType::softplus_backward_out(grad_input, grad_output, self, beta, threshold, output); break; default: AT_ERROR("softplus_backward_out not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(grad_input, grad_output, self, output))); } #else static auto table = globalATenDispatch().getOpTable("aten::softplus_backward.grad_input(Tensor grad_output, Tensor self, Scalar beta, Scalar threshold, Tensor output, *, Tensor(a!) grad_input) -> Tensor(a!)"); return table->callUnboxed(grad_input, grad_output, self, beta, threshold, output); #endif } static inline Tensor softplus_backward(const Tensor & grad_output, const Tensor & self, Scalar beta, Scalar threshold, const Tensor & output) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(grad_output, self, output)))) { case Backend::CPU: return CPUType::softplus_backward(grad_output, self, beta, threshold, output); break; default: AT_ERROR("softplus_backward not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(grad_output, self, output))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::softplus_backward", ""}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(grad_output, self, output)), grad_output, self, beta, threshold, output); #endif } static inline Tensor & softshrink_out(Tensor & out, const Tensor & self, Scalar lambd) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(out, self)))) { case Backend::CPU: return CPUType::softshrink_out(out, self, lambd); break; default: AT_ERROR("softshrink_out not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(out, self))); } #else static auto table = globalATenDispatch().getOpTable("aten::softshrink.out(Tensor self, Scalar lambd=0.5, *, Tensor(a!) 
out) -> Tensor(a!)"); return table->callUnboxed(out, self, lambd); #endif } static inline Tensor softshrink(const Tensor & self, Scalar lambd) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)))) { case Backend::CPU: return CPUType::softshrink(self, lambd); break; default: AT_ERROR("softshrink not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(self))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::softshrink", ""}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self, lambd); #endif } static inline Tensor & softshrink_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, Scalar lambd) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(grad_input, grad_output, self)))) { case Backend::CPU: return CPUType::softshrink_backward_out(grad_input, grad_output, self, lambd); break; default: AT_ERROR("softshrink_backward_out not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(grad_input, grad_output, self))); } #else static auto table = globalATenDispatch().getOpTable("aten::softshrink_backward.grad_input(Tensor grad_output, Tensor self, Scalar lambd, *, Tensor(a!) grad_input) -> Tensor(a!)"); return table->callUnboxed(grad_input, grad_output, self, lambd); #endif } static inline Tensor softshrink_backward(const Tensor & grad_output, const Tensor & self, Scalar lambd) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(grad_output, self)))) { case Backend::CPU: return CPUType::softshrink_backward(grad_output, self, lambd); break; default: AT_ERROR("softshrink_backward not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(grad_output, self))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::softshrink_backward", ""}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(grad_output, self)), grad_output, self, lambd); #endif } static inline Tensor & adaptive_avg_pool2d_out(Tensor & out, const Tensor & self, IntArrayRef output_size) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(out, self)))) { case Backend::CPU: return CPUType::adaptive_avg_pool2d_out(out, self, output_size); break; default: AT_ERROR("adaptive_avg_pool2d_out not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(out, self))); } #else static auto table = globalATenDispatch().getOpTable("aten::adaptive_avg_pool2d.out(Tensor self, int[2] output_size, *, Tensor(a!) 
out) -> Tensor(a!)"); return table->callUnboxed(out, self, output_size); #endif } static inline Tensor adaptive_avg_pool2d(const Tensor & self, IntArrayRef output_size) { #ifdef USE_STATIC_DISPATCH return TypeDefault::adaptive_avg_pool2d(self, output_size); #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::adaptive_avg_pool2d", ""}).value(); return c10::Dispatcher::singleton().callUnboxedOnly( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self, output_size); #endif } static inline Tensor mkldnn_adaptive_avg_pool2d(const Tensor & self, IntArrayRef output_size) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)))) { default: AT_ERROR("mkldnn_adaptive_avg_pool2d not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(self))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::mkldnn_adaptive_avg_pool2d", ""}).value(); return c10::Dispatcher::singleton().callUnboxedOnly( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self, output_size); #endif } static inline Tensor _adaptive_avg_pool2d(const Tensor & self, IntArrayRef output_size) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)))) { case Backend::CPU: return CPUType::_adaptive_avg_pool2d(self, output_size); break; case Backend::QuantizedCPU: return QuantizedCPUType::_adaptive_avg_pool2d(self, output_size); break; default: AT_ERROR("_adaptive_avg_pool2d not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(self))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::_adaptive_avg_pool2d", ""}).value(); return c10::Dispatcher::singleton().callUnboxedOnly( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self, output_size); #endif } static inline Tensor _adaptive_avg_pool2d_backward(const Tensor & grad_output, const Tensor & self) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(grad_output, self)))) { case Backend::CPU: return CPUType::_adaptive_avg_pool2d_backward(grad_output, self); break; default: AT_ERROR("_adaptive_avg_pool2d_backward not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(grad_output, self))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::_adaptive_avg_pool2d_backward", ""}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(grad_output, self)), grad_output, self); #endif } static inline Tensor & adaptive_avg_pool3d_out(Tensor & out, const Tensor & self, IntArrayRef output_size) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(out, self)))) { case Backend::CPU: return CPUType::adaptive_avg_pool3d_out(out, self, output_size); break; default: AT_ERROR("adaptive_avg_pool3d_out not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(out, self))); } #else static auto table = globalATenDispatch().getOpTable("aten::adaptive_avg_pool3d.out(Tensor self, int[3] output_size, *, Tensor(a!) 
out) -> Tensor(a!)"); return table->callUnboxed(out, self, output_size); #endif } static inline Tensor adaptive_avg_pool3d(const Tensor & self, IntArrayRef output_size) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)))) { case Backend::CPU: return CPUType::adaptive_avg_pool3d(self, output_size); break; default: AT_ERROR("adaptive_avg_pool3d not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(self))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::adaptive_avg_pool3d", ""}).value(); return c10::Dispatcher::singleton().callUnboxedOnly( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self, output_size); #endif } static inline Tensor & adaptive_avg_pool3d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(grad_input, grad_output, self)))) { case Backend::CPU: return CPUType::adaptive_avg_pool3d_backward_out(grad_input, grad_output, self); break; default: AT_ERROR("adaptive_avg_pool3d_backward_out not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(grad_input, grad_output, self))); } #else static auto table = globalATenDispatch().getOpTable("aten::adaptive_avg_pool3d_backward.grad_input(Tensor grad_output, Tensor self, *, Tensor(a!) grad_input) -> Tensor(a!)"); return table->callUnboxed(grad_input, grad_output, self); #endif } static inline Tensor adaptive_avg_pool3d_backward(const Tensor & grad_output, const Tensor & self) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(grad_output, self)))) { case Backend::CPU: return CPUType::adaptive_avg_pool3d_backward(grad_output, self); break; default: AT_ERROR("adaptive_avg_pool3d_backward not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(grad_output, self))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::adaptive_avg_pool3d_backward", ""}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(grad_output, self)), grad_output, self); #endif } static inline std::tuple adaptive_max_pool2d_out(Tensor & out, Tensor & indices, const Tensor & self, IntArrayRef output_size) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(out, indices, self)))) { case Backend::CPU: return CPUType::adaptive_max_pool2d_out(out, indices, self, output_size); break; default: AT_ERROR("adaptive_max_pool2d_out not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(out, indices, self))); } #else static auto table = globalATenDispatch().getOpTable("aten::adaptive_max_pool2d.out(Tensor self, int[2] output_size, *, Tensor(a!) out, Tensor(b!) 
indices) -> (Tensor(a!), Tensor(b!))"); return table->callUnboxed, Tensor &, Tensor &, const Tensor &, IntArrayRef>(out, indices, self, output_size); #endif } static inline std::tuple adaptive_max_pool2d(const Tensor & self, IntArrayRef output_size) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)))) { case Backend::CPU: return CPUType::adaptive_max_pool2d(self, output_size); break; default: AT_ERROR("adaptive_max_pool2d not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(self))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::adaptive_max_pool2d", ""}).value(); return c10::Dispatcher::singleton().callUnboxedOnly, const Tensor &, IntArrayRef>( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self, output_size); #endif } static inline Tensor & adaptive_max_pool2d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, const Tensor & indices) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(grad_input, grad_output, self, indices)))) { case Backend::CPU: return CPUType::adaptive_max_pool2d_backward_out(grad_input, grad_output, self, indices); break; default: AT_ERROR("adaptive_max_pool2d_backward_out not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(grad_input, grad_output, self, indices))); } #else static auto table = globalATenDispatch().getOpTable("aten::adaptive_max_pool2d_backward.grad_input(Tensor grad_output, Tensor self, Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!)"); return table->callUnboxed(grad_input, grad_output, self, indices); #endif } static inline Tensor adaptive_max_pool2d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & indices) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(grad_output, self, indices)))) { case Backend::CPU: return CPUType::adaptive_max_pool2d_backward(grad_output, self, indices); break; default: AT_ERROR("adaptive_max_pool2d_backward not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(grad_output, self, indices))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::adaptive_max_pool2d_backward", ""}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(grad_output, self, indices)), grad_output, self, indices); #endif } static inline std::tuple adaptive_max_pool3d_out(Tensor & out, Tensor & indices, const Tensor & self, IntArrayRef output_size) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(out, indices, self)))) { case Backend::CPU: return CPUType::adaptive_max_pool3d_out(out, indices, self, output_size); break; default: AT_ERROR("adaptive_max_pool3d_out not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(out, indices, self))); } #else static auto table = globalATenDispatch().getOpTable("aten::adaptive_max_pool3d.out(Tensor self, int[3] output_size, *, Tensor(a!) out, Tensor(b!) 
indices) -> (Tensor(a!), Tensor(b!))"); return table->callUnboxed, Tensor &, Tensor &, const Tensor &, IntArrayRef>(out, indices, self, output_size); #endif } static inline std::tuple adaptive_max_pool3d(const Tensor & self, IntArrayRef output_size) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)))) { case Backend::CPU: return CPUType::adaptive_max_pool3d(self, output_size); break; default: AT_ERROR("adaptive_max_pool3d not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(self))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::adaptive_max_pool3d", ""}).value(); return c10::Dispatcher::singleton().callUnboxedOnly, const Tensor &, IntArrayRef>( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self, output_size); #endif } static inline Tensor & adaptive_max_pool3d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, const Tensor & indices) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(grad_input, grad_output, self, indices)))) { case Backend::CPU: return CPUType::adaptive_max_pool3d_backward_out(grad_input, grad_output, self, indices); break; default: AT_ERROR("adaptive_max_pool3d_backward_out not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(grad_input, grad_output, self, indices))); } #else static auto table = globalATenDispatch().getOpTable("aten::adaptive_max_pool3d_backward.grad_input(Tensor grad_output, Tensor self, Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!)"); return table->callUnboxed(grad_input, grad_output, self, indices); #endif } static inline Tensor adaptive_max_pool3d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & indices) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(grad_output, self, indices)))) { case Backend::CPU: return CPUType::adaptive_max_pool3d_backward(grad_output, self, indices); break; default: AT_ERROR("adaptive_max_pool3d_backward not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(grad_output, self, indices))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::adaptive_max_pool3d_backward", ""}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(grad_output, self, indices)), grad_output, self, indices); #endif } static inline Tensor & avg_pool2d_out(Tensor & out, const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional divisor_override) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(out, self)))) { case Backend::CPU: return CPUType::avg_pool2d_out(out, self, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override); break; default: AT_ERROR("avg_pool2d_out not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(out, self))); } #else static auto table = globalATenDispatch().getOpTable("aten::avg_pool2d.out(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, bool ceil_mode=False, bool count_include_pad=True, int? divisor_override=None, *, Tensor(a!) 
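// Usage sketch (illustrative only; assumes <ATen/ATen.h> and <tuple>): the
// adaptive max pools return both the pooled values and the input indices that
// produced them; the indices feed the corresponding *_backward overloads.
//
//   at::Tensor x = at::randn({1, 3, 32, 32});
//   at::Tensor values, indices;
//   std::tie(values, indices) = at::adaptive_max_pool2d(x, {7, 7});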
out) -> Tensor(a!)"); return table->callUnboxed>(out, self, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override); #endif } static inline Tensor avg_pool2d(const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional divisor_override) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)))) { case Backend::CPU: return CPUType::avg_pool2d(self, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override); break; case Backend::QuantizedCPU: return QuantizedCPUType::avg_pool2d(self, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override); break; default: AT_ERROR("avg_pool2d not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(self))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::avg_pool2d", ""}).value(); return c10::Dispatcher::singleton().callUnboxedOnly>( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override); #endif } static inline Tensor & avg_pool2d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional divisor_override) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(grad_input, grad_output, self)))) { case Backend::CPU: return CPUType::avg_pool2d_backward_out(grad_input, grad_output, self, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override); break; default: AT_ERROR("avg_pool2d_backward_out not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(grad_input, grad_output, self))); } #else static auto table = globalATenDispatch().getOpTable("aten::avg_pool2d_backward.grad_input(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] stride, int[2] padding, bool ceil_mode, bool count_include_pad, int? divisor_override, *, Tensor(a!) 
grad_input) -> Tensor(a!)"); return table->callUnboxed>(grad_input, grad_output, self, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override); #endif } static inline Tensor avg_pool2d_backward(const Tensor & grad_output, const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional divisor_override) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(grad_output, self)))) { case Backend::CPU: return CPUType::avg_pool2d_backward(grad_output, self, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override); break; default: AT_ERROR("avg_pool2d_backward not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(grad_output, self))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::avg_pool2d_backward", ""}).value(); return c10::Dispatcher::singleton().callUnboxedOnly>( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(grad_output, self)), grad_output, self, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override); #endif } static inline Tensor & avg_pool3d_out(Tensor & out, const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional divisor_override) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(out, self)))) { case Backend::CPU: return CPUType::avg_pool3d_out(out, self, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override); break; default: AT_ERROR("avg_pool3d_out not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(out, self))); } #else static auto table = globalATenDispatch().getOpTable("aten::avg_pool3d.out(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, bool ceil_mode=False, bool count_include_pad=True, int? divisor_override=None, *, Tensor(a!) 
out) -> Tensor(a!)"); return table->callUnboxed>(out, self, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override); #endif } static inline Tensor avg_pool3d(const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional divisor_override) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)))) { case Backend::CPU: return CPUType::avg_pool3d(self, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override); break; default: AT_ERROR("avg_pool3d not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(self))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::avg_pool3d", ""}).value(); return c10::Dispatcher::singleton().callUnboxedOnly>( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override); #endif } static inline Tensor & avg_pool3d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional divisor_override) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(grad_input, grad_output, self)))) { case Backend::CPU: return CPUType::avg_pool3d_backward_out(grad_input, grad_output, self, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override); break; default: AT_ERROR("avg_pool3d_backward_out not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(grad_input, grad_output, self))); } #else static auto table = globalATenDispatch().getOpTable("aten::avg_pool3d_backward.grad_input(Tensor grad_output, Tensor self, int[3] kernel_size, int[3] stride, int[3] padding, bool ceil_mode, bool count_include_pad, int? divisor_override, *, Tensor(a!) 
grad_input) -> Tensor(a!)"); return table->callUnboxed>(grad_input, grad_output, self, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override); #endif } static inline Tensor avg_pool3d_backward(const Tensor & grad_output, const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional divisor_override) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(grad_output, self)))) { case Backend::CPU: return CPUType::avg_pool3d_backward(grad_output, self, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override); break; default: AT_ERROR("avg_pool3d_backward not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(grad_output, self))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::avg_pool3d_backward", ""}).value(); return c10::Dispatcher::singleton().callUnboxedOnly>( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(grad_output, self)), grad_output, self, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override); #endif } static inline std::tuple fractional_max_pool2d_out(Tensor & output, Tensor & indices, const Tensor & self, IntArrayRef kernel_size, IntArrayRef output_size, const Tensor & random_samples) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(output, indices, self, random_samples)))) { case Backend::CPU: return CPUType::fractional_max_pool2d_out(output, indices, self, kernel_size, output_size, random_samples); break; default: AT_ERROR("fractional_max_pool2d_out not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(output, indices, self, random_samples))); } #else static auto table = globalATenDispatch().getOpTable("aten::fractional_max_pool2d.output(Tensor self, int[2] kernel_size, int[2] output_size, Tensor random_samples, *, Tensor(a!) output, Tensor(b!) 
indices) -> (Tensor(a!), Tensor(b!))"); return table->callUnboxed, Tensor &, Tensor &, const Tensor &, IntArrayRef, IntArrayRef, const Tensor &>(output, indices, self, kernel_size, output_size, random_samples); #endif } static inline std::tuple fractional_max_pool2d(const Tensor & self, IntArrayRef kernel_size, IntArrayRef output_size, const Tensor & random_samples) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self, random_samples)))) { case Backend::CPU: return CPUType::fractional_max_pool2d(self, kernel_size, output_size, random_samples); break; default: AT_ERROR("fractional_max_pool2d not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(self, random_samples))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::fractional_max_pool2d", ""}).value(); return c10::Dispatcher::singleton().callUnboxedOnly, const Tensor &, IntArrayRef, IntArrayRef, const Tensor &>( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self, random_samples)), self, kernel_size, output_size, random_samples); #endif } static inline Tensor & fractional_max_pool2d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, IntArrayRef kernel_size, IntArrayRef output_size, const Tensor & indices) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(grad_input, grad_output, self, indices)))) { case Backend::CPU: return CPUType::fractional_max_pool2d_backward_out(grad_input, grad_output, self, kernel_size, output_size, indices); break; default: AT_ERROR("fractional_max_pool2d_backward_out not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(grad_input, grad_output, self, indices))); } #else static auto table = globalATenDispatch().getOpTable("aten::fractional_max_pool2d_backward.grad_input(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] output_size, Tensor indices, *, Tensor(a!) 
grad_input) -> Tensor(a!)"); return table->callUnboxed(grad_input, grad_output, self, kernel_size, output_size, indices); #endif } static inline Tensor fractional_max_pool2d_backward(const Tensor & grad_output, const Tensor & self, IntArrayRef kernel_size, IntArrayRef output_size, const Tensor & indices) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(grad_output, self, indices)))) { case Backend::CPU: return CPUType::fractional_max_pool2d_backward(grad_output, self, kernel_size, output_size, indices); break; default: AT_ERROR("fractional_max_pool2d_backward not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(grad_output, self, indices))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::fractional_max_pool2d_backward", ""}).value(); return c10::Dispatcher::singleton().callUnboxedOnly( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(grad_output, self, indices)), grad_output, self, kernel_size, output_size, indices); #endif } static inline std::tuple fractional_max_pool3d_out(Tensor & output, Tensor & indices, const Tensor & self, IntArrayRef kernel_size, IntArrayRef output_size, const Tensor & random_samples) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(output, indices, self, random_samples)))) { case Backend::CPU: return CPUType::fractional_max_pool3d_out(output, indices, self, kernel_size, output_size, random_samples); break; default: AT_ERROR("fractional_max_pool3d_out not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(output, indices, self, random_samples))); } #else static auto table = globalATenDispatch().getOpTable("aten::fractional_max_pool3d.output(Tensor self, int[3] kernel_size, int[3] output_size, Tensor random_samples, *, Tensor(a!) output, Tensor(b!) 
indices) -> (Tensor(a!), Tensor(b!))"); return table->callUnboxed, Tensor &, Tensor &, const Tensor &, IntArrayRef, IntArrayRef, const Tensor &>(output, indices, self, kernel_size, output_size, random_samples); #endif } static inline std::tuple fractional_max_pool3d(const Tensor & self, IntArrayRef kernel_size, IntArrayRef output_size, const Tensor & random_samples) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self, random_samples)))) { case Backend::CPU: return CPUType::fractional_max_pool3d(self, kernel_size, output_size, random_samples); break; default: AT_ERROR("fractional_max_pool3d not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(self, random_samples))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::fractional_max_pool3d", ""}).value(); return c10::Dispatcher::singleton().callUnboxedOnly, const Tensor &, IntArrayRef, IntArrayRef, const Tensor &>( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self, random_samples)), self, kernel_size, output_size, random_samples); #endif } static inline Tensor & fractional_max_pool3d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, IntArrayRef kernel_size, IntArrayRef output_size, const Tensor & indices) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(grad_input, grad_output, self, indices)))) { case Backend::CPU: return CPUType::fractional_max_pool3d_backward_out(grad_input, grad_output, self, kernel_size, output_size, indices); break; default: AT_ERROR("fractional_max_pool3d_backward_out not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(grad_input, grad_output, self, indices))); } #else static auto table = globalATenDispatch().getOpTable("aten::fractional_max_pool3d_backward.grad_input(Tensor grad_output, Tensor self, int[3] kernel_size, int[3] output_size, Tensor indices, *, Tensor(a!) 
grad_input) -> Tensor(a!)"); return table->callUnboxed(grad_input, grad_output, self, kernel_size, output_size, indices); #endif } static inline Tensor fractional_max_pool3d_backward(const Tensor & grad_output, const Tensor & self, IntArrayRef kernel_size, IntArrayRef output_size, const Tensor & indices) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(grad_output, self, indices)))) { case Backend::CPU: return CPUType::fractional_max_pool3d_backward(grad_output, self, kernel_size, output_size, indices); break; default: AT_ERROR("fractional_max_pool3d_backward not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(grad_output, self, indices))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::fractional_max_pool3d_backward", ""}).value(); return c10::Dispatcher::singleton().callUnboxedOnly( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(grad_output, self, indices)), grad_output, self, kernel_size, output_size, indices); #endif } static inline std::tuple max_pool2d_with_indices_out(Tensor & out, Tensor & indices, const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(out, indices, self)))) { case Backend::CPU: return CPUType::max_pool2d_with_indices_out(out, indices, self, kernel_size, stride, padding, dilation, ceil_mode); break; default: AT_ERROR("max_pool2d_with_indices_out not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(out, indices, self))); } #else static auto table = globalATenDispatch().getOpTable("aten::max_pool2d_with_indices.out(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False, *, Tensor(a!) out, Tensor(b!) 
indices) -> (Tensor(a!), Tensor(b!))"); return table->callUnboxed, Tensor &, Tensor &, const Tensor &, IntArrayRef, IntArrayRef, IntArrayRef, IntArrayRef, bool>(out, indices, self, kernel_size, stride, padding, dilation, ceil_mode); #endif } static inline std::tuple max_pool2d_with_indices(const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)))) { case Backend::CPU: return CPUType::max_pool2d_with_indices(self, kernel_size, stride, padding, dilation, ceil_mode); break; default: AT_ERROR("max_pool2d_with_indices not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(self))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::max_pool2d_with_indices", ""}).value(); return c10::Dispatcher::singleton().callUnboxedOnly, const Tensor &, IntArrayRef, IntArrayRef, IntArrayRef, IntArrayRef, bool>( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self, kernel_size, stride, padding, dilation, ceil_mode); #endif } static inline Tensor & max_pool2d_with_indices_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode, const Tensor & indices) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(grad_input, grad_output, self, indices)))) { case Backend::CPU: return CPUType::max_pool2d_with_indices_backward_out(grad_input, grad_output, self, kernel_size, stride, padding, dilation, ceil_mode, indices); break; default: AT_ERROR("max_pool2d_with_indices_backward_out not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(grad_input, grad_output, self, indices))); } #else static auto table = globalATenDispatch().getOpTable("aten::max_pool2d_with_indices_backward.grad_input(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] stride, int[2] padding, int[2] dilation, bool ceil_mode, Tensor indices, *, Tensor(a!) 
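// Illustrative sketch (not generated): max_pool2d_with_indices returns both the
// pooled values and the indices consumed by the *_backward and max_unpool
// entry points. The shapes below are assumptions for the example:
//
//   at::Tensor x = at::randn({1, 16, 28, 28});
//   at::Tensor y, idx;
//   std::tie(y, idx) = at::max_pool2d_with_indices(x, /*kernel_size=*/{2, 2},
//                                                  /*stride=*/{2, 2},
//                                                  /*padding=*/{0, 0},
//                                                  /*dilation=*/{1, 1},
//                                                  /*ceil_mode=*/false);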
grad_input) -> Tensor(a!)"); return table->callUnboxed(grad_input, grad_output, self, kernel_size, stride, padding, dilation, ceil_mode, indices); #endif } static inline Tensor max_pool2d_with_indices_backward(const Tensor & grad_output, const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode, const Tensor & indices) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(grad_output, self, indices)))) { case Backend::CPU: return CPUType::max_pool2d_with_indices_backward(grad_output, self, kernel_size, stride, padding, dilation, ceil_mode, indices); break; default: AT_ERROR("max_pool2d_with_indices_backward not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(grad_output, self, indices))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::max_pool2d_with_indices_backward", ""}).value(); return c10::Dispatcher::singleton().callUnboxedOnly( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(grad_output, self, indices)), grad_output, self, kernel_size, stride, padding, dilation, ceil_mode, indices); #endif } static inline std::tuple max_pool3d_with_indices_out(Tensor & out, Tensor & indices, const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(out, indices, self)))) { case Backend::CPU: return CPUType::max_pool3d_with_indices_out(out, indices, self, kernel_size, stride, padding, dilation, ceil_mode); break; default: AT_ERROR("max_pool3d_with_indices_out not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(out, indices, self))); } #else static auto table = globalATenDispatch().getOpTable("aten::max_pool3d_with_indices.out(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False, *, Tensor(a!) out, Tensor(b!) 
indices) -> (Tensor(a!), Tensor(b!))"); return table->callUnboxed, Tensor &, Tensor &, const Tensor &, IntArrayRef, IntArrayRef, IntArrayRef, IntArrayRef, bool>(out, indices, self, kernel_size, stride, padding, dilation, ceil_mode); #endif } static inline std::tuple max_pool3d_with_indices(const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)))) { case Backend::CPU: return CPUType::max_pool3d_with_indices(self, kernel_size, stride, padding, dilation, ceil_mode); break; default: AT_ERROR("max_pool3d_with_indices not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(self))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::max_pool3d_with_indices", ""}).value(); return c10::Dispatcher::singleton().callUnboxedOnly, const Tensor &, IntArrayRef, IntArrayRef, IntArrayRef, IntArrayRef, bool>( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self, kernel_size, stride, padding, dilation, ceil_mode); #endif } static inline Tensor & max_pool3d_with_indices_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode, const Tensor & indices) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(grad_input, grad_output, self, indices)))) { case Backend::CPU: return CPUType::max_pool3d_with_indices_backward_out(grad_input, grad_output, self, kernel_size, stride, padding, dilation, ceil_mode, indices); break; default: AT_ERROR("max_pool3d_with_indices_backward_out not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(grad_input, grad_output, self, indices))); } #else static auto table = globalATenDispatch().getOpTable("aten::max_pool3d_with_indices_backward.grad_input(Tensor grad_output, Tensor self, int[3] kernel_size, int[3] stride, int[3] padding, int[3] dilation, bool ceil_mode, Tensor indices, *, Tensor(a!) 
grad_input) -> Tensor(a!)"); return table->callUnboxed(grad_input, grad_output, self, kernel_size, stride, padding, dilation, ceil_mode, indices); #endif } static inline Tensor max_pool3d_with_indices_backward(const Tensor & grad_output, const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode, const Tensor & indices) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(grad_output, self, indices)))) { case Backend::CPU: return CPUType::max_pool3d_with_indices_backward(grad_output, self, kernel_size, stride, padding, dilation, ceil_mode, indices); break; default: AT_ERROR("max_pool3d_with_indices_backward not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(grad_output, self, indices))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::max_pool3d_with_indices_backward", ""}).value(); return c10::Dispatcher::singleton().callUnboxedOnly( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(grad_output, self, indices)), grad_output, self, kernel_size, stride, padding, dilation, ceil_mode, indices); #endif } static inline Tensor & max_unpool2d_out(Tensor & out, const Tensor & self, const Tensor & indices, IntArrayRef output_size) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(out, self, indices)))) { case Backend::CPU: return CPUType::max_unpool2d_out(out, self, indices, output_size); break; default: AT_ERROR("max_unpool2d_out not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(out, self, indices))); } #else static auto table = globalATenDispatch().getOpTable("aten::max_unpool2d.out(Tensor self, Tensor indices, int[2] output_size, *, Tensor(a!) out) -> Tensor(a!)"); return table->callUnboxed(out, self, indices, output_size); #endif } static inline Tensor max_unpool2d(const Tensor & self, const Tensor & indices, IntArrayRef output_size) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self, indices)))) { case Backend::CPU: return CPUType::max_unpool2d(self, indices, output_size); break; default: AT_ERROR("max_unpool2d not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(self, indices))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::max_unpool2d", ""}).value(); return c10::Dispatcher::singleton().callUnboxedOnly( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self, indices)), self, indices, output_size); #endif } static inline Tensor & max_unpool2d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, const Tensor & indices, IntArrayRef output_size) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(grad_input, grad_output, self, indices)))) { case Backend::CPU: return CPUType::max_unpool2d_backward_out(grad_input, grad_output, self, indices, output_size); break; default: AT_ERROR("max_unpool2d_backward_out not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(grad_input, grad_output, self, indices))); } #else static auto table = globalATenDispatch().getOpTable("aten::max_unpool2d_backward.grad_input(Tensor grad_output, Tensor self, Tensor indices, int[2] output_size, *, Tensor(a!) 
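// Illustrative sketch (not generated): max_unpool2d is the partial inverse of
// max_pool2d_with_indices; it scatters the pooled values back to the positions
// recorded in the indices tensor. Given `y` and `idx` from
// at::max_pool2d_with_indices on a {1, 16, 28, 28} input (names assumed):
//
//   at::Tensor restored = at::max_unpool2d(y, idx, /*output_size=*/{28, 28});
//   // `restored` has the original spatial size; non-maximal positions are zero.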
grad_input) -> Tensor(a!)"); return table->callUnboxed(grad_input, grad_output, self, indices, output_size); #endif } static inline Tensor max_unpool2d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & indices, IntArrayRef output_size) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(grad_output, self, indices)))) { case Backend::CPU: return CPUType::max_unpool2d_backward(grad_output, self, indices, output_size); break; default: AT_ERROR("max_unpool2d_backward not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(grad_output, self, indices))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::max_unpool2d_backward", ""}).value(); return c10::Dispatcher::singleton().callUnboxedOnly( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(grad_output, self, indices)), grad_output, self, indices, output_size); #endif } static inline Tensor & max_unpool3d_out(Tensor & out, const Tensor & self, const Tensor & indices, IntArrayRef output_size, IntArrayRef stride, IntArrayRef padding) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(out, self, indices)))) { case Backend::CPU: return CPUType::max_unpool3d_out(out, self, indices, output_size, stride, padding); break; default: AT_ERROR("max_unpool3d_out not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(out, self, indices))); } #else static auto table = globalATenDispatch().getOpTable("aten::max_unpool3d.out(Tensor self, Tensor indices, int[3] output_size, int[3] stride, int[3] padding, *, Tensor(a!) out) -> Tensor(a!)"); return table->callUnboxed(out, self, indices, output_size, stride, padding); #endif } static inline Tensor max_unpool3d(const Tensor & self, const Tensor & indices, IntArrayRef output_size, IntArrayRef stride, IntArrayRef padding) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self, indices)))) { case Backend::CPU: return CPUType::max_unpool3d(self, indices, output_size, stride, padding); break; default: AT_ERROR("max_unpool3d not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(self, indices))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::max_unpool3d", ""}).value(); return c10::Dispatcher::singleton().callUnboxedOnly( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self, indices)), self, indices, output_size, stride, padding); #endif } static inline Tensor & max_unpool3d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, const Tensor & indices, IntArrayRef output_size, IntArrayRef stride, IntArrayRef padding) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(grad_input, grad_output, self, indices)))) { case Backend::CPU: return CPUType::max_unpool3d_backward_out(grad_input, grad_output, self, indices, output_size, stride, padding); break; default: AT_ERROR("max_unpool3d_backward_out not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(grad_input, grad_output, self, indices))); } #else static auto table = globalATenDispatch().getOpTable("aten::max_unpool3d_backward.grad_input(Tensor grad_output, Tensor self, Tensor indices, int[3] output_size, int[3] stride, int[3] 
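// Illustrative sketch (not generated): unlike the 2-D variant, max_unpool3d
// also takes the stride and padding that were used for pooling. All shapes and
// values below are assumptions for the example:
//
//   at::Tensor v = at::randn({1, 8, 8, 16, 16});
//   at::Tensor pooled3d, idx3d;
//   std::tie(pooled3d, idx3d) = at::max_pool3d_with_indices(v, {2, 2, 2}, {2, 2, 2},
//                                                           {0, 0, 0}, {1, 1, 1}, false);
//   at::Tensor restored3d = at::max_unpool3d(pooled3d, idx3d,
//                                            /*output_size=*/{8, 16, 16},
//                                            /*stride=*/{2, 2, 2},
//                                            /*padding=*/{0, 0, 0});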
padding, *, Tensor(a!) grad_input) -> Tensor(a!)"); return table->callUnboxed(grad_input, grad_output, self, indices, output_size, stride, padding); #endif } static inline Tensor max_unpool3d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & indices, IntArrayRef output_size, IntArrayRef stride, IntArrayRef padding) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(grad_output, self, indices)))) { case Backend::CPU: return CPUType::max_unpool3d_backward(grad_output, self, indices, output_size, stride, padding); break; default: AT_ERROR("max_unpool3d_backward not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(grad_output, self, indices))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::max_unpool3d_backward", ""}).value(); return c10::Dispatcher::singleton().callUnboxedOnly( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(grad_output, self, indices)), grad_output, self, indices, output_size, stride, padding); #endif } static inline Tensor & reflection_pad1d_out(Tensor & out, const Tensor & self, IntArrayRef padding) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(out, self)))) { case Backend::CPU: return CPUType::reflection_pad1d_out(out, self, padding); break; default: AT_ERROR("reflection_pad1d_out not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(out, self))); } #else static auto table = globalATenDispatch().getOpTable("aten::reflection_pad1d.out(Tensor self, int[2] padding, *, Tensor(a!) out) -> Tensor(a!)"); return table->callUnboxed(out, self, padding); #endif } static inline Tensor reflection_pad1d(const Tensor & self, IntArrayRef padding) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)))) { case Backend::CPU: return CPUType::reflection_pad1d(self, padding); break; default: AT_ERROR("reflection_pad1d not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(self))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::reflection_pad1d", ""}).value(); return c10::Dispatcher::singleton().callUnboxedOnly( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self, padding); #endif } static inline Tensor & reflection_pad1d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, IntArrayRef padding) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(grad_input, grad_output, self)))) { case Backend::CPU: return CPUType::reflection_pad1d_backward_out(grad_input, grad_output, self, padding); break; default: AT_ERROR("reflection_pad1d_backward_out not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(grad_input, grad_output, self))); } #else static auto table = globalATenDispatch().getOpTable("aten::reflection_pad1d_backward.grad_input(Tensor grad_output, Tensor self, int[2] padding, *, Tensor(a!) 
grad_input) -> Tensor(a!)"); return table->callUnboxed(grad_input, grad_output, self, padding); #endif } static inline Tensor reflection_pad1d_backward(const Tensor & grad_output, const Tensor & self, IntArrayRef padding) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(grad_output, self)))) { case Backend::CPU: return CPUType::reflection_pad1d_backward(grad_output, self, padding); break; default: AT_ERROR("reflection_pad1d_backward not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(grad_output, self))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::reflection_pad1d_backward", ""}).value(); return c10::Dispatcher::singleton().callUnboxedOnly( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(grad_output, self)), grad_output, self, padding); #endif } static inline Tensor & reflection_pad2d_out(Tensor & out, const Tensor & self, IntArrayRef padding) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(out, self)))) { case Backend::CPU: return CPUType::reflection_pad2d_out(out, self, padding); break; default: AT_ERROR("reflection_pad2d_out not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(out, self))); } #else static auto table = globalATenDispatch().getOpTable("aten::reflection_pad2d.out(Tensor self, int[4] padding, *, Tensor(a!) out) -> Tensor(a!)"); return table->callUnboxed(out, self, padding); #endif } static inline Tensor reflection_pad2d(const Tensor & self, IntArrayRef padding) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)))) { case Backend::CPU: return CPUType::reflection_pad2d(self, padding); break; default: AT_ERROR("reflection_pad2d not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(self))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::reflection_pad2d", ""}).value(); return c10::Dispatcher::singleton().callUnboxedOnly( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self, padding); #endif } static inline Tensor & reflection_pad2d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, IntArrayRef padding) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(grad_input, grad_output, self)))) { case Backend::CPU: return CPUType::reflection_pad2d_backward_out(grad_input, grad_output, self, padding); break; default: AT_ERROR("reflection_pad2d_backward_out not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(grad_input, grad_output, self))); } #else static auto table = globalATenDispatch().getOpTable("aten::reflection_pad2d_backward.grad_input(Tensor grad_output, Tensor self, int[4] padding, *, Tensor(a!) 
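// Illustrative sketch (not generated): reflection_pad2d takes the 4-element
// padding list from the int[4] schema above, in (left, right, top, bottom)
// order. The shapes below are assumptions for the example:
//
//   at::Tensor img    = at::randn({1, 3, 10, 10});
//   at::Tensor padded = at::reflection_pad2d(img, /*padding=*/{1, 1, 2, 2});
//   // padded.sizes() == {1, 3, 14, 12}: height grows by top+bottom, width by left+right.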
grad_input) -> Tensor(a!)"); return table->callUnboxed(grad_input, grad_output, self, padding); #endif } static inline Tensor reflection_pad2d_backward(const Tensor & grad_output, const Tensor & self, IntArrayRef padding) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(grad_output, self)))) { case Backend::CPU: return CPUType::reflection_pad2d_backward(grad_output, self, padding); break; default: AT_ERROR("reflection_pad2d_backward not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(grad_output, self))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::reflection_pad2d_backward", ""}).value(); return c10::Dispatcher::singleton().callUnboxedOnly( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(grad_output, self)), grad_output, self, padding); #endif } static inline Tensor & replication_pad1d_out(Tensor & out, const Tensor & self, IntArrayRef padding) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(out, self)))) { case Backend::CPU: return CPUType::replication_pad1d_out(out, self, padding); break; default: AT_ERROR("replication_pad1d_out not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(out, self))); } #else static auto table = globalATenDispatch().getOpTable("aten::replication_pad1d.out(Tensor self, int[2] padding, *, Tensor(a!) out) -> Tensor(a!)"); return table->callUnboxed(out, self, padding); #endif } static inline Tensor replication_pad1d(const Tensor & self, IntArrayRef padding) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)))) { case Backend::CPU: return CPUType::replication_pad1d(self, padding); break; default: AT_ERROR("replication_pad1d not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(self))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::replication_pad1d", ""}).value(); return c10::Dispatcher::singleton().callUnboxedOnly( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self, padding); #endif } static inline Tensor & replication_pad1d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, IntArrayRef padding) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(grad_input, grad_output, self)))) { case Backend::CPU: return CPUType::replication_pad1d_backward_out(grad_input, grad_output, self, padding); break; default: AT_ERROR("replication_pad1d_backward_out not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(grad_input, grad_output, self))); } #else static auto table = globalATenDispatch().getOpTable("aten::replication_pad1d_backward.grad_input(Tensor grad_output, Tensor self, int[2] padding, *, Tensor(a!) 
grad_input) -> Tensor(a!)"); return table->callUnboxed(grad_input, grad_output, self, padding); #endif } static inline Tensor replication_pad1d_backward(const Tensor & grad_output, const Tensor & self, IntArrayRef padding) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(grad_output, self)))) { case Backend::CPU: return CPUType::replication_pad1d_backward(grad_output, self, padding); break; default: AT_ERROR("replication_pad1d_backward not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(grad_output, self))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::replication_pad1d_backward", ""}).value(); return c10::Dispatcher::singleton().callUnboxedOnly( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(grad_output, self)), grad_output, self, padding); #endif } static inline Tensor & replication_pad2d_out(Tensor & out, const Tensor & self, IntArrayRef padding) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(out, self)))) { case Backend::CPU: return CPUType::replication_pad2d_out(out, self, padding); break; default: AT_ERROR("replication_pad2d_out not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(out, self))); } #else static auto table = globalATenDispatch().getOpTable("aten::replication_pad2d.out(Tensor self, int[4] padding, *, Tensor(a!) out) -> Tensor(a!)"); return table->callUnboxed(out, self, padding); #endif } static inline Tensor replication_pad2d(const Tensor & self, IntArrayRef padding) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)))) { case Backend::CPU: return CPUType::replication_pad2d(self, padding); break; default: AT_ERROR("replication_pad2d not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(self))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::replication_pad2d", ""}).value(); return c10::Dispatcher::singleton().callUnboxedOnly( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self, padding); #endif } static inline Tensor & replication_pad2d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, IntArrayRef padding) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(grad_input, grad_output, self)))) { case Backend::CPU: return CPUType::replication_pad2d_backward_out(grad_input, grad_output, self, padding); break; default: AT_ERROR("replication_pad2d_backward_out not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(grad_input, grad_output, self))); } #else static auto table = globalATenDispatch().getOpTable("aten::replication_pad2d_backward.grad_input(Tensor grad_output, Tensor self, int[4] padding, *, Tensor(a!) 
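// Illustrative sketch (not generated): replication_pad2d uses the same int[4]
// padding layout as reflection_pad2d but repeats the edge values instead of
// mirroring them, so it also accepts padding that is as large as, or larger
// than, the input. Shapes below are assumptions for the example:
//
//   at::Tensor img2    = at::randn({1, 3, 4, 4});
//   at::Tensor padded2 = at::replication_pad2d(img2, /*padding=*/{5, 5, 5, 5});
//   // Reflection padding of 5 would be rejected here because 5 >= the input size 4.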
grad_input) -> Tensor(a!)"); return table->callUnboxed(grad_input, grad_output, self, padding); #endif } static inline Tensor replication_pad2d_backward(const Tensor & grad_output, const Tensor & self, IntArrayRef padding) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(grad_output, self)))) { case Backend::CPU: return CPUType::replication_pad2d_backward(grad_output, self, padding); break; default: AT_ERROR("replication_pad2d_backward not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(grad_output, self))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::replication_pad2d_backward", ""}).value(); return c10::Dispatcher::singleton().callUnboxedOnly( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(grad_output, self)), grad_output, self, padding); #endif } static inline Tensor & replication_pad3d_out(Tensor & out, const Tensor & self, IntArrayRef padding) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(out, self)))) { case Backend::CPU: return CPUType::replication_pad3d_out(out, self, padding); break; default: AT_ERROR("replication_pad3d_out not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(out, self))); } #else static auto table = globalATenDispatch().getOpTable("aten::replication_pad3d.out(Tensor self, int[6] padding, *, Tensor(a!) out) -> Tensor(a!)"); return table->callUnboxed(out, self, padding); #endif } static inline Tensor replication_pad3d(const Tensor & self, IntArrayRef padding) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)))) { case Backend::CPU: return CPUType::replication_pad3d(self, padding); break; default: AT_ERROR("replication_pad3d not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(self))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::replication_pad3d", ""}).value(); return c10::Dispatcher::singleton().callUnboxedOnly( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self, padding); #endif } static inline Tensor & replication_pad3d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, IntArrayRef padding) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(grad_input, grad_output, self)))) { case Backend::CPU: return CPUType::replication_pad3d_backward_out(grad_input, grad_output, self, padding); break; default: AT_ERROR("replication_pad3d_backward_out not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(grad_input, grad_output, self))); } #else static auto table = globalATenDispatch().getOpTable("aten::replication_pad3d_backward.grad_input(Tensor grad_output, Tensor self, int[6] padding, *, Tensor(a!) 
grad_input) -> Tensor(a!)"); return table->callUnboxed(grad_input, grad_output, self, padding); #endif } static inline Tensor replication_pad3d_backward(const Tensor & grad_output, const Tensor & self, IntArrayRef padding) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(grad_output, self)))) { case Backend::CPU: return CPUType::replication_pad3d_backward(grad_output, self, padding); break; default: AT_ERROR("replication_pad3d_backward not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(grad_output, self))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::replication_pad3d_backward", ""}).value(); return c10::Dispatcher::singleton().callUnboxedOnly( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(grad_output, self)), grad_output, self, padding); #endif } static inline Tensor & upsample_linear1d_out(Tensor & out, const Tensor & self, IntArrayRef output_size, bool align_corners) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(out, self)))) { case Backend::CPU: return CPUType::upsample_linear1d_out(out, self, output_size, align_corners); break; default: AT_ERROR("upsample_linear1d_out not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(out, self))); } #else static auto table = globalATenDispatch().getOpTable("aten::upsample_linear1d.out(Tensor self, int[1] output_size, bool align_corners, *, Tensor(a!) out) -> Tensor(a!)"); return table->callUnboxed(out, self, output_size, align_corners); #endif } static inline Tensor upsample_linear1d(const Tensor & self, IntArrayRef output_size, bool align_corners) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)))) { case Backend::CPU: return CPUType::upsample_linear1d(self, output_size, align_corners); break; default: AT_ERROR("upsample_linear1d not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(self))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::upsample_linear1d", ""}).value(); return c10::Dispatcher::singleton().callUnboxedOnly( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self, output_size, align_corners); #endif } static inline Tensor & upsample_linear1d_backward_out(Tensor & grad_input, const Tensor & grad_output, IntArrayRef output_size, IntArrayRef input_size, bool align_corners) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(grad_input, grad_output)))) { case Backend::CPU: return CPUType::upsample_linear1d_backward_out(grad_input, grad_output, output_size, input_size, align_corners); break; default: AT_ERROR("upsample_linear1d_backward_out not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(grad_input, grad_output))); } #else static auto table = globalATenDispatch().getOpTable("aten::upsample_linear1d_backward.grad_input(Tensor grad_output, int[1] output_size, int[3] input_size, bool align_corners, *, Tensor(a!) 
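// Illustrative sketch (not generated): upsample_linear1d resizes the last
// dimension of a 3-D (N, C, W) tensor to `output_size`, with `align_corners`
// selecting how the sampling grid is placed. Shapes are assumptions:
//
//   at::Tensor sig = at::randn({1, 2, 50});
//   at::Tensor up  = at::upsample_linear1d(sig, /*output_size=*/{100},
//                                          /*align_corners=*/false);
//   // up.sizes() == {1, 2, 100}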
grad_input) -> Tensor(a!)"); return table->callUnboxed(grad_input, grad_output, output_size, input_size, align_corners); #endif } static inline Tensor upsample_linear1d_backward(const Tensor & grad_output, IntArrayRef output_size, IntArrayRef input_size, bool align_corners) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(grad_output)))) { case Backend::CPU: return CPUType::upsample_linear1d_backward(grad_output, output_size, input_size, align_corners); break; default: AT_ERROR("upsample_linear1d_backward not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(grad_output))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::upsample_linear1d_backward", ""}).value(); return c10::Dispatcher::singleton().callUnboxedOnly( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(grad_output)), grad_output, output_size, input_size, align_corners); #endif } static inline Tensor & upsample_bilinear2d_out(Tensor & out, const Tensor & self, IntArrayRef output_size, bool align_corners) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(out, self)))) { case Backend::CPU: return CPUType::upsample_bilinear2d_out(out, self, output_size, align_corners); break; default: AT_ERROR("upsample_bilinear2d_out not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(out, self))); } #else static auto table = globalATenDispatch().getOpTable("aten::upsample_bilinear2d.out(Tensor self, int[2] output_size, bool align_corners, *, Tensor(a!) out) -> Tensor(a!)"); return table->callUnboxed(out, self, output_size, align_corners); #endif } static inline Tensor upsample_bilinear2d(const Tensor & self, IntArrayRef output_size, bool align_corners) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)))) { case Backend::CPU: return CPUType::upsample_bilinear2d(self, output_size, align_corners); break; case Backend::QuantizedCPU: return QuantizedCPUType::upsample_bilinear2d(self, output_size, align_corners); break; default: AT_ERROR("upsample_bilinear2d not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(self))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::upsample_bilinear2d", ""}).value(); return c10::Dispatcher::singleton().callUnboxedOnly( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self, output_size, align_corners); #endif } static inline Tensor & upsample_bilinear2d_backward_out(Tensor & grad_input, const Tensor & grad_output, IntArrayRef output_size, IntArrayRef input_size, bool align_corners) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(grad_input, grad_output)))) { case Backend::CPU: return CPUType::upsample_bilinear2d_backward_out(grad_input, grad_output, output_size, input_size, align_corners); break; default: AT_ERROR("upsample_bilinear2d_backward_out not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(grad_input, grad_output))); } #else static auto table = globalATenDispatch().getOpTable("aten::upsample_bilinear2d_backward.grad_input(Tensor grad_output, int[2] output_size, int[4] input_size, bool align_corners, *, Tensor(a!) 
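// Illustrative sketch (not generated): upsample_bilinear2d is one of the few
// entry points in this file with a QuantizedCPU case in its static-dispatch
// switch, so the same call serves float and quantized CPU tensors. Shapes are
// assumptions for the example:
//
//   at::Tensor feat = at::randn({1, 64, 7, 7});
//   at::Tensor big  = at::upsample_bilinear2d(feat, /*output_size=*/{14, 14},
//                                             /*align_corners=*/false);
//   // With align_corners=true the corner pixels of input and output are aligned,
//   // changing the interpolation weights but not the output shape.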
grad_input) -> Tensor(a!)"); return table->callUnboxed(grad_input, grad_output, output_size, input_size, align_corners); #endif } static inline Tensor upsample_bilinear2d_backward(const Tensor & grad_output, IntArrayRef output_size, IntArrayRef input_size, bool align_corners) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(grad_output)))) { case Backend::CPU: return CPUType::upsample_bilinear2d_backward(grad_output, output_size, input_size, align_corners); break; default: AT_ERROR("upsample_bilinear2d_backward not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(grad_output))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::upsample_bilinear2d_backward", ""}).value(); return c10::Dispatcher::singleton().callUnboxedOnly( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(grad_output)), grad_output, output_size, input_size, align_corners); #endif } static inline Tensor & upsample_bicubic2d_out(Tensor & out, const Tensor & self, IntArrayRef output_size, bool align_corners) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(out, self)))) { case Backend::CPU: return CPUType::upsample_bicubic2d_out(out, self, output_size, align_corners); break; default: AT_ERROR("upsample_bicubic2d_out not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(out, self))); } #else static auto table = globalATenDispatch().getOpTable("aten::upsample_bicubic2d.out(Tensor self, int[2] output_size, bool align_corners, *, Tensor(a!) out) -> Tensor(a!)"); return table->callUnboxed(out, self, output_size, align_corners); #endif } static inline Tensor upsample_bicubic2d(const Tensor & self, IntArrayRef output_size, bool align_corners) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)))) { case Backend::CPU: return CPUType::upsample_bicubic2d(self, output_size, align_corners); break; default: AT_ERROR("upsample_bicubic2d not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(self))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::upsample_bicubic2d", ""}).value(); return c10::Dispatcher::singleton().callUnboxedOnly( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self, output_size, align_corners); #endif } static inline Tensor & upsample_bicubic2d_backward_out(Tensor & grad_input, const Tensor & grad_output, IntArrayRef output_size, IntArrayRef input_size, bool align_corners) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(grad_input, grad_output)))) { case Backend::CPU: return CPUType::upsample_bicubic2d_backward_out(grad_input, grad_output, output_size, input_size, align_corners); break; default: AT_ERROR("upsample_bicubic2d_backward_out not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(grad_input, grad_output))); } #else static auto table = globalATenDispatch().getOpTable("aten::upsample_bicubic2d_backward.grad_input(Tensor grad_output, int[2] output_size, int[4] input_size, bool align_corners, *, Tensor(a!) 
grad_input) -> Tensor(a!)"); return table->callUnboxed(grad_input, grad_output, output_size, input_size, align_corners); #endif } static inline Tensor upsample_bicubic2d_backward(const Tensor & grad_output, IntArrayRef output_size, IntArrayRef input_size, bool align_corners) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(grad_output)))) { case Backend::CPU: return CPUType::upsample_bicubic2d_backward(grad_output, output_size, input_size, align_corners); break; default: AT_ERROR("upsample_bicubic2d_backward not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(grad_output))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::upsample_bicubic2d_backward", ""}).value(); return c10::Dispatcher::singleton().callUnboxedOnly( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(grad_output)), grad_output, output_size, input_size, align_corners); #endif } static inline Tensor & upsample_trilinear3d_out(Tensor & out, const Tensor & self, IntArrayRef output_size, bool align_corners) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(out, self)))) { case Backend::CPU: return CPUType::upsample_trilinear3d_out(out, self, output_size, align_corners); break; default: AT_ERROR("upsample_trilinear3d_out not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(out, self))); } #else static auto table = globalATenDispatch().getOpTable("aten::upsample_trilinear3d.out(Tensor self, int[3] output_size, bool align_corners, *, Tensor(a!) out) -> Tensor(a!)"); return table->callUnboxed(out, self, output_size, align_corners); #endif } static inline Tensor upsample_trilinear3d(const Tensor & self, IntArrayRef output_size, bool align_corners) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)))) { case Backend::CPU: return CPUType::upsample_trilinear3d(self, output_size, align_corners); break; default: AT_ERROR("upsample_trilinear3d not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(self))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::upsample_trilinear3d", ""}).value(); return c10::Dispatcher::singleton().callUnboxedOnly( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self, output_size, align_corners); #endif } static inline Tensor & upsample_trilinear3d_backward_out(Tensor & grad_input, const Tensor & grad_output, IntArrayRef output_size, IntArrayRef input_size, bool align_corners) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(grad_input, grad_output)))) { case Backend::CPU: return CPUType::upsample_trilinear3d_backward_out(grad_input, grad_output, output_size, input_size, align_corners); break; default: AT_ERROR("upsample_trilinear3d_backward_out not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(grad_input, grad_output))); } #else static auto table = globalATenDispatch().getOpTable("aten::upsample_trilinear3d_backward.grad_input(Tensor grad_output, int[3] output_size, int[5] input_size, bool align_corners, *, Tensor(a!) 
grad_input) -> Tensor(a!)"); return table->callUnboxed(grad_input, grad_output, output_size, input_size, align_corners); #endif } static inline Tensor upsample_trilinear3d_backward(const Tensor & grad_output, IntArrayRef output_size, IntArrayRef input_size, bool align_corners) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(grad_output)))) { case Backend::CPU: return CPUType::upsample_trilinear3d_backward(grad_output, output_size, input_size, align_corners); break; default: AT_ERROR("upsample_trilinear3d_backward not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(grad_output))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::upsample_trilinear3d_backward", ""}).value(); return c10::Dispatcher::singleton().callUnboxedOnly( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(grad_output)), grad_output, output_size, input_size, align_corners); #endif } static inline Tensor & upsample_nearest1d_out(Tensor & out, const Tensor & self, IntArrayRef output_size) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(out, self)))) { case Backend::CPU: return CPUType::upsample_nearest1d_out(out, self, output_size); break; default: AT_ERROR("upsample_nearest1d_out not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(out, self))); } #else static auto table = globalATenDispatch().getOpTable("aten::upsample_nearest1d.out(Tensor self, int[1] output_size, *, Tensor(a!) out) -> Tensor(a!)"); return table->callUnboxed(out, self, output_size); #endif } static inline Tensor upsample_nearest1d(const Tensor & self, IntArrayRef output_size) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)))) { case Backend::CPU: return CPUType::upsample_nearest1d(self, output_size); break; default: AT_ERROR("upsample_nearest1d not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(self))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::upsample_nearest1d", ""}).value(); return c10::Dispatcher::singleton().callUnboxedOnly( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self, output_size); #endif } static inline Tensor & upsample_nearest1d_backward_out(Tensor & grad_input, const Tensor & grad_output, IntArrayRef output_size, IntArrayRef input_size) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(grad_input, grad_output)))) { case Backend::CPU: return CPUType::upsample_nearest1d_backward_out(grad_input, grad_output, output_size, input_size); break; default: AT_ERROR("upsample_nearest1d_backward_out not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(grad_input, grad_output))); } #else static auto table = globalATenDispatch().getOpTable("aten::upsample_nearest1d_backward.grad_input(Tensor grad_output, int[1] output_size, int[3] input_size, *, Tensor(a!) 
grad_input) -> Tensor(a!)"); return table->callUnboxed(grad_input, grad_output, output_size, input_size); #endif } static inline Tensor upsample_nearest1d_backward(const Tensor & grad_output, IntArrayRef output_size, IntArrayRef input_size) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(grad_output)))) { case Backend::CPU: return CPUType::upsample_nearest1d_backward(grad_output, output_size, input_size); break; default: AT_ERROR("upsample_nearest1d_backward not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(grad_output))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::upsample_nearest1d_backward", ""}).value(); return c10::Dispatcher::singleton().callUnboxedOnly( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(grad_output)), grad_output, output_size, input_size); #endif } static inline Tensor & upsample_nearest2d_out(Tensor & out, const Tensor & self, IntArrayRef output_size) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(out, self)))) { case Backend::CPU: return CPUType::upsample_nearest2d_out(out, self, output_size); break; default: AT_ERROR("upsample_nearest2d_out not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(out, self))); } #else static auto table = globalATenDispatch().getOpTable("aten::upsample_nearest2d.out(Tensor self, int[2] output_size, *, Tensor(a!) out) -> Tensor(a!)"); return table->callUnboxed(out, self, output_size); #endif } static inline Tensor upsample_nearest2d(const Tensor & self, IntArrayRef output_size) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)))) { case Backend::CPU: return CPUType::upsample_nearest2d(self, output_size); break; case Backend::QuantizedCPU: return QuantizedCPUType::upsample_nearest2d(self, output_size); break; default: AT_ERROR("upsample_nearest2d not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(self))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::upsample_nearest2d", ""}).value(); return c10::Dispatcher::singleton().callUnboxedOnly( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self, output_size); #endif } static inline Tensor & upsample_nearest2d_backward_out(Tensor & grad_input, const Tensor & grad_output, IntArrayRef output_size, IntArrayRef input_size) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(grad_input, grad_output)))) { case Backend::CPU: return CPUType::upsample_nearest2d_backward_out(grad_input, grad_output, output_size, input_size); break; default: AT_ERROR("upsample_nearest2d_backward_out not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(grad_input, grad_output))); } #else static auto table = globalATenDispatch().getOpTable("aten::upsample_nearest2d_backward.grad_input(Tensor grad_output, int[2] output_size, int[4] input_size, *, Tensor(a!) 
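// Illustrative sketch (not generated): upsample_nearest2d takes only the target
// output_size; there is no align_corners flag because nearest-neighbour
// sampling does not interpolate. Like upsample_bilinear2d it also has a
// QuantizedCPU branch under static dispatch. Shapes are assumptions:
//
//   at::Tensor small = at::randn({1, 3, 8, 8});
//   at::Tensor nn    = at::upsample_nearest2d(small, /*output_size=*/{32, 32});
//   // nn.sizes() == {1, 3, 32, 32}; each input pixel is repeated 4x4 times.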
grad_input) -> Tensor(a!)"); return table->callUnboxed(grad_input, grad_output, output_size, input_size); #endif } static inline Tensor upsample_nearest2d_backward(const Tensor & grad_output, IntArrayRef output_size, IntArrayRef input_size) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(grad_output)))) { case Backend::CPU: return CPUType::upsample_nearest2d_backward(grad_output, output_size, input_size); break; default: AT_ERROR("upsample_nearest2d_backward not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(grad_output))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::upsample_nearest2d_backward", ""}).value(); return c10::Dispatcher::singleton().callUnboxedOnly( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(grad_output)), grad_output, output_size, input_size); #endif } static inline Tensor & upsample_nearest3d_out(Tensor & out, const Tensor & self, IntArrayRef output_size) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(out, self)))) { case Backend::CPU: return CPUType::upsample_nearest3d_out(out, self, output_size); break; default: AT_ERROR("upsample_nearest3d_out not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(out, self))); } #else static auto table = globalATenDispatch().getOpTable("aten::upsample_nearest3d.out(Tensor self, int[3] output_size, *, Tensor(a!) out) -> Tensor(a!)"); return table->callUnboxed(out, self, output_size); #endif } static inline Tensor upsample_nearest3d(const Tensor & self, IntArrayRef output_size) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)))) { case Backend::CPU: return CPUType::upsample_nearest3d(self, output_size); break; default: AT_ERROR("upsample_nearest3d not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(self))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::upsample_nearest3d", ""}).value(); return c10::Dispatcher::singleton().callUnboxedOnly( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self, output_size); #endif } static inline Tensor & upsample_nearest3d_backward_out(Tensor & grad_input, const Tensor & grad_output, IntArrayRef output_size, IntArrayRef input_size) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(grad_input, grad_output)))) { case Backend::CPU: return CPUType::upsample_nearest3d_backward_out(grad_input, grad_output, output_size, input_size); break; default: AT_ERROR("upsample_nearest3d_backward_out not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(grad_input, grad_output))); } #else static auto table = globalATenDispatch().getOpTable("aten::upsample_nearest3d_backward.grad_input(Tensor grad_output, int[3] output_size, int[5] input_size, *, Tensor(a!) 
grad_input) -> Tensor(a!)"); return table->callUnboxed(grad_input, grad_output, output_size, input_size); #endif } static inline Tensor upsample_nearest3d_backward(const Tensor & grad_output, IntArrayRef output_size, IntArrayRef input_size) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(grad_output)))) { case Backend::CPU: return CPUType::upsample_nearest3d_backward(grad_output, output_size, input_size); break; default: AT_ERROR("upsample_nearest3d_backward not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(grad_output))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::upsample_nearest3d_backward", ""}).value(); return c10::Dispatcher::singleton().callUnboxedOnly( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(grad_output)), grad_output, output_size, input_size); #endif } static inline Tensor & sigmoid_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & output) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(grad_input, grad_output, output)))) { case Backend::CPU: return CPUType::sigmoid_backward_out(grad_input, grad_output, output); break; default: AT_ERROR("sigmoid_backward_out not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(grad_input, grad_output, output))); } #else static auto table = globalATenDispatch().getOpTable("aten::sigmoid_backward.grad_input(Tensor grad_output, Tensor output, *, Tensor(a!) grad_input) -> Tensor(a!)"); return table->callUnboxed(grad_input, grad_output, output); #endif } static inline Tensor sigmoid_backward(const Tensor & grad_output, const Tensor & output) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(grad_output, output)))) { case Backend::CPU: return CPUType::sigmoid_backward(grad_output, output); break; default: AT_ERROR("sigmoid_backward not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(grad_output, output))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::sigmoid_backward", ""}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(grad_output, output)), grad_output, output); #endif } static inline Tensor & tanh_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & output) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(grad_input, grad_output, output)))) { case Backend::CPU: return CPUType::tanh_backward_out(grad_input, grad_output, output); break; default: AT_ERROR("tanh_backward_out not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(grad_input, grad_output, output))); } #else static auto table = globalATenDispatch().getOpTable("aten::tanh_backward.grad_input(Tensor grad_output, Tensor output, *, Tensor(a!) 
grad_input) -> Tensor(a!)"); return table->callUnboxed(grad_input, grad_output, output); #endif } static inline Tensor tanh_backward(const Tensor & grad_output, const Tensor & output) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(grad_output, output)))) { case Backend::CPU: return CPUType::tanh_backward(grad_output, output); break; default: AT_ERROR("tanh_backward not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(grad_output, output))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::tanh_backward", ""}).value(); return c10::Dispatcher::singleton().callUnboxed( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(grad_output, output)), grad_output, output); #endif } static inline Tensor & slow_conv_transpose2d_out(Tensor & out, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(out, self, weight, bias)))) { case Backend::CPU: return CPUType::slow_conv_transpose2d_out(out, self, weight, kernel_size, bias, stride, padding, output_padding, dilation); break; default: AT_ERROR("slow_conv_transpose2d_out not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(out, self, weight, bias))); } #else static auto table = globalATenDispatch().getOpTable("aten::slow_conv_transpose2d.out(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias=None, int[2] stride=1, int[2] padding=0, int[2] output_padding=0, int[2] dilation=1, *, Tensor(a!) out) -> Tensor(a!)"); return table->callUnboxed(out, self, weight, kernel_size, bias, stride, padding, output_padding, dilation); #endif } static inline Tensor slow_conv_transpose2d(const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self, weight, bias)))) { case Backend::CPU: return CPUType::slow_conv_transpose2d(self, weight, kernel_size, bias, stride, padding, output_padding, dilation); break; default: AT_ERROR("slow_conv_transpose2d not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(self, weight, bias))); } #else static auto table = globalATenDispatch().getOpTable("aten::slow_conv_transpose2d(Tensor self, Tensor weight, int[2] kernel_size, Tensor? 
bias=None, int[2] stride=1, int[2] padding=0, int[2] output_padding=0, int[2] dilation=1) -> Tensor"); return table->callUnboxed(self, weight, kernel_size, bias, stride, padding, output_padding, dilation); #endif } static inline std::tuple<Tensor &,Tensor &,Tensor &> slow_conv_transpose2d_backward_out(Tensor & grad_input, Tensor & grad_weight, Tensor & grad_bias, const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation, const Tensor & columns, const Tensor & ones) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(grad_input, grad_weight, grad_bias, grad_output, self, weight, columns, ones)))) { case Backend::CPU: return CPUType::slow_conv_transpose2d_backward_out(grad_input, grad_weight, grad_bias, grad_output, self, weight, kernel_size, stride, padding, output_padding, dilation, columns, ones); break; default: AT_ERROR("slow_conv_transpose2d_backward_out not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(grad_input, grad_weight, grad_bias, grad_output, self, weight, columns, ones))); } #else static auto table = globalATenDispatch().getOpTable("aten::slow_conv_transpose2d_backward.grad_output(Tensor grad_output, Tensor self, Tensor weight, int[2] kernel_size, int[2] stride, int[2] padding, int[2] output_padding, int[2] dilation, Tensor columns, Tensor ones, *, Tensor?(a!) grad_input, Tensor?(b!) grad_weight, Tensor?(c!) grad_bias) -> (Tensor(a!), Tensor(b!), Tensor(c!))"); return table->callUnboxed<std::tuple<Tensor &,Tensor &,Tensor &>, Tensor &, Tensor &, Tensor &, const Tensor &, const Tensor &, const Tensor &, IntArrayRef, IntArrayRef, IntArrayRef, IntArrayRef, IntArrayRef, const Tensor &, const Tensor &>(grad_input, grad_weight, grad_bias, grad_output, self, weight, kernel_size, stride, padding, output_padding, dilation, columns, ones); #endif } static inline std::tuple<Tensor,Tensor,Tensor> slow_conv_transpose2d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation, const Tensor & columns, const Tensor & ones, std::array<bool,3> output_mask) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(grad_output, self, weight, columns, ones)))) { case Backend::CPU: return CPUType::slow_conv_transpose2d_backward(grad_output, self, weight, kernel_size, stride, padding, output_padding, dilation, columns, ones, output_mask); break; default: AT_ERROR("slow_conv_transpose2d_backward not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(grad_output, self, weight, columns, ones))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::slow_conv_transpose2d_backward", "output_mask"}).value(); return c10::Dispatcher::singleton().callUnboxedOnly<std::tuple<Tensor,Tensor,Tensor>, const Tensor &, const Tensor &, const Tensor &, IntArrayRef, IntArrayRef, IntArrayRef, IntArrayRef, IntArrayRef, const Tensor &, const Tensor &, std::array<bool,3>>( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(grad_output, self, weight, columns, ones)), grad_output, self, weight, kernel_size, stride, padding, output_padding, dilation, columns, ones, output_mask); #endif } static inline Tensor & slow_conv_transpose3d_out(Tensor & out, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, 
IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(out, self, weight, bias)))) { case Backend::CPU: return CPUType::slow_conv_transpose3d_out(out, self, weight, kernel_size, bias, stride, padding, output_padding, dilation); break; default: AT_ERROR("slow_conv_transpose3d_out not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(out, self, weight, bias))); } #else static auto table = globalATenDispatch().getOpTable("aten::slow_conv_transpose3d.out(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias=None, int[3] stride=1, int[3] padding=0, int[3] output_padding=0, int[3] dilation=1, *, Tensor(a!) out) -> Tensor(a!)"); return table->callUnboxed(out, self, weight, kernel_size, bias, stride, padding, output_padding, dilation); #endif } static inline Tensor slow_conv_transpose3d(const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self, weight, bias)))) { case Backend::CPU: return CPUType::slow_conv_transpose3d(self, weight, kernel_size, bias, stride, padding, output_padding, dilation); break; default: AT_ERROR("slow_conv_transpose3d not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(self, weight, bias))); } #else static auto table = globalATenDispatch().getOpTable("aten::slow_conv_transpose3d(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias=None, int[3] stride=1, int[3] padding=0, int[3] output_padding=0, int[3] dilation=1) -> Tensor"); return table->callUnboxed(self, weight, kernel_size, bias, stride, padding, output_padding, dilation); #endif } static inline std::tuple<Tensor &,Tensor &,Tensor &> slow_conv_transpose3d_backward_out(Tensor & grad_input, Tensor & grad_weight, Tensor & grad_bias, const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation, const Tensor & finput, const Tensor & fgrad_input) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(grad_input, grad_weight, grad_bias, grad_output, self, weight, finput, fgrad_input)))) { case Backend::CPU: return CPUType::slow_conv_transpose3d_backward_out(grad_input, grad_weight, grad_bias, grad_output, self, weight, kernel_size, stride, padding, output_padding, dilation, finput, fgrad_input); break; default: AT_ERROR("slow_conv_transpose3d_backward_out not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(grad_input, grad_weight, grad_bias, grad_output, self, weight, finput, fgrad_input))); } #else static auto table = globalATenDispatch().getOpTable("aten::slow_conv_transpose3d_backward.grad_output(Tensor grad_output, Tensor self, Tensor weight, int[3] kernel_size, int[3] stride, int[3] padding, int[3] output_padding, int[3] dilation, Tensor finput, Tensor fgrad_input, *, Tensor?(a!) grad_input, Tensor?(b!) grad_weight, Tensor?(c!) 
grad_bias) -> (Tensor(a!), Tensor(b!), Tensor(c!))"); return table->callUnboxed<std::tuple<Tensor &,Tensor &,Tensor &>, Tensor &, Tensor &, Tensor &, const Tensor &, const Tensor &, const Tensor &, IntArrayRef, IntArrayRef, IntArrayRef, IntArrayRef, IntArrayRef, const Tensor &, const Tensor &>(grad_input, grad_weight, grad_bias, grad_output, self, weight, kernel_size, stride, padding, output_padding, dilation, finput, fgrad_input); #endif } static inline std::tuple<Tensor,Tensor,Tensor> slow_conv_transpose3d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation, const Tensor & finput, const Tensor & fgrad_input, std::array<bool,3> output_mask) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(grad_output, self, weight, finput, fgrad_input)))) { case Backend::CPU: return CPUType::slow_conv_transpose3d_backward(grad_output, self, weight, kernel_size, stride, padding, output_padding, dilation, finput, fgrad_input, output_mask); break; default: AT_ERROR("slow_conv_transpose3d_backward not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(grad_output, self, weight, finput, fgrad_input))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::slow_conv_transpose3d_backward", "output_mask"}).value(); return c10::Dispatcher::singleton().callUnboxedOnly<std::tuple<Tensor,Tensor,Tensor>, const Tensor &, const Tensor &, const Tensor &, IntArrayRef, IntArrayRef, IntArrayRef, IntArrayRef, IntArrayRef, const Tensor &, const Tensor &, std::array<bool,3>>( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(grad_output, self, weight, finput, fgrad_input)), grad_output, self, weight, kernel_size, stride, padding, output_padding, dilation, finput, fgrad_input, output_mask); #endif } static inline Tensor & thnn_conv2d_out(Tensor & out, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding) { #ifdef USE_STATIC_DISPATCH return TypeDefault::thnn_conv2d_out(out, self, weight, kernel_size, bias, stride, padding); #else static auto table = globalATenDispatch().getOpTable("aten::thnn_conv2d.out(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias=None, int[2] stride=1, int[2] padding=0, *, Tensor(a!) out) -> Tensor(a!)"); return table->callUnboxed(out, self, weight, kernel_size, bias, stride, padding); #endif } static inline Tensor thnn_conv2d(const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding) { #ifdef USE_STATIC_DISPATCH return TypeDefault::thnn_conv2d(self, weight, kernel_size, bias, stride, padding); #else static auto table = globalATenDispatch().getOpTable("aten::thnn_conv2d(Tensor self, Tensor weight, int[2] kernel_size, Tensor? 
bias=None, int[2] stride=1, int[2] padding=0) -> Tensor"); return table->callUnboxed(self, weight, kernel_size, bias, stride, padding); #endif } static inline std::tuple<Tensor &,Tensor &,Tensor &> thnn_conv2d_forward_out(Tensor & output, Tensor & finput, Tensor & fgrad_input, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(output, finput, fgrad_input, self, weight, bias)))) { case Backend::CPU: return CPUType::thnn_conv2d_forward_out(output, finput, fgrad_input, self, weight, kernel_size, bias, stride, padding); break; default: AT_ERROR("thnn_conv2d_forward_out not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(output, finput, fgrad_input, self, weight, bias))); } #else static auto table = globalATenDispatch().getOpTable("aten::thnn_conv2d_forward.output(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias, int[2] stride, int[2] padding, *, Tensor(a!) output, Tensor(b!) finput, Tensor(c!) fgrad_input) -> (Tensor(a!), Tensor(b!), Tensor(c!))"); return table->callUnboxed<std::tuple<Tensor &,Tensor &,Tensor &>, Tensor &, Tensor &, Tensor &, const Tensor &, const Tensor &, IntArrayRef, const Tensor &, IntArrayRef, IntArrayRef>(output, finput, fgrad_input, self, weight, kernel_size, bias, stride, padding); #endif } static inline std::tuple<Tensor,Tensor,Tensor> thnn_conv2d_forward(const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self, weight, bias)))) { case Backend::CPU: return CPUType::thnn_conv2d_forward(self, weight, kernel_size, bias, stride, padding); break; default: AT_ERROR("thnn_conv2d_forward not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(self, weight, bias))); } #else static auto table = globalATenDispatch().getOpTable("aten::thnn_conv2d_forward(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias, int[2] stride, int[2] padding) -> (Tensor output, Tensor finput, Tensor fgrad_input)"); return table->callUnboxed<std::tuple<Tensor,Tensor,Tensor>, const Tensor &, const Tensor &, IntArrayRef, const Tensor &, IntArrayRef, IntArrayRef>(self, weight, kernel_size, bias, stride, padding); #endif } static inline std::tuple<Tensor &,Tensor &,Tensor &> thnn_conv2d_backward_out(Tensor & grad_input, Tensor & grad_weight, Tensor & grad_bias, const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, const Tensor & finput, const Tensor & fgrad_input) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(grad_input, grad_weight, grad_bias, grad_output, self, weight, finput, fgrad_input)))) { case Backend::CPU: return CPUType::thnn_conv2d_backward_out(grad_input, grad_weight, grad_bias, grad_output, self, weight, kernel_size, stride, padding, finput, fgrad_input); break; default: AT_ERROR("thnn_conv2d_backward_out not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(grad_input, grad_weight, grad_bias, grad_output, self, weight, finput, fgrad_input))); } #else static auto table = globalATenDispatch().getOpTable("aten::thnn_conv2d_backward.grad_input(Tensor grad_output, Tensor self, Tensor weight, int[2] kernel_size, int[2] stride, int[2] padding, Tensor finput, Tensor fgrad_input, *, Tensor?(a!) 
grad_input, Tensor?(b!) grad_weight, Tensor?(c!) grad_bias) -> (Tensor(a!), Tensor(b!), Tensor(c!))"); return table->callUnboxed<std::tuple<Tensor &,Tensor &,Tensor &>, Tensor &, Tensor &, Tensor &, const Tensor &, const Tensor &, const Tensor &, IntArrayRef, IntArrayRef, IntArrayRef, const Tensor &, const Tensor &>(grad_input, grad_weight, grad_bias, grad_output, self, weight, kernel_size, stride, padding, finput, fgrad_input); #endif } static inline std::tuple<Tensor,Tensor,Tensor> thnn_conv2d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, const Tensor & finput, const Tensor & fgrad_input, std::array<bool,3> output_mask) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(grad_output, self, weight, finput, fgrad_input)))) { case Backend::CPU: return CPUType::thnn_conv2d_backward(grad_output, self, weight, kernel_size, stride, padding, finput, fgrad_input, output_mask); break; default: AT_ERROR("thnn_conv2d_backward not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(grad_output, self, weight, finput, fgrad_input))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::thnn_conv2d_backward", "output_mask"}).value(); return c10::Dispatcher::singleton().callUnboxedOnly<std::tuple<Tensor,Tensor,Tensor>, const Tensor &, const Tensor &, const Tensor &, IntArrayRef, IntArrayRef, IntArrayRef, const Tensor &, const Tensor &, std::array<bool,3>>( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(grad_output, self, weight, finput, fgrad_input)), grad_output, self, weight, kernel_size, stride, padding, finput, fgrad_input, output_mask); #endif } static inline Tensor & thnn_conv_depthwise2d_out(Tensor & out, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation) { #ifdef USE_STATIC_DISPATCH return TypeDefault::thnn_conv_depthwise2d_out(out, self, weight, kernel_size, bias, stride, padding, dilation); #else static auto table = globalATenDispatch().getOpTable("aten::thnn_conv_depthwise2d.out(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias=None, int[2] stride=1, int[2] padding=0, int[2] dilation=1, *, Tensor(a!) out) -> Tensor(a!)"); return table->callUnboxed(out, self, weight, kernel_size, bias, stride, padding, dilation); #endif } static inline Tensor thnn_conv_depthwise2d(const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation) { #ifdef USE_STATIC_DISPATCH return TypeDefault::thnn_conv_depthwise2d(self, weight, kernel_size, bias, stride, padding, dilation); #else static auto table = globalATenDispatch().getOpTable("aten::thnn_conv_depthwise2d(Tensor self, Tensor weight, int[2] kernel_size, Tensor? 
bias=None, int[2] stride=1, int[2] padding=0, int[2] dilation=1) -> Tensor"); return table->callUnboxed(self, weight, kernel_size, bias, stride, padding, dilation); #endif } static inline Tensor & thnn_conv_depthwise2d_forward_out(Tensor & out, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(out, self, weight, bias)))) { default: AT_ERROR("thnn_conv_depthwise2d_forward_out not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(out, self, weight, bias))); } #else static auto table = globalATenDispatch().getOpTable("aten::thnn_conv_depthwise2d_forward.out(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias, int[2] stride, int[2] padding, int[2] dilation, *, Tensor(a!) out) -> Tensor(a!)"); return table->callUnboxed(out, self, weight, kernel_size, bias, stride, padding, dilation); #endif } static inline Tensor thnn_conv_depthwise2d_forward(const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self, weight, bias)))) { default: AT_ERROR("thnn_conv_depthwise2d_forward not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(self, weight, bias))); } #else static auto table = globalATenDispatch().getOpTable("aten::thnn_conv_depthwise2d_forward(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias, int[2] stride, int[2] padding, int[2] dilation) -> Tensor"); return table->callUnboxed(self, weight, kernel_size, bias, stride, padding, dilation); #endif } static inline std::tuple<Tensor &,Tensor &> thnn_conv_depthwise2d_backward_out(Tensor & grad_input, Tensor & grad_weight, const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(grad_input, grad_weight, grad_output, self, weight)))) { default: AT_ERROR("thnn_conv_depthwise2d_backward_out not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(grad_input, grad_weight, grad_output, self, weight))); } #else static auto table = globalATenDispatch().getOpTable("aten::thnn_conv_depthwise2d_backward.grad_input(Tensor grad_output, Tensor self, Tensor weight, int[2] kernel_size, int[2] stride, int[2] padding, int[2] dilation, *, Tensor?(a!) grad_input, Tensor?(b!) 
grad_weight) -> (Tensor(a!), Tensor(b!))"); return table->callUnboxed<std::tuple<Tensor &,Tensor &>, Tensor &, Tensor &, const Tensor &, const Tensor &, const Tensor &, IntArrayRef, IntArrayRef, IntArrayRef, IntArrayRef>(grad_input, grad_weight, grad_output, self, weight, kernel_size, stride, padding, dilation); #endif } static inline std::tuple<Tensor,Tensor> thnn_conv_depthwise2d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, std::array<bool,2> output_mask) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(grad_output, self, weight)))) { default: AT_ERROR("thnn_conv_depthwise2d_backward not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(grad_output, self, weight))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::thnn_conv_depthwise2d_backward", "output_mask"}).value(); return c10::Dispatcher::singleton().callUnboxedOnly<std::tuple<Tensor,Tensor>, const Tensor &, const Tensor &, const Tensor &, IntArrayRef, IntArrayRef, IntArrayRef, IntArrayRef, std::array<bool,2>>( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(grad_output, self, weight)), grad_output, self, weight, kernel_size, stride, padding, dilation, output_mask); #endif } static inline Tensor & thnn_conv3d_out(Tensor & out, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding) { #ifdef USE_STATIC_DISPATCH return TypeDefault::thnn_conv3d_out(out, self, weight, kernel_size, bias, stride, padding); #else static auto table = globalATenDispatch().getOpTable("aten::thnn_conv3d.out(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias=None, int[3] stride=1, int[3] padding=0, *, Tensor(a!) out) -> Tensor(a!)"); return table->callUnboxed(out, self, weight, kernel_size, bias, stride, padding); #endif } static inline Tensor thnn_conv3d(const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding) { #ifdef USE_STATIC_DISPATCH return TypeDefault::thnn_conv3d(self, weight, kernel_size, bias, stride, padding); #else static auto table = globalATenDispatch().getOpTable("aten::thnn_conv3d(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias=None, int[3] stride=1, int[3] padding=0) -> Tensor"); return table->callUnboxed(self, weight, kernel_size, bias, stride, padding); #endif } static inline std::tuple<Tensor &,Tensor &,Tensor &> thnn_conv3d_forward_out(Tensor & output, Tensor & finput, Tensor & fgrad_input, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(output, finput, fgrad_input, self, weight, bias)))) { case Backend::CPU: return CPUType::thnn_conv3d_forward_out(output, finput, fgrad_input, self, weight, kernel_size, bias, stride, padding); break; default: AT_ERROR("thnn_conv3d_forward_out not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(output, finput, fgrad_input, self, weight, bias))); } #else static auto table = globalATenDispatch().getOpTable("aten::thnn_conv3d_forward.output(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias, int[3] stride, int[3] padding, *, Tensor(a!) output, Tensor(b!) finput, Tensor(c!) 
fgrad_input) -> (Tensor(a!), Tensor(b!), Tensor(c!))"); return table->callUnboxed<std::tuple<Tensor &,Tensor &,Tensor &>, Tensor &, Tensor &, Tensor &, const Tensor &, const Tensor &, IntArrayRef, const Tensor &, IntArrayRef, IntArrayRef>(output, finput, fgrad_input, self, weight, kernel_size, bias, stride, padding); #endif } static inline std::tuple<Tensor,Tensor,Tensor> thnn_conv3d_forward(const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self, weight, bias)))) { case Backend::CPU: return CPUType::thnn_conv3d_forward(self, weight, kernel_size, bias, stride, padding); break; default: AT_ERROR("thnn_conv3d_forward not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(self, weight, bias))); } #else static auto table = globalATenDispatch().getOpTable("aten::thnn_conv3d_forward(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias, int[3] stride, int[3] padding) -> (Tensor output, Tensor finput, Tensor fgrad_input)"); return table->callUnboxed<std::tuple<Tensor,Tensor,Tensor>, const Tensor &, const Tensor &, IntArrayRef, const Tensor &, IntArrayRef, IntArrayRef>(self, weight, kernel_size, bias, stride, padding); #endif } static inline std::tuple<Tensor &,Tensor &,Tensor &> thnn_conv3d_backward_out(Tensor & grad_input, Tensor & grad_weight, Tensor & grad_bias, const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, const Tensor & finput, const Tensor & fgrad_input) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(grad_input, grad_weight, grad_bias, grad_output, self, weight, finput, fgrad_input)))) { case Backend::CPU: return CPUType::thnn_conv3d_backward_out(grad_input, grad_weight, grad_bias, grad_output, self, weight, kernel_size, stride, padding, finput, fgrad_input); break; default: AT_ERROR("thnn_conv3d_backward_out not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(grad_input, grad_weight, grad_bias, grad_output, self, weight, finput, fgrad_input))); } #else static auto table = globalATenDispatch().getOpTable("aten::thnn_conv3d_backward.grad_input(Tensor grad_output, Tensor self, Tensor weight, int[3] kernel_size, int[3] stride, int[3] padding, Tensor finput, Tensor fgrad_input, *, Tensor?(a!) grad_input, Tensor?(b!) grad_weight, Tensor?(c!) 
grad_bias) -> (Tensor(a!), Tensor(b!), Tensor(c!))"); return table->callUnboxed<std::tuple<Tensor &,Tensor &,Tensor &>, Tensor &, Tensor &, Tensor &, const Tensor &, const Tensor &, const Tensor &, IntArrayRef, IntArrayRef, IntArrayRef, const Tensor &, const Tensor &>(grad_input, grad_weight, grad_bias, grad_output, self, weight, kernel_size, stride, padding, finput, fgrad_input); #endif } static inline std::tuple<Tensor,Tensor,Tensor> thnn_conv3d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, const Tensor & finput, const Tensor & fgrad_input, std::array<bool,3> output_mask) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(grad_output, self, weight, finput, fgrad_input)))) { case Backend::CPU: return CPUType::thnn_conv3d_backward(grad_output, self, weight, kernel_size, stride, padding, finput, fgrad_input, output_mask); break; default: AT_ERROR("thnn_conv3d_backward not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(grad_output, self, weight, finput, fgrad_input))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::thnn_conv3d_backward", "output_mask"}).value(); return c10::Dispatcher::singleton().callUnboxedOnly<std::tuple<Tensor,Tensor,Tensor>, const Tensor &, const Tensor &, const Tensor &, IntArrayRef, IntArrayRef, IntArrayRef, const Tensor &, const Tensor &, std::array<bool,3>>( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(grad_output, self, weight, finput, fgrad_input)), grad_output, self, weight, kernel_size, stride, padding, finput, fgrad_input, output_mask); #endif } static inline Tensor slow_conv_dilated2d(const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self, weight, bias)))) { case Backend::CPU: return CPUType::slow_conv_dilated2d(self, weight, kernel_size, bias, stride, padding, dilation); break; default: AT_ERROR("slow_conv_dilated2d not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(self, weight, bias))); } #else static auto table = globalATenDispatch().getOpTable("aten::slow_conv_dilated2d(Tensor self, Tensor weight, int[2] kernel_size, Tensor? 
bias=None, int[2] stride=1, int[2] padding=0, int[2] dilation=1) -> Tensor"); return table->callUnboxed(self, weight, kernel_size, bias, stride, padding, dilation); #endif } static inline std::tuple<Tensor,Tensor,Tensor> slow_conv_dilated2d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, std::array<bool,3> output_mask) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(grad_output, self, weight)))) { case Backend::CPU: return CPUType::slow_conv_dilated2d_backward(grad_output, self, weight, kernel_size, stride, padding, dilation, output_mask); break; default: AT_ERROR("slow_conv_dilated2d_backward not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(grad_output, self, weight))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::slow_conv_dilated2d_backward", ""}).value(); return c10::Dispatcher::singleton().callUnboxedOnly<std::tuple<Tensor,Tensor,Tensor>, const Tensor &, const Tensor &, const Tensor &, IntArrayRef, IntArrayRef, IntArrayRef, IntArrayRef, std::array<bool,3>>( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(grad_output, self, weight)), grad_output, self, weight, kernel_size, stride, padding, dilation, output_mask); #endif } static inline Tensor slow_conv_dilated3d(const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self, weight, bias)))) { case Backend::CPU: return CPUType::slow_conv_dilated3d(self, weight, kernel_size, bias, stride, padding, dilation); break; default: AT_ERROR("slow_conv_dilated3d not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(self, weight, bias))); } #else static auto table = globalATenDispatch().getOpTable("aten::slow_conv_dilated3d(Tensor self, Tensor weight, int[3] kernel_size, Tensor? 
bias=None, int[3] stride=1, int[3] padding=0, int[3] dilation=1) -> Tensor"); return table->callUnboxed(self, weight, kernel_size, bias, stride, padding, dilation); #endif } static inline std::tuple<Tensor,Tensor,Tensor> slow_conv_dilated3d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, std::array<bool,3> output_mask) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(grad_output, self, weight)))) { case Backend::CPU: return CPUType::slow_conv_dilated3d_backward(grad_output, self, weight, kernel_size, stride, padding, dilation, output_mask); break; default: AT_ERROR("slow_conv_dilated3d_backward not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(grad_output, self, weight))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::slow_conv_dilated3d_backward", ""}).value(); return c10::Dispatcher::singleton().callUnboxedOnly<std::tuple<Tensor,Tensor,Tensor>, const Tensor &, const Tensor &, const Tensor &, IntArrayRef, IntArrayRef, IntArrayRef, IntArrayRef, std::array<bool,3>>( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(grad_output, self, weight)), grad_output, self, weight, kernel_size, stride, padding, dilation, output_mask); #endif } static inline Tensor & col2im_out(Tensor & out, const Tensor & self, IntArrayRef output_size, IntArrayRef kernel_size, IntArrayRef dilation, IntArrayRef padding, IntArrayRef stride) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(out, self)))) { case Backend::CPU: return CPUType::col2im_out(out, self, output_size, kernel_size, dilation, padding, stride); break; default: AT_ERROR("col2im_out not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(out, self))); } #else static auto table = globalATenDispatch().getOpTable("aten::col2im.out(Tensor self, int[2] output_size, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride, *, Tensor(a!) 
out) -> Tensor(a!)"); return table->callUnboxed(out, self, output_size, kernel_size, dilation, padding, stride); #endif } static inline Tensor col2im(const Tensor & self, IntArrayRef output_size, IntArrayRef kernel_size, IntArrayRef dilation, IntArrayRef padding, IntArrayRef stride) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)))) { case Backend::CPU: return CPUType::col2im(self, output_size, kernel_size, dilation, padding, stride); break; default: AT_ERROR("col2im not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(self))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::col2im", ""}).value(); return c10::Dispatcher::singleton().callUnboxedOnly( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self, output_size, kernel_size, dilation, padding, stride); #endif } static inline Tensor & col2im_backward_out(Tensor & grad_input, const Tensor & grad_output, IntArrayRef kernel_size, IntArrayRef dilation, IntArrayRef padding, IntArrayRef stride) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(grad_input, grad_output)))) { case Backend::CPU: return CPUType::col2im_backward_out(grad_input, grad_output, kernel_size, dilation, padding, stride); break; default: AT_ERROR("col2im_backward_out not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(grad_input, grad_output))); } #else static auto table = globalATenDispatch().getOpTable("aten::col2im_backward.grad_input(Tensor grad_output, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride, *, Tensor(a!) grad_input) -> Tensor(a!)"); return table->callUnboxed(grad_input, grad_output, kernel_size, dilation, padding, stride); #endif } static inline Tensor col2im_backward(const Tensor & grad_output, IntArrayRef kernel_size, IntArrayRef dilation, IntArrayRef padding, IntArrayRef stride) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(grad_output)))) { case Backend::CPU: return CPUType::col2im_backward(grad_output, kernel_size, dilation, padding, stride); break; default: AT_ERROR("col2im_backward not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(grad_output))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::col2im_backward", ""}).value(); return c10::Dispatcher::singleton().callUnboxedOnly( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(grad_output)), grad_output, kernel_size, dilation, padding, stride); #endif } static inline Tensor & im2col_out(Tensor & out, const Tensor & self, IntArrayRef kernel_size, IntArrayRef dilation, IntArrayRef padding, IntArrayRef stride) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(out, self)))) { case Backend::CPU: return CPUType::im2col_out(out, self, kernel_size, dilation, padding, stride); break; default: AT_ERROR("im2col_out not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(out, self))); } #else static auto table = globalATenDispatch().getOpTable("aten::im2col.out(Tensor self, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride, *, Tensor(a!) 
out) -> Tensor(a!)"); return table->callUnboxed(out, self, kernel_size, dilation, padding, stride); #endif } static inline Tensor im2col(const Tensor & self, IntArrayRef kernel_size, IntArrayRef dilation, IntArrayRef padding, IntArrayRef stride) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)))) { case Backend::CPU: return CPUType::im2col(self, kernel_size, dilation, padding, stride); break; default: AT_ERROR("im2col not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(self))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::im2col", ""}).value(); return c10::Dispatcher::singleton().callUnboxedOnly( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(self)), self, kernel_size, dilation, padding, stride); #endif } static inline Tensor & im2col_backward_out(Tensor & grad_input, const Tensor & grad_output, IntArrayRef input_size, IntArrayRef kernel_size, IntArrayRef dilation, IntArrayRef padding, IntArrayRef stride) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(grad_input, grad_output)))) { case Backend::CPU: return CPUType::im2col_backward_out(grad_input, grad_output, input_size, kernel_size, dilation, padding, stride); break; default: AT_ERROR("im2col_backward_out not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(grad_input, grad_output))); } #else static auto table = globalATenDispatch().getOpTable("aten::im2col_backward.grad_input(Tensor grad_output, int[2] input_size, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride, *, Tensor(a!) grad_input) -> Tensor(a!)"); return table->callUnboxed(grad_input, grad_output, input_size, kernel_size, dilation, padding, stride); #endif } static inline Tensor im2col_backward(const Tensor & grad_output, IntArrayRef input_size, IntArrayRef kernel_size, IntArrayRef dilation, IntArrayRef padding, IntArrayRef stride) { #ifdef USE_STATIC_DISPATCH switch(tensorTypeIdToBackend(impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(grad_output)))) { case Backend::CPU: return CPUType::im2col_backward(grad_output, input_size, kernel_size, dilation, padding, stride); break; default: AT_ERROR("im2col_backward not implemented for ", at::toString(at::detail::multi_dispatch_tensor_type_set(grad_output))); } #else static c10::OperatorHandle op = c10::Dispatcher::singleton() .findSchema({"aten::im2col_backward", ""}).value(); return c10::Dispatcher::singleton().callUnboxedOnly( op, impl::dispatchTypeId(at::detail::multi_dispatch_tensor_type_set(grad_output)), grad_output, input_size, kernel_size, dilation, padding, stride); #endif } }
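// Usage sketch (illustrative only; the shapes, sizes, and variable names below are
// arbitrary assumptions, not part of the generated API): each function above is a
// thin wrapper that either dispatches statically under USE_STATIC_DISPATCH or looks
// the operator up in the c10 dispatcher, so callers simply invoke the at:: free
// function declared in this header.
//
//   #include <ATen/ATen.h>
//
//   void upsample_nearest2d_example() {
//     at::Tensor input = at::rand({1, 3, 8, 8});                  // NCHW input (assumed shape)
//     // forward: nearest-neighbor upsample to a 16x16 spatial size
//     at::Tensor up = at::upsample_nearest2d(input, {16, 16});
//     // backward: map an upstream gradient back to the input shape
//     at::Tensor grad_out = at::ones_like(up);
//     at::Tensor grad_in = at::upsample_nearest2d_backward(grad_out, {16, 16}, {1, 3, 8, 8});
//   }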