#pragma once

// @generated by aten/src/ATen/gen.py

#include <c10/core/TensorOptions.h>
#include <c10/core/Scalar.h>
#include <c10/core/QScheme.h>
#include <c10/core/MemoryFormat.h>
#include <c10/util/ArrayRef.h>
#include <c10/util/intrusive_ptr.h>
#include <torch/csrc/WindowsTorchApiMacro.h>
#include <ATen/Dimname.h>
#include <ATen/core/EnableNamedTensor.h>

namespace c10 {
struct Storage;
}

namespace at {

class Tensor;
using TensorList = ArrayRef<Tensor>;

class Context;
struct Generator;

struct Quantizer;
// This is a temporary typedef to enable Quantizer in the aten native function API;
// we'll remove it once we actually expose the Quantizer class to the frontend.
using ConstQuantizerPtr = const c10::intrusive_ptr<Quantizer>&;

#ifdef USE_STATIC_DISPATCH
namespace MkldnnCPUType {
  Tensor add(const Tensor & self, const Tensor & other, Scalar alpha);
  Tensor & add_(Tensor & self, const Tensor & other, Scalar alpha);
  Tensor & add_out(Tensor & out, const Tensor & self, const Tensor & other, Scalar alpha);
  Tensor empty(IntArrayRef size, const TensorOptions & options, c10::optional<MemoryFormat> memory_format);
  Tensor mkldnn_linear(const Tensor & input, const Tensor & weight, const Tensor & bias);
  Tensor mkldnn_max_pool2d(const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode);
  Tensor mul(const Tensor & self, const Tensor & other);
  Tensor & mul_(Tensor & self, const Tensor & other);
  Tensor & mul_out(Tensor & out, const Tensor & self, const Tensor & other);
  std::tuple<Tensor,Tensor,Tensor> native_batch_norm(const Tensor & input, const Tensor & weight, const Tensor & bias, const Tensor & running_mean, const Tensor & running_var, bool training, double momentum, double eps);
  Tensor _mkldnn_reshape(const Tensor & self, IntArrayRef shape);
  Tensor relu(const Tensor & self);
  Tensor & relu_(Tensor & self);
  Tensor sigmoid(const Tensor & self);
  Tensor & sigmoid_(Tensor & self);
  Tensor _softmax(const Tensor & self, int64_t dim, bool half_to_float);
  Tensor _mkldnn_transpose(const Tensor & self, int64_t dim0, int64_t dim1);
  Tensor & _mkldnn_transpose_(Tensor & self, int64_t dim0, int64_t dim1);
  Tensor clone(const Tensor & self);
  Tensor & zero_(Tensor & self);
  Tensor to_dense(const Tensor & self);
  Tensor mkldnn_reorder_conv2d_weight(const Tensor & self, IntArrayRef padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups);
  Tensor view(const Tensor & self, IntArrayRef size);
  Tensor & adaptive_avg_pool2d_out(Tensor & out, const Tensor & self, IntArrayRef output_size);
  Tensor mkldnn_adaptive_avg_pool2d(const Tensor & self, IntArrayRef output_size);
  Tensor & avg_pool2d_out(Tensor & out, const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional<int64_t> divisor_override);
  Tensor avg_pool2d(const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional<int64_t> divisor_override);
}
#endif

} // namespace at
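
// ---------------------------------------------------------------------------
// Usage sketch (not part of the generated header): a minimal illustration of
// how a few of the ops declared above are reached through the public at:: API
// in a build with MKL-DNN support and USE_STATIC_DISPATCH. The build
// configuration is an assumption; the calls themselves (Tensor::to_mkldnn,
// at::relu, Tensor::to_dense) are standard ATen API. Kept as a comment so the
// generated header itself is unchanged by it.
//
//   #include <ATen/ATen.h>
//
//   void mkldnn_relu_example() {
//     // Start from a dense CPU float tensor and convert it to the opaque
//     // MKL-DNN layout; subsequent ops on `mk` dispatch to MkldnnCPUType.
//     at::Tensor dense = at::randn({8, 16});
//     at::Tensor mk = dense.to_mkldnn();
//
//     // Routed to MkldnnCPUType::relu(const Tensor &) declared above.
//     at::Tensor activated = at::relu(mk);
//
//     // MkldnnCPUType::to_dense converts back to a strided CPU tensor.
//     at::Tensor out = activated.to_dense();
//   }
// ---------------------------------------------------------------------------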