#pragma once
|
|
#include <c10/core/Device.h>
|
#include <c10/core/Layout.h>
|
#include <c10/core/MemoryFormat.h>
|
#include <c10/core/QScheme.h>
|
#include <c10/core/Scalar.h>
|
#include <c10/core/ScalarType.h>
|
#include <c10/core/Storage.h>
|
#include <ATen/core/TensorAccessor.h>
|
#include <c10/core/TensorImpl.h>
|
#include <c10/core/UndefinedTensorImpl.h>
|
#include <c10/util/Exception.h>
|
#include <c10/util/Deprecated.h>
|
#include <c10/util/Optional.h>
|
#include <c10/util/intrusive_ptr.h>
|
#include <ATen/core/DeprecatedTypePropertiesRegistry.h>
|
#include <ATen/core/DeprecatedTypeProperties.h>
|
#include <ATen/core/EnableNamedTensor.h>
|
#include <ATen/core/NamedTensor.h>
|
|
namespace caffe2 {
|
class Tensor;
|
}
|
namespace c10 {
|
struct TensorOptions;
|
}
|
namespace at {
|
struct Generator;
|
struct Type;
|
class DeprecatedTypeProperties;
|
class Tensor;
|
} // namespace at
|
|
namespace at {
|
|
class Tensor;
|
using TensorList = ArrayRef<Tensor>;
|
|
struct Quantizer;
|
// These are temporary typedefs to enable Quantizer in the ATen native function API;
// we'll remove them once the Quantizer class is actually exposed to the frontend.
|
using QuantizerPtr = c10::intrusive_ptr<Quantizer>;
|
using ConstQuantizerPtr = const c10::intrusive_ptr<Quantizer>&;
|
|
// Tensor is a "generic" object holding a pointer to the underlying TensorImpl object, which
|
// has an embedded reference count. In this way, Tensor is similar to boost::intrusive_ptr.
|
//
|
// For example:
|
//
|
// void func(Tensor a) {
|
// Tensor b = a;
|
// ...
|
// }
|
//
|
// In this example, when we say Tensor b = a, we are creating a new object that points to the
|
// same underlying TensorImpl, and bumps its reference count. When b goes out of scope, the
|
// destructor decrements the reference count by calling release() on the TensorImpl it points to.
|
// The existing constructors, operator overloads, etc. take care to implement the correct semantics.
|
//
|
// Note that a Tensor can also be undefined, i.e. not associated with any underlying
// TensorImpl; special care must be taken to handle this.
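//
// A minimal sketch of the usual guard (see defined() below):
//
//   void maybe_print(const Tensor& t) {
//     if (!t.defined()) {
//       return;  // undefined: there is no TensorImpl to operate on
//     }
//     t.print();
//   }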
|
class CAFFE2_API Tensor {
|
public:
|
Tensor() {}
|
// This constructor should not be used by end users and is an implementation
|
// detail invoked by autogenerated code.
|
explicit Tensor(
|
c10::intrusive_ptr<TensorImpl, UndefinedTensorImpl> tensor_impl)
|
: impl_(std::move(tensor_impl)) {
|
if (impl_.get() == nullptr) {
|
throw std::runtime_error("TensorImpl with nullptr is not supported");
|
}
|
}
|
Tensor(const Tensor&) = default;
|
Tensor(Tensor&&) = default;
|
|
|
public:
|
// Creates a new wrapper from a TensorImpl. Intentionally a static factory method,
// because it should be used with care: it checks the necessary invariants.
|
static Tensor wrap_tensor_impl(
|
c10::intrusive_ptr<TensorImpl, UndefinedTensorImpl> tensor_impl) {
|
Tensor r(std::move(tensor_impl));
|
r.enforce_invariants();
|
return r;
|
}
|
|
int64_t dim() const {
|
return impl_->dim();
|
}
|
int64_t storage_offset() const {
|
return impl_->storage_offset();
|
}
|
|
TensorImpl * unsafeGetTensorImpl() const {
|
return impl_.get();
|
}
|
TensorImpl * unsafeReleaseTensorImpl() {
|
return impl_.release();
|
}
|
const c10::intrusive_ptr<TensorImpl, UndefinedTensorImpl>& getIntrusivePtr() const {
|
return impl_;
|
}
|
|
bool defined() const {
|
return impl_;
|
}
|
|
void reset() {
|
impl_.reset();
|
}
|
|
// The following overloads are very intriguing. Consider the following
// program:
//
//    x[1] = 3;
//
// We would expect that the entry of x at index 1 is written to 3. But how can we
// actually achieve this? x[1] evaluates to a tensor...
//
// The answer is, using a ref-qualifier. x[1] is an rvalue, which cannot be
// (profitably) assigned to in the traditional sense, so we overload
// assignment to mean, "Actually, copy 3 into the tensor data." This is done
// with an rvalue-reference ref-qualified overload (the methods with && at the
// end of their type).
//
// There's one more fly in the ointment: we also want
//
//    Tensor x = y;
//
// to work, and we want it NOT to copy. So we need a traditional operator=
// overload. But we MUST specify a mutable lvalue ref-qualifier, to
// disambiguate the traditional overload from the rvalue-reference
// ref-qualified overload. Otherwise, it would be ambiguous, because
// a non-ref-qualified method is eligible for all situations.
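//
// For example (assuming y is a defined tensor):
//
//   Tensor x;
//   x = y;      // lvalue-ref-qualified overload: x now shares y's TensorImpl, no data copy
//   x[0] = 3;   // rvalue-ref-qualified overload: x[0] is a temporary Tensor, and assigning
//               // the Scalar writes 3 into the underlying data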
|
|
// Unfortunately, we have to write these constructors out manually
|
// to work around an MSVC bug:
|
// error C2580: 'at::Tensor &at::Tensor::operator =(const at::Tensor &) &':
|
// multiple versions of a defaulted special member functions are not allowed
|
// Tensor& operator=(const Tensor&) & = default;
|
// Tensor& operator=(Tensor&&) & = default;
|
Tensor& operator=(const Tensor& x) & {
|
impl_ = x.impl_;
|
return *this;
|
}
|
Tensor& operator=(Tensor&& x) & {
|
impl_ = std::move(x.impl_);
|
return *this;
|
}
|
|
Tensor& operator=(Scalar v) &&;
|
Tensor& operator=(const Tensor&) &&;
|
Tensor& operator=(Tensor&&) &&;
|
|
bool is_same(const Tensor& other) const noexcept {
|
return impl_ == other.impl_;
|
}
|
size_t use_count() const noexcept {
|
return impl_.use_count();
|
}
|
size_t weak_use_count() const noexcept {
|
return impl_.weak_use_count();
|
}
|
|
std::string toString() const;
|
|
IntArrayRef sizes() const {
|
return impl_->sizes();
|
}
|
IntArrayRef strides() const {
|
return impl_->strides();
|
}
|
#ifdef BUILD_NAMEDTENSOR
|
// See impl::get_opt_names in ATen/NamedTensor.h for docs.
|
optional<DimnameList> opt_names() const {
|
return impl::get_opt_names(unsafeGetTensorImpl());
|
}
|
// See impl::get_names in ATen/NamedTensor.h for docs.
|
DimnameList names() const {
|
return impl::get_names(unsafeGetTensorImpl());
|
}
|
#endif
|
int64_t ndimension() const {
|
return dim();
|
}
|
bool is_contiguous(at::MemoryFormat memory_format=at::MemoryFormat::Contiguous) const {
|
return impl_->is_contiguous(memory_format);
|
}
|
|
at::MemoryFormat suggest_memory_format() const {
|
if (impl_->is_strides_like_channels_last()) {
|
return at::MemoryFormat::ChannelsLast;
|
}
|
return at::MemoryFormat::Contiguous;
|
}
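
// For example, suggest_memory_format() on a tensor produced by
// `t.contiguous(at::MemoryFormat::ChannelsLast)` returns ChannelsLast (its strides are
// channels-last), while an ordinary contiguous NCHW tensor yields Contiguous.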
|
|
// Total bytes consumed by the "view" of elements of the array. Does not
|
// include size of metadata. The number reported here does not necessarily
|
// correspond to the true physical memory consumed by a tensor; instead,
|
// it reports the memory the tensor would take *if* it were contiguous.
|
// Defined to be numel() * itemsize()
|
size_t nbytes() const {
|
return impl_->numel() * impl_->itemsize();
|
}
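
// For example, a 2 x 3 tensor of kFloat reports nbytes() == 6 * 4 == 24, even when it is
// a non-contiguous view over a larger storage.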
|
|
// Length of one array element in bytes. This is the traditional
// NumPy naming.
|
size_t itemsize() const {
|
return impl_->itemsize();
|
}
|
|
// Same as itemsize(). This is the PyTorch naming.
|
size_t element_size() const {
|
return impl_->itemsize();
|
}
|
|
DeprecatedTypeProperties & type() const {
|
return globalDeprecatedTypePropertiesRegistry().getDeprecatedTypeProperties(
|
tensorTypeIdToBackend(legacyExtractTypeId(type_set())),
|
scalar_type(),
|
is_variable());
|
}
|
TensorTypeSet type_set() const {
|
return impl_->type_set();
|
}
|
ScalarType scalar_type() const {
|
return typeMetaToScalarType(impl_->dtype());
|
}
|
bool has_storage() const {
|
return defined() && impl_->has_storage();
|
}
|
const Storage& storage() const {
|
return impl_->storage();
|
}
|
bool is_alias_of(const at::Tensor& other) const {
|
return impl_->storage().is_alias_of(other.storage());
|
}
|
Tensor toType(ScalarType t) const;
|
Tensor toBackend(Backend b) const;
|
|
/// Returns true if the `Tensor` is actually a `torch::autograd::Variable`.
|
bool is_variable() const noexcept {
|
return impl_->is_variable();
|
}
|
|
/// Returns a `Tensor`'s layout. Defined in Type.h
|
Layout layout() const noexcept;
|
|
/// Returns a `Tensor`'s dtype (`TypeMeta`). Defined in TensorMethods.h
|
caffe2::TypeMeta dtype() const noexcept;
|
|
/// Returns a `Tensor`'s device.
|
Device device() const;
|
|
/// Returns a `Tensor`'s device index.
|
int64_t get_device() const;
|
|
/// Returns true if the `Tensor` has the CUDA backend.
bool is_cuda() const;

/// Returns true if the `Tensor` has the HIP backend.
bool is_hip() const;

/// Returns true if the `Tensor` has the sparse backend.
bool is_sparse() const;

/// Returns true if the `Tensor` is an mkldnn tensor.
bool is_mkldnn() const;

/// Returns true if the `Tensor` has the quantized backend.
bool is_quantized() const;

/// If a tensor is a quantized tensor, returns its quantizer.
/// TODO: it's not in native_functions.yaml yet as it's not exposed to Python.
QuantizerPtr quantizer() const;

#ifdef BUILD_NAMEDTENSOR
/// Returns true if the `Tensor` has any dimension names.
bool has_names() const;

/// Returns the `Tensor`'s dimension names data structure.
const NamedTensorMeta* get_named_tensor_meta() const;
NamedTensorMeta* get_named_tensor_meta();
#endif
|
|
/// Returns the `TensorOptions` corresponding to this `Tensor`. Defined in
|
/// TensorOptions.h.
|
TensorOptions options() const;
|
|
void* data_ptr() const {
|
return this->unsafeGetTensorImpl()->data();
|
}
|
|
template <typename T>
|
T * data_ptr() const;
|
|
template<typename T>
|
C10_DEPRECATED_MESSAGE("Tensor.data<T>() is deprecated. Please use Tensor.data_ptr<T>() instead.")
|
T * data() const {
|
return data_ptr<T>();
|
}
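
// For example (assuming `t` holds kFloat data):
//
//   float* p = t.data_ptr<float>();   // typed pointer to this tensor's first element
//   void*  q = t.data_ptr();          // untyped pointer to the same memory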
|
|
template <typename T>
|
T item() const;
|
|
// Purposely not defined here to avoid inlining
|
void print() const;
|
|
// Return a `TensorAccessor` for CPU `Tensor`s. You have to specify scalar type and
|
// dimension.
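//
// A minimal sketch (assuming `t` is a 2-D CPU tensor of kFloat):
//
//   auto a = t.accessor<float, 2>();
//   float v = a[0][1];   // element-wise access without per-call dispatch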
|
template<typename T, size_t N>
|
TensorAccessor<T,N> accessor() const& {
|
static_assert(N > 0, "accessor is used for indexing tensor, for scalars use *data_ptr<T>()");
|
TORCH_CHECK(dim() == N, "expected ", N, " dims but tensor has ", dim());
|
return TensorAccessor<T,N>(data_ptr<T>(),sizes().data(),strides().data());
|
}
|
template<typename T, size_t N>
|
TensorAccessor<T,N> accessor() && = delete;
|
|
// Return a `GenericPackedTensorAccessor` for CUDA `Tensor`s. You have to specify scalar type and
|
// dimension. You can optionally specify RestrictPtrTraits as a template parameter to
|
// cast the data pointer to a __restrict__ pointer.
|
// In order to use this, your CUDA kernel has to take a corresponding GenericPackedTensorAccessor
|
// as an argument.
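//
// A minimal sketch (assuming `t` is a 1-D CUDA tensor of kFloat; `scale` is a
// hypothetical kernel; see packed_accessor32 below):
//
//   __global__ void scale(at::PackedTensorAccessor32<float, 1> a, float c, int n) {
//     int i = blockIdx.x * blockDim.x + threadIdx.x;
//     if (i < n) a[i] *= c;
//   }
//
//   // host side:
//   scale<<<blocks, threads>>>(t.packed_accessor32<float, 1>(), 2.0f, t.numel());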
|
template<typename T, size_t N, template <typename U> class PtrTraits = DefaultPtrTraits, typename index_t = int64_t>
|
GenericPackedTensorAccessor<T,N,PtrTraits,index_t> generic_packed_accessor() const& {
|
static_assert(N > 0, "accessor is used for indexing tensor, for scalars use *data_ptr<T>()");
|
TORCH_CHECK(dim() == N, "expected ", N, " dims but tensor has ", dim());
|
return GenericPackedTensorAccessor<T,N,PtrTraits,index_t>(static_cast<typename PtrTraits<T>::PtrType>(data_ptr<T>()),sizes().data(),strides().data());
|
}
|
template<typename T, size_t N, template <typename U> class PtrTraits = DefaultPtrTraits, typename index_t = int64_t>
|
GenericPackedTensorAccessor<T,N> generic_packed_accessor() && = delete;
|
|
template<typename T, size_t N, template <typename U> class PtrTraits = DefaultPtrTraits>
|
PackedTensorAccessor32<T,N,PtrTraits> packed_accessor32() const& {
|
return generic_packed_accessor<T,N,PtrTraits,int32_t>();
|
}
|
template<typename T, size_t N, template <typename U> class PtrTraits = DefaultPtrTraits>
|
PackedTensorAccessor32<T,N,PtrTraits> packed_accessor32() && = delete;
|
|
template<typename T, size_t N, template <typename U> class PtrTraits = DefaultPtrTraits>
|
PackedTensorAccessor64<T,N,PtrTraits> packed_accessor64() const& {
|
return generic_packed_accessor<T,N,PtrTraits,int64_t>();
|
}
|
template<typename T, size_t N, template <typename U> class PtrTraits = DefaultPtrTraits>
|
PackedTensorAccessor64<T,N,PtrTraits> packed_accessor64() && = delete;
|
|
template<typename T, size_t N, template <typename U> class PtrTraits = DefaultPtrTraits, typename index_t = int64_t>
|
C10_DEPRECATED_MESSAGE("packed_accessor is deprecated, use packed_accessor32 or packed_accessor64 instead")
|
GenericPackedTensorAccessor<T,N,PtrTraits,index_t> packed_accessor() const & {
|
return generic_packed_accessor<T,N,PtrTraits,index_t>();
|
}
|
template<typename T, size_t N, template <typename U> class PtrTraits = DefaultPtrTraits, typename index_t = int64_t>
|
C10_DEPRECATED_MESSAGE("packed_accessor is deprecated, use packed_accessor32 or packed_accessor64 instead")
|
GenericPackedTensorAccessor<T,N,PtrTraits,index_t> packed_accessor() && = delete;
|
|
Tensor operator-() const;
|
Tensor& operator+=(const Tensor & other);
|
Tensor& operator+=(Scalar other);
|
Tensor& operator-=(const Tensor & other);
|
Tensor& operator-=(Scalar other);
|
Tensor& operator*=(const Tensor & other);
|
Tensor& operator*=(Scalar other);
|
Tensor& operator/=(const Tensor & other);
|
Tensor& operator/=(Scalar other);
|
Tensor operator[](Scalar index) const;
|
Tensor operator[](Tensor index) const;
|
Tensor operator[](int64_t index) const;
|
|
Tensor cpu() const;
|
Tensor cuda() const;
|
Tensor hip() const;
|
|
// ~~~~~ Autograd API ~~~~~
|
|
Tensor& set_requires_grad(bool requires_grad) {
|
impl_->set_requires_grad(requires_grad);
|
return *this;
|
}
|
bool requires_grad() const {
|
return impl_->requires_grad();
|
}
|
|
Tensor& grad() {
|
return impl_->grad();
|
}
|
const Tensor& grad() const {
|
return impl_->grad();
|
}
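
// A sketch of typical use (assumes x participates in autograd, i.e. is backed by a
// torch::autograd::Variable):
//
//   x.set_requires_grad(true);
//   auto y = x.sum();
//   y.backward();
//   Tensor dx = x.grad();   // gradient of y with respect to x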
|
|
// STOP. Thinking of adding a method here, which only makes use
|
// of other ATen methods? Define it in native_functions.yaml.
|
|
// Example:
//   Tensor * add(Tensor & b);
|
void backward(const Tensor & gradient={}, bool keep_graph=false, bool create_graph=false) const;
|
void set_data(const Tensor & new_data) const;
|
Tensor data() const;
|
bool is_leaf() const;
|
int64_t output_nr() const;
|
int64_t _version() const;
|
#ifdef BUILD_NAMEDTENSOR
|
Tensor & rename_(c10::optional<DimnameList> names) const;
|
#endif
|
#ifdef BUILD_NAMEDTENSOR
|
Tensor rename(c10::optional<DimnameList> names) const;
|
#endif
|
#ifdef BUILD_NAMEDTENSOR
|
Tensor align_to(DimnameList names) const;
|
#endif
|
#ifdef BUILD_NAMEDTENSOR
|
Tensor align_as(const Tensor & other) const;
|
#endif
|
#ifdef BUILD_NAMEDTENSOR
|
Tensor refine_names(DimnameList names) const;
|
#endif
|
#ifdef BUILD_NAMEDTENSOR
|
Tensor unflatten(Dimname dim, IntArrayRef sizes, DimnameList names) const;
|
#endif
|
#ifdef BUILD_NAMEDTENSOR
|
Tensor unflatten(int64_t dim, IntArrayRef sizes, DimnameList names) const;
|
#endif
|
Tensor abs() const;
|
Tensor & abs_() const;
|
Tensor acos() const;
|
Tensor & acos_() const;
|
Tensor add(const Tensor & other, Scalar alpha=1) const;
|
Tensor & add_(const Tensor & other, Scalar alpha=1) const;
|
Tensor add(Scalar other, Scalar alpha=1) const;
|
Tensor & add_(Scalar other, Scalar alpha=1) const;
|
Tensor addmv(const Tensor & mat, const Tensor & vec, Scalar beta=1, Scalar alpha=1) const;
|
Tensor & addmv_(const Tensor & mat, const Tensor & vec, Scalar beta=1, Scalar alpha=1) const;
|
Tensor addr(const Tensor & vec1, const Tensor & vec2, Scalar beta=1, Scalar alpha=1) const;
|
Tensor & addr_(const Tensor & vec1, const Tensor & vec2, Scalar beta=1, Scalar alpha=1) const;
|
Tensor all(int64_t dim, bool keepdim=false) const;
|
#ifdef BUILD_NAMEDTENSOR
|
Tensor all(Dimname dim, bool keepdim=false) const;
|
#endif
|
bool allclose(const Tensor & other, double rtol=1e-05, double atol=1e-08, bool equal_nan=false) const;
|
Tensor any(int64_t dim, bool keepdim=false) const;
|
#ifdef BUILD_NAMEDTENSOR
|
Tensor any(Dimname dim, bool keepdim=false) const;
|
#endif
|
Tensor argmax(c10::optional<int64_t> dim=c10::nullopt, bool keepdim=false) const;
|
Tensor argmin(c10::optional<int64_t> dim=c10::nullopt, bool keepdim=false) const;
|
Tensor as_strided(IntArrayRef size, IntArrayRef stride, c10::optional<int64_t> storage_offset=c10::nullopt) const;
|
Tensor & as_strided_(IntArrayRef size, IntArrayRef stride, c10::optional<int64_t> storage_offset=c10::nullopt) const;
|
Tensor asin() const;
|
Tensor & asin_() const;
|
Tensor atan() const;
|
Tensor & atan_() const;
|
Tensor baddbmm(const Tensor & batch1, const Tensor & batch2, Scalar beta=1, Scalar alpha=1) const;
|
Tensor & baddbmm_(const Tensor & batch1, const Tensor & batch2, Scalar beta=1, Scalar alpha=1) const;
|
Tensor bernoulli(Generator * generator=nullptr) const;
|
Tensor & bernoulli_(const Tensor & p, Generator * generator=nullptr) const;
|
Tensor & bernoulli_(double p=0.5, Generator * generator=nullptr) const;
|
Tensor bernoulli(double p, Generator * generator=nullptr) const;
|
Tensor bincount(const Tensor & weights={}, int64_t minlength=0) const;
|
Tensor bitwise_not() const;
|
Tensor & bitwise_not_() const;
|
Tensor logical_not() const;
|
Tensor & logical_not_() const;
|
Tensor logical_xor(const Tensor & other) const;
|
Tensor & logical_xor_(const Tensor & other) const;
|
Tensor bmm(const Tensor & mat2) const;
|
Tensor ceil() const;
|
Tensor & ceil_() const;
|
std::vector<Tensor> chunk(int64_t chunks, int64_t dim=0) const;
|
Tensor clamp(c10::optional<Scalar> min=c10::nullopt, c10::optional<Scalar> max=c10::nullopt) const;
|
Tensor & clamp_(c10::optional<Scalar> min=c10::nullopt, c10::optional<Scalar> max=c10::nullopt) const;
|
Tensor clamp_max(Scalar max) const;
|
Tensor & clamp_max_(Scalar max) const;
|
Tensor clamp_min(Scalar min) const;
|
Tensor & clamp_min_(Scalar min) const;
|
Tensor contiguous(MemoryFormat memory_format=MemoryFormat::Contiguous) const;
|
Tensor & copy_(const Tensor & src, bool non_blocking=false) const;
|
Tensor cos() const;
|
Tensor & cos_() const;
|
Tensor cosh() const;
|
Tensor & cosh_() const;
|
Tensor cumsum(int64_t dim, c10::optional<ScalarType> dtype=c10::nullopt) const;
|
#ifdef BUILD_NAMEDTENSOR
|
Tensor cumsum(Dimname dim, c10::optional<ScalarType> dtype=c10::nullopt) const;
|
#endif
|
Tensor cumprod(int64_t dim, c10::optional<ScalarType> dtype=c10::nullopt) const;
|
#ifdef BUILD_NAMEDTENSOR
|
Tensor cumprod(Dimname dim, c10::optional<ScalarType> dtype=c10::nullopt) const;
|
#endif
|
Tensor det() const;
|
Tensor diag_embed(int64_t offset=0, int64_t dim1=-2, int64_t dim2=-1) const;
|
Tensor diagflat(int64_t offset=0) const;
|
Tensor diagonal(int64_t offset=0, int64_t dim1=0, int64_t dim2=1) const;
|
Tensor & fill_diagonal_(Scalar fill_value, bool wrap=false) const;
|
Tensor div(const Tensor & other) const;
|
Tensor & div_(const Tensor & other) const;
|
Tensor div(Scalar other) const;
|
Tensor & div_(Scalar other) const;
|
Tensor dot(const Tensor & tensor) const;
|
Tensor new_empty(IntArrayRef size, const TensorOptions & options={}) const;
|
Tensor new_full(IntArrayRef size, Scalar fill_value, const TensorOptions & options={}) const;
|
Tensor & resize_(IntArrayRef size) const;
|
Tensor erf() const;
|
Tensor & erf_() const;
|
Tensor erfc() const;
|
Tensor & erfc_() const;
|
Tensor exp() const;
|
Tensor & exp_() const;
|
Tensor expm1() const;
|
Tensor & expm1_() const;
|
Tensor expand(IntArrayRef size, bool implicit=false) const;
|
Tensor expand_as(const Tensor & other) const;
|
Tensor flatten(int64_t start_dim=0, int64_t end_dim=-1) const;
|
#ifdef BUILD_NAMEDTENSOR
|
Tensor flatten(int64_t start_dim, int64_t end_dim, Dimname out_dim) const;
|
#endif
|
#ifdef BUILD_NAMEDTENSOR
|
Tensor flatten(Dimname start_dim, Dimname end_dim, Dimname out_dim) const;
|
#endif
|
#ifdef BUILD_NAMEDTENSOR
|
Tensor flatten(DimnameList dims, Dimname out_dim) const;
|
#endif
|
Tensor & fill_(Scalar value) const;
|
Tensor & fill_(const Tensor & value) const;
|
Tensor floor() const;
|
Tensor & floor_() const;
|
Tensor frac() const;
|
Tensor & frac_() const;
|
Tensor ger(const Tensor & vec2) const;
|
Tensor fft(int64_t signal_ndim, bool normalized=false) const;
|
Tensor ifft(int64_t signal_ndim, bool normalized=false) const;
|
Tensor rfft(int64_t signal_ndim, bool normalized=false, bool onesided=true) const;
|
Tensor irfft(int64_t signal_ndim, bool normalized=false, bool onesided=true, IntArrayRef signal_sizes={}) const;
|
Tensor index(TensorList indices) const;
|
Tensor & index_copy_(int64_t dim, const Tensor & index, const Tensor & source) const;
|
Tensor index_copy(int64_t dim, const Tensor & index, const Tensor & source) const;
|
#ifdef BUILD_NAMEDTENSOR
|
Tensor & index_copy_(Dimname dim, const Tensor & index, const Tensor & source) const;
|
#endif
|
#ifdef BUILD_NAMEDTENSOR
|
Tensor index_copy(Dimname dim, const Tensor & index, const Tensor & source) const;
|
#endif
|
Tensor & index_put_(TensorList indices, const Tensor & values, bool accumulate=false) const;
|
Tensor index_put(TensorList indices, const Tensor & values, bool accumulate=false) const;
|
Tensor inverse() const;
|
Tensor isclose(const Tensor & other, double rtol=1e-05, double atol=1e-08, bool equal_nan=false) const;
|
bool is_distributed() const;
|
bool is_floating_point() const;
|
bool is_complex() const;
|
bool is_nonzero() const;
|
bool is_same_size(const Tensor & other) const;
|
bool is_signed() const;
|
std::tuple<Tensor,Tensor> kthvalue(int64_t k, int64_t dim=-1, bool keepdim=false) const;
|
#ifdef BUILD_NAMEDTENSOR
|
std::tuple<Tensor,Tensor> kthvalue(int64_t k, Dimname dim, bool keepdim=false) const;
|
#endif
|
Tensor log() const;
|
Tensor & log_() const;
|
Tensor log10() const;
|
Tensor & log10_() const;
|
Tensor log1p() const;
|
Tensor & log1p_() const;
|
Tensor log2() const;
|
Tensor & log2_() const;
|
Tensor logdet() const;
|
Tensor log_softmax(int64_t dim, c10::optional<ScalarType> dtype=c10::nullopt) const;
|
#ifdef BUILD_NAMEDTENSOR
|
Tensor log_softmax(Dimname dim, c10::optional<ScalarType> dtype=c10::nullopt) const;
|
#endif
|
Tensor logsumexp(IntArrayRef dim, bool keepdim=false) const;
|
#ifdef BUILD_NAMEDTENSOR
|
Tensor logsumexp(DimnameList dim, bool keepdim=false) const;
|
#endif
|
Tensor matmul(const Tensor & other) const;
|
Tensor matrix_power(int64_t n) const;
|
std::tuple<Tensor,Tensor> max(int64_t dim, bool keepdim=false) const;
|
Tensor max_values(IntArrayRef dim, bool keepdim=false) const;
|
#ifdef BUILD_NAMEDTENSOR
|
std::tuple<Tensor,Tensor> max(Dimname dim, bool keepdim=false) const;
|
#endif
|
#ifdef BUILD_NAMEDTENSOR
|
Tensor max_values(DimnameList dim, bool keepdim=false) const;
|
#endif
|
Tensor mean(c10::optional<ScalarType> dtype=c10::nullopt) const;
|
Tensor mean(IntArrayRef dim, bool keepdim=false, c10::optional<ScalarType> dtype=c10::nullopt) const;
|
#ifdef BUILD_NAMEDTENSOR
|
Tensor mean(DimnameList dim, bool keepdim=false, c10::optional<ScalarType> dtype=c10::nullopt) const;
|
#endif
|
std::tuple<Tensor,Tensor> median(int64_t dim, bool keepdim=false) const;
|
#ifdef BUILD_NAMEDTENSOR
|
std::tuple<Tensor,Tensor> median(Dimname dim, bool keepdim=false) const;
|
#endif
|
std::tuple<Tensor,Tensor> min(int64_t dim, bool keepdim=false) const;
|
Tensor min_values(IntArrayRef dim, bool keepdim=false) const;
|
#ifdef BUILD_NAMEDTENSOR
|
std::tuple<Tensor,Tensor> min(Dimname dim, bool keepdim=false) const;
|
#endif
|
#ifdef BUILD_NAMEDTENSOR
|
Tensor min_values(DimnameList dim, bool keepdim=false) const;
|
#endif
|
Tensor mm(const Tensor & mat2) const;
|
std::tuple<Tensor,Tensor> mode(int64_t dim=-1, bool keepdim=false) const;
|
#ifdef BUILD_NAMEDTENSOR
|
std::tuple<Tensor,Tensor> mode(Dimname dim, bool keepdim=false) const;
|
#endif
|
Tensor mul(const Tensor & other) const;
|
Tensor & mul_(const Tensor & other) const;
|
Tensor mul(Scalar other) const;
|
Tensor & mul_(Scalar other) const;
|
Tensor mv(const Tensor & vec) const;
|
Tensor mvlgamma(int64_t p) const;
|
Tensor & mvlgamma_(int64_t p) const;
|
Tensor narrow_copy(int64_t dim, int64_t start, int64_t length) const;
|
Tensor narrow(int64_t dim, int64_t start, int64_t length) const;
|
Tensor permute(IntArrayRef dims) const;
|
Tensor numpy_T() const;
|
bool is_pinned() const;
|
Tensor pin_memory() const;
|
Tensor pinverse(double rcond=1e-15) const;
|
Tensor reciprocal() const;
|
Tensor & reciprocal_() const;
|
Tensor neg() const;
|
Tensor & neg_() const;
|
Tensor repeat(IntArrayRef repeats) const;
|
Tensor repeat_interleave(const Tensor & repeats, c10::optional<int64_t> dim=c10::nullopt) const;
|
Tensor repeat_interleave(int64_t repeats, c10::optional<int64_t> dim=c10::nullopt) const;
|
Tensor reshape(IntArrayRef shape) const;
|
Tensor reshape_as(const Tensor & other) const;
|
Tensor round() const;
|
Tensor & round_() const;
|
Tensor relu() const;
|
Tensor & relu_() const;
|
Tensor prelu(const Tensor & weight) const;
|
std::tuple<Tensor,Tensor> prelu_backward(const Tensor & grad_output, const Tensor & weight) const;
|
Tensor hardshrink(Scalar lambd=0.5) const;
|
Tensor hardshrink_backward(const Tensor & grad_out, Scalar lambd) const;
|
Tensor rsqrt() const;
|
Tensor & rsqrt_() const;
|
#ifdef BUILD_NAMEDTENSOR
|
Tensor select(Dimname dim, int64_t index) const;
|
#endif
|
Tensor select(int64_t dim, int64_t index) const;
|
Tensor sigmoid() const;
|
Tensor & sigmoid_() const;
|
Tensor sin() const;
|
Tensor & sin_() const;
|
Tensor sinh() const;
|
Tensor & sinh_() const;
|
Tensor detach() const;
|
Tensor & detach_() const;
|
int64_t size(int64_t dim) const;
|
#ifdef BUILD_NAMEDTENSOR
|
int64_t size(Dimname dim) const;
|
#endif
|
Tensor slice(int64_t dim=0, int64_t start=0, int64_t end=9223372036854775807, int64_t step=1) const;
|
std::tuple<Tensor,Tensor> slogdet() const;
|
Tensor smm(const Tensor & mat2) const;
|
Tensor softmax(int64_t dim, c10::optional<ScalarType> dtype=c10::nullopt) const;
|
#ifdef BUILD_NAMEDTENSOR
|
Tensor softmax(Dimname dim, c10::optional<ScalarType> dtype=c10::nullopt) const;
|
#endif
|
std::vector<Tensor> split(int64_t split_size, int64_t dim=0) const;
|
std::vector<Tensor> split_with_sizes(IntArrayRef split_sizes, int64_t dim=0) const;
|
Tensor squeeze() const;
|
Tensor squeeze(int64_t dim) const;
|
#ifdef BUILD_NAMEDTENSOR
|
Tensor squeeze(Dimname dim) const;
|
#endif
|
Tensor & squeeze_() const;
|
Tensor & squeeze_(int64_t dim) const;
|
#ifdef BUILD_NAMEDTENSOR
|
Tensor & squeeze_(Dimname dim) const;
|
#endif
|
Tensor sspaddmm(const Tensor & mat1, const Tensor & mat2, Scalar beta=1, Scalar alpha=1) const;
|
Tensor stft(int64_t n_fft, c10::optional<int64_t> hop_length=c10::nullopt, c10::optional<int64_t> win_length=c10::nullopt, const Tensor & window={}, bool normalized=false, bool onesided=true) const;
|
int64_t stride(int64_t dim) const;
|
#ifdef BUILD_NAMEDTENSOR
|
int64_t stride(Dimname dim) const;
|
#endif
|
Tensor sum(c10::optional<ScalarType> dtype=c10::nullopt) const;
|
Tensor sum(IntArrayRef dim, bool keepdim=false, c10::optional<ScalarType> dtype=c10::nullopt) const;
|
#ifdef BUILD_NAMEDTENSOR
|
Tensor sum(DimnameList dim, bool keepdim=false, c10::optional<ScalarType> dtype=c10::nullopt) const;
|
#endif
|
Tensor sum_to_size(IntArrayRef size) const;
|
Tensor sqrt() const;
|
Tensor & sqrt_() const;
|
Tensor std(bool unbiased=true) const;
|
Tensor std(IntArrayRef dim, bool unbiased=true, bool keepdim=false) const;
|
#ifdef BUILD_NAMEDTENSOR
|
Tensor std(DimnameList dim, bool unbiased=true, bool keepdim=false) const;
|
#endif
|
Tensor prod(c10::optional<ScalarType> dtype=c10::nullopt) const;
|
Tensor prod(int64_t dim, bool keepdim=false, c10::optional<ScalarType> dtype=c10::nullopt) const;
|
#ifdef BUILD_NAMEDTENSOR
|
Tensor prod(Dimname dim, bool keepdim=false, c10::optional<ScalarType> dtype=c10::nullopt) const;
|
#endif
|
Tensor t() const;
|
Tensor & t_() const;
|
Tensor tan() const;
|
Tensor & tan_() const;
|
Tensor tanh() const;
|
Tensor & tanh_() const;
|
Tensor transpose(int64_t dim0, int64_t dim1) const;
|
#ifdef BUILD_NAMEDTENSOR
|
Tensor transpose(Dimname dim0, Dimname dim1) const;
|
#endif
|
Tensor & transpose_(int64_t dim0, int64_t dim1) const;
|
Tensor flip(IntArrayRef dims) const;
|
Tensor roll(IntArrayRef shifts, IntArrayRef dims={}) const;
|
Tensor rot90(int64_t k=1, IntArrayRef dims={0,1}) const;
|
Tensor trunc() const;
|
Tensor & trunc_() const;
|
Tensor type_as(const Tensor & other) const;
|
Tensor unsqueeze(int64_t dim) const;
|
Tensor & unsqueeze_(int64_t dim) const;
|
Tensor var(bool unbiased=true) const;
|
Tensor var(IntArrayRef dim, bool unbiased=true, bool keepdim=false) const;
|
#ifdef BUILD_NAMEDTENSOR
|
Tensor var(DimnameList dim, bool unbiased=true, bool keepdim=false) const;
|
#endif
|
Tensor view_as(const Tensor & other) const;
|
Tensor where(const Tensor & condition, const Tensor & other) const;
|
Tensor norm(c10::optional<Scalar> p, ScalarType dtype) const;
|
Tensor norm(Scalar p=2) const;
|
Tensor norm(c10::optional<Scalar> p, IntArrayRef dim, bool keepdim, ScalarType dtype) const;
|
Tensor norm(c10::optional<Scalar> p, IntArrayRef dim, bool keepdim=false) const;
|
#ifdef BUILD_NAMEDTENSOR
|
Tensor norm(c10::optional<Scalar> p, DimnameList dim, bool keepdim, ScalarType dtype) const;
|
#endif
|
#ifdef BUILD_NAMEDTENSOR
|
Tensor norm(c10::optional<Scalar> p, DimnameList dim, bool keepdim=false) const;
|
#endif
|
Tensor clone() const;
|
Tensor & resize_as_(const Tensor & the_template) const;
|
Tensor pow(Scalar exponent) const;
|
Tensor & zero_() const;
|
Tensor sub(const Tensor & other, Scalar alpha=1) const;
|
Tensor & sub_(const Tensor & other, Scalar alpha=1) const;
|
Tensor sub(Scalar other, Scalar alpha=1) const;
|
Tensor & sub_(Scalar other, Scalar alpha=1) const;
|
Tensor addmm(const Tensor & mat1, const Tensor & mat2, Scalar beta=1, Scalar alpha=1) const;
|
Tensor & addmm_(const Tensor & mat1, const Tensor & mat2, Scalar beta=1, Scalar alpha=1) const;
|
Tensor & sparse_resize_(IntArrayRef size, int64_t sparse_dim, int64_t dense_dim) const;
|
Tensor & sparse_resize_and_clear_(IntArrayRef size, int64_t sparse_dim, int64_t dense_dim) const;
|
Tensor sparse_mask(const Tensor & mask) const;
|
Tensor to_dense() const;
|
int64_t sparse_dim() const;
|
int64_t _dimI() const;
|
int64_t dense_dim() const;
|
int64_t _dimV() const;
|
int64_t _nnz() const;
|
Tensor coalesce() const;
|
bool is_coalesced() const;
|
Tensor _indices() const;
|
Tensor _values() const;
|
Tensor & _coalesced_(bool coalesced) const;
|
Tensor indices() const;
|
Tensor values() const;
|
int64_t numel() const;
|
std::vector<Tensor> unbind(int64_t dim=0) const;
|
#ifdef BUILD_NAMEDTENSOR
|
std::vector<Tensor> unbind(Dimname dim) const;
|
#endif
|
Tensor to_sparse(int64_t sparse_dim) const;
|
Tensor to_sparse() const;
|
Tensor to_mkldnn() const;
|
Tensor dequantize() const;
|
double q_scale() const;
|
int64_t q_zero_point() const;
|
Tensor q_per_channel_scales() const;
|
Tensor q_per_channel_zero_points() const;
|
int64_t q_per_channel_axis() const;
|
Tensor int_repr() const;
|
QScheme qscheme() const;
|
Tensor to(const TensorOptions & options, bool non_blocking=false, bool copy=false) const;
|
Tensor to(Device device, ScalarType dtype, bool non_blocking=false, bool copy=false) const;
|
Tensor to(ScalarType dtype, bool non_blocking=false, bool copy=false) const;
|
Tensor to(const Tensor & other, bool non_blocking=false, bool copy=false) const;
|
Scalar item() const;
|
Tensor & set_(Storage source) const;
|
Tensor & set_(Storage source, int64_t storage_offset, IntArrayRef size, IntArrayRef stride={}) const;
|
Tensor & set_(const Tensor & source) const;
|
Tensor & set_() const;
|
Tensor & set_quantizer_(ConstQuantizerPtr quantizer) const;
|
bool is_set_to(const Tensor & tensor) const;
|
Tensor & masked_fill_(const Tensor & mask, Scalar value) const;
|
Tensor masked_fill(const Tensor & mask, Scalar value) const;
|
Tensor & masked_fill_(const Tensor & mask, const Tensor & value) const;
|
Tensor masked_fill(const Tensor & mask, const Tensor & value) const;
|
Tensor & masked_scatter_(const Tensor & mask, const Tensor & source) const;
|
Tensor masked_scatter(const Tensor & mask, const Tensor & source) const;
|
Tensor view(IntArrayRef size) const;
|
Tensor & put_(const Tensor & index, const Tensor & source, bool accumulate=false) const;
|
Tensor & index_add_(int64_t dim, const Tensor & index, const Tensor & source) const;
|
Tensor index_add(int64_t dim, const Tensor & index, const Tensor & source) const;
|
#ifdef BUILD_NAMEDTENSOR
|
Tensor index_add(Dimname dim, const Tensor & index, const Tensor & source) const;
|
#endif
|
Tensor & index_fill_(int64_t dim, const Tensor & index, Scalar value) const;
|
Tensor index_fill(int64_t dim, const Tensor & index, Scalar value) const;
|
Tensor & index_fill_(int64_t dim, const Tensor & index, const Tensor & value) const;
|
Tensor index_fill(int64_t dim, const Tensor & index, const Tensor & value) const;
|
#ifdef BUILD_NAMEDTENSOR
|
Tensor & index_fill_(Dimname dim, const Tensor & index, Scalar value) const;
|
#endif
|
#ifdef BUILD_NAMEDTENSOR
|
Tensor & index_fill_(Dimname dim, const Tensor & index, const Tensor & value) const;
|
#endif
|
#ifdef BUILD_NAMEDTENSOR
|
Tensor index_fill(Dimname dim, const Tensor & index, Scalar value) const;
|
#endif
|
#ifdef BUILD_NAMEDTENSOR
|
Tensor index_fill(Dimname dim, const Tensor & index, const Tensor & value) const;
|
#endif
|
Tensor & scatter_(int64_t dim, const Tensor & index, const Tensor & src) const;
|
Tensor scatter(int64_t dim, const Tensor & index, const Tensor & src) const;
|
Tensor & scatter_(int64_t dim, const Tensor & index, Scalar value) const;
|
Tensor scatter(int64_t dim, const Tensor & index, Scalar value) const;
|
#ifdef BUILD_NAMEDTENSOR
|
Tensor scatter(Dimname dim, const Tensor & index, const Tensor & src) const;
|
#endif
|
#ifdef BUILD_NAMEDTENSOR
|
Tensor scatter(Dimname dim, const Tensor & index, Scalar value) const;
|
#endif
|
Tensor & scatter_add_(int64_t dim, const Tensor & index, const Tensor & src) const;
|
Tensor scatter_add(int64_t dim, const Tensor & index, const Tensor & src) const;
|
#ifdef BUILD_NAMEDTENSOR
|
Tensor scatter_add(Dimname dim, const Tensor & index, const Tensor & src) const;
|
#endif
|
Tensor & lt_(Scalar other) const;
|
Tensor & lt_(const Tensor & other) const;
|
Tensor & gt_(Scalar other) const;
|
Tensor & gt_(const Tensor & other) const;
|
Tensor & le_(Scalar other) const;
|
Tensor & le_(const Tensor & other) const;
|
Tensor & ge_(Scalar other) const;
|
Tensor & ge_(const Tensor & other) const;
|
Tensor & eq_(Scalar other) const;
|
Tensor & eq_(const Tensor & other) const;
|
Tensor & ne_(Scalar other) const;
|
Tensor & ne_(const Tensor & other) const;
|
Tensor __and__(Scalar other) const;
|
Tensor __and__(const Tensor & other) const;
|
Tensor & __iand__(Scalar other) const;
|
Tensor & __iand__(const Tensor & other) const;
|
Tensor __or__(Scalar other) const;
|
Tensor __or__(const Tensor & other) const;
|
Tensor & __ior__(Scalar other) const;
|
Tensor & __ior__(const Tensor & other) const;
|
Tensor __xor__(Scalar other) const;
|
Tensor __xor__(const Tensor & other) const;
|
Tensor & __ixor__(Scalar other) const;
|
Tensor & __ixor__(const Tensor & other) const;
|
Tensor __lshift__(Scalar other) const;
|
Tensor __lshift__(const Tensor & other) const;
|
Tensor & __ilshift__(Scalar other) const;
|
Tensor & __ilshift__(const Tensor & other) const;
|
Tensor __rshift__(Scalar other) const;
|
Tensor __rshift__(const Tensor & other) const;
|
Tensor & __irshift__(Scalar other) const;
|
Tensor & __irshift__(const Tensor & other) const;
|
Tensor & lgamma_() const;
|
Tensor & atan2_(const Tensor & other) const;
|
Tensor & tril_(int64_t diagonal=0) const;
|
Tensor & triu_(int64_t diagonal=0) const;
|
Tensor & digamma_() const;
|
Tensor & polygamma_(int64_t n) const;
|
Tensor & renorm_(Scalar p, int64_t dim, Scalar maxnorm) const;
|
Tensor & pow_(Scalar exponent) const;
|
Tensor & pow_(const Tensor & exponent) const;
|
Tensor & lerp_(const Tensor & end, Scalar weight) const;
|
Tensor & lerp_(const Tensor & end, const Tensor & weight) const;
|
Tensor & fmod_(Scalar other) const;
|
Tensor & fmod_(const Tensor & other) const;
|
Tensor & remainder_(Scalar other) const;
|
Tensor & remainder_(const Tensor & other) const;
|
Tensor & addbmm_(const Tensor & batch1, const Tensor & batch2, Scalar beta=1, Scalar alpha=1) const;
|
Tensor addbmm(const Tensor & batch1, const Tensor & batch2, Scalar beta=1, Scalar alpha=1) const;
|
Tensor & addcdiv_(const Tensor & tensor1, const Tensor & tensor2, Scalar value=1) const;
|
Tensor & random_(int64_t from, int64_t to, Generator * generator=nullptr) const;
|
Tensor & random_(int64_t to, Generator * generator=nullptr) const;
|
Tensor & random_(Generator * generator=nullptr) const;
|
Tensor & uniform_(double from=0, double to=1, Generator * generator=nullptr) const;
|
Tensor & normal_(double mean=0, double std=1, Generator * generator=nullptr) const;
|
Tensor & cauchy_(double median=0, double sigma=1, Generator * generator=nullptr) const;
|
Tensor & log_normal_(double mean=1, double std=2, Generator * generator=nullptr) const;
|
Tensor & exponential_(double lambd=1, Generator * generator=nullptr) const;
|
Tensor & geometric_(double p, Generator * generator=nullptr) const;
|
Tensor diag(int64_t diagonal=0) const;
|
Tensor cross(const Tensor & other, c10::optional<int64_t> dim=c10::nullopt) const;
|
Tensor triu(int64_t diagonal=0) const;
|
Tensor tril(int64_t diagonal=0) const;
|
Tensor trace() const;
|
Tensor ne(Scalar other) const;
|
Tensor ne(const Tensor & other) const;
|
Tensor eq(Scalar other) const;
|
Tensor eq(const Tensor & other) const;
|
Tensor ge(Scalar other) const;
|
Tensor ge(const Tensor & other) const;
|
Tensor le(Scalar other) const;
|
Tensor le(const Tensor & other) const;
|
Tensor gt(Scalar other) const;
|
Tensor gt(const Tensor & other) const;
|
Tensor lt(Scalar other) const;
|
Tensor lt(const Tensor & other) const;
|
Tensor take(const Tensor & index) const;
|
Tensor index_select(int64_t dim, const Tensor & index) const;
|
#ifdef BUILD_NAMEDTENSOR
|
Tensor index_select(Dimname dim, const Tensor & index) const;
|
#endif
|
Tensor masked_select(const Tensor & mask) const;
|
Tensor nonzero() const;
|
std::vector<Tensor> nonzero_numpy() const;
|
Tensor gather(int64_t dim, const Tensor & index, bool sparse_grad=false) const;
|
#ifdef BUILD_NAMEDTENSOR
|
Tensor gather(Dimname dim, const Tensor & index, bool sparse_grad=false) const;
|
#endif
|
Tensor addcmul(const Tensor & tensor1, const Tensor & tensor2, Scalar value=1) const;
|
Tensor & addcmul_(const Tensor & tensor1, const Tensor & tensor2, Scalar value=1) const;
|
Tensor addcdiv(const Tensor & tensor1, const Tensor & tensor2, Scalar value=1) const;
|
std::tuple<Tensor,Tensor> lstsq(const Tensor & A) const;
|
std::tuple<Tensor,Tensor> triangular_solve(const Tensor & A, bool upper=true, bool transpose=false, bool unitriangular=false) const;
|
std::tuple<Tensor,Tensor> symeig(bool eigenvectors=false, bool upper=true) const;
|
std::tuple<Tensor,Tensor> eig(bool eigenvectors=false) const;
|
std::tuple<Tensor,Tensor,Tensor> svd(bool some=true, bool compute_uv=true) const;
|
Tensor cholesky(bool upper=false) const;
|
Tensor cholesky_solve(const Tensor & input2, bool upper=false) const;
|
std::tuple<Tensor,Tensor> solve(const Tensor & A) const;
|
Tensor cholesky_inverse(bool upper=false) const;
|
std::tuple<Tensor,Tensor> qr(bool some=true) const;
|
std::tuple<Tensor,Tensor> geqrf() const;
|
Tensor orgqr(const Tensor & input2) const;
|
Tensor ormqr(const Tensor & input2, const Tensor & input3, bool left=true, bool transpose=false) const;
|
Tensor lu_solve(const Tensor & LU_data, const Tensor & LU_pivots) const;
|
Tensor multinomial(int64_t num_samples, bool replacement=false, Generator * generator=nullptr) const;
|
Tensor lgamma() const;
|
Tensor digamma() const;
|
Tensor polygamma(int64_t n) const;
|
Tensor erfinv() const;
|
Tensor & erfinv_() const;
|
Tensor sign() const;
|
Tensor & sign_() const;
|
Tensor dist(const Tensor & other, Scalar p=2) const;
|
Tensor atan2(const Tensor & other) const;
|
Tensor lerp(const Tensor & end, Scalar weight) const;
|
Tensor lerp(const Tensor & end, const Tensor & weight) const;
|
Tensor histc(int64_t bins=100, Scalar min=0, Scalar max=0) const;
|
Tensor fmod(Scalar other) const;
|
Tensor fmod(const Tensor & other) const;
|
Tensor remainder(Scalar other) const;
|
Tensor remainder(const Tensor & other) const;
|
Tensor min(const Tensor & other) const;
|
Tensor min() const;
|
Tensor max(const Tensor & other) const;
|
Tensor max() const;
|
Tensor median() const;
|
std::tuple<Tensor,Tensor> sort(int64_t dim=-1, bool descending=false) const;
|
#ifdef BUILD_NAMEDTENSOR
|
std::tuple<Tensor,Tensor> sort(Dimname dim, bool descending=false) const;
|
#endif
|
Tensor argsort(int64_t dim=-1, bool descending=false) const;
|
#ifdef BUILD_NAMEDTENSOR
|
Tensor argsort(Dimname dim, bool descending=false) const;
|
#endif
|
std::tuple<Tensor,Tensor> topk(int64_t k, int64_t dim=-1, bool largest=true, bool sorted=true) const;
|
Tensor all() const;
|
Tensor any() const;
|
Tensor renorm(Scalar p, int64_t dim, Scalar maxnorm) const;
|
Tensor unfold(int64_t dimension, int64_t size, int64_t step) const;
|
bool equal(const Tensor & other) const;
|
Tensor pow(const Tensor & exponent) const;
|
Tensor alias() const;
|
|
// We changed .dtype() to return a TypeMeta in #12766. Ideally, we want
// at::kDouble and its friends to be TypeMeta's, but that hasn't happened yet.
// Until that happens, we provide these overloads to maintain BC for C++ usage
// like `x.to(y.dtype())`.
// TODO: remove the following two once at::kDouble and its friends are TypeMeta's.
|
inline Tensor to(caffe2::TypeMeta type_meta, bool non_blocking=false, bool copy=false) const {
|
return this->to(/*scalar_type=*/typeMetaToScalarType(type_meta), non_blocking, copy);
|
}
|
inline Tensor to(Device device, caffe2::TypeMeta type_meta, bool non_blocking=false, bool copy=false) const {
|
return this->to(device, /*scalar_type=*/typeMetaToScalarType(type_meta), non_blocking, copy);
|
}
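
// e.g.  x.to(y.dtype());               // match y's scalar type
//       x.to(y.device(), y.dtype());   // match y's device and scalar type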
|
|
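// Applies a callable to this tensor: `x.m(f, args...)` is equivalent to `f(x, args...)`.
// A sketch (`add_one` is a hypothetical free function):
//
//   Tensor add_one(const Tensor& t) { return t.add(1); }
//   Tensor y = x.m(add_one);   // same as add_one(x)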
template <typename F, typename... Args>
|
auto m(F func, Args&&... params) const -> decltype(func(*this, std::forward<Args>(params)...)) {
|
return func(*this, std::forward<Args>(params)...);
|
}
|
|
protected:
|
friend class ::caffe2::Tensor;
|
|
void enforce_invariants();
|
c10::intrusive_ptr<TensorImpl, UndefinedTensorImpl> impl_;
|
};
|
|
namespace detail {
|
// Helper creator for the Tensor class which doesn't require the user to pass
// in an intrusive_ptr; instead it converts the arguments passed into the
// requested intrusive_ptr type.
|
template <typename T, typename... Args>
|
Tensor make_tensor(Args&&... args) {
|
return Tensor(c10::make_intrusive<T>(std::forward<Args>(args)...));
|
}
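
// For example (a sketch; MyTensorImpl is a hypothetical TensorImpl subclass whose
// constructor accepts these arguments):
//
//   Tensor t = at::detail::make_tensor<MyTensorImpl>(arg1, arg2);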
|
|
} // namespace detail
|
|
static inline TensorTypeId legacyExtractTypeId(const Tensor& t) {
|
return legacyExtractTypeId(t.type_set());
|
}
|
|
} // namespace at
|