#pragma once

#include <ATen/ATen.h>

#include <torch/csrc/autograd/edge.h>
#include <torch/csrc/autograd/function.h>
#include <torch/csrc/autograd/grad_mode.h>
#include <torch/csrc/autograd/saved_variable.h>
#include <torch/csrc/autograd/variable.h>
#include <torch/csrc/autograd/functions/basic_ops.h>
#include <torch/csrc/autograd/functions/tensor.h>
#include <torch/csrc/autograd/functions/utils.h>
#include <torch/csrc/autograd/generated/Functions.h>
#include <torch/csrc/jit/constants.h>
#include <torch/csrc/jit/ir.h>
#include <torch/csrc/jit/tracer.h>
#include <torch/csrc/utils/variadic.h>

#include <array>
#include <cstddef>
#include <functional>
#include <initializer_list>
#include <memory>
#include <stdexcept>
#include <string>
#include <tuple>
#include <utility>
#include <vector>

#ifdef _MSC_VER
#ifdef Type
#undef Type
#endif
#endif

using namespace at;
using namespace torch::autograd::generated;

namespace torch { namespace autograd {

inline void check_inplace(const Tensor& tensor) {
  auto& var = static_cast<const Variable&>(tensor);
  if (var.requires_grad() && var.is_leaf() && GradMode::is_enabled()) {
    AT_ERROR(
      "a leaf Variable that requires grad has been used in an in-place operation.");
  }
}

inline void throw_error_out_requires_grad(const char* name) {
  AT_ERROR(
      name, "(): functions with out=... arguments don't support automatic differentiation, "
      "but one of the arguments requires grad.");
}

// TODO: Blegh, bare references

inline void rebase_history(Variable& var, std::shared_ptr<Node> grad_fn) {
  if (grad_fn && var.defined()) {
    grad_fn->add_input_metadata(var);
    var.rebase_history({std::move(grad_fn), 0});
  }
}

inline void rebase_history(std::vector<Variable>&& vars, std::shared_ptr<Node> grad_fn) {
  if (grad_fn) {
    for (auto& var : vars) {
      if (var.defined()) {
        // TODO: eliminate const_cast
        auto output_nr = grad_fn->add_input_metadata(var);
        var.rebase_history({std::move(grad_fn), output_nr});
      } else {
        grad_fn->add_input_metadata(Node::undefined_input());
      }
    }
  }
}

inline void increment_version(Tensor& t) {
  as_variable_ref(t).bump_version();
}

inline bool isFloatingPoint(ScalarType s) {
  return s == kFloat || s == kDouble || s == kHalf;
}

struct Flatten : IterArgs<Flatten> {
  Flatten(variable_list& out) : out(out) {}
  variable_list& out;
  void operator()(const at::Tensor& x) { out.emplace_back(x); }
  void operator()(at::ArrayRef<at::Tensor> xs) {
    out.insert(out.end(), xs.begin(), xs.end());
  }
};

template <typename... Args>
inline variable_list flatten_tensor_args(Args&&... args) {
  variable_list out;
  out.reserve(count_tensors(std::forward<Args>(args)...));
  Flatten(out).apply(std::forward<Args>(args)...);
  return out; // RVO
}

// See NOTE [ Autograd View Variables ] for details.
inline Tensor as_view(const Tensor& base, Tensor tensor, bool is_differentiable = true) {
  auto base_var = Variable(base);
  if (base_var.is_view()) {
    base_var = base_var.base();
  }
  return make_variable_view(std::move(base_var), std::move(tensor), is_differentiable);
}

// See NOTE [ Autograd View Variables ] for details.
inline std::vector<Tensor> as_view(const Tensor& base, std::vector<Tensor> tensors,
    bool is_differentiable = true) {
  auto base_var = Variable(base);
  if (base_var.is_view()) {
    base_var = base_var.base();
  }
  for (Tensor& tensor : tensors) {
    tensor = make_variable_view(base_var, std::move(tensor), is_differentiable);
  }
  return tensors;
}

inline void check_no_requires_grad(const Tensor& tensor, const char* name) {
  auto& var = static_cast<const Variable&>(tensor);
  if (var.defined() && var.requires_grad()) {
    std::string msg = "the derivative for '";
    msg += name;
    msg += "' is not implemented";
    throw std::runtime_error(msg);
  }
}

inline void check_no_requires_grad(TensorList tensors, const char* name) {
  for (auto& tensor : tensors) {
    check_no_requires_grad(tensor, name);
  }
}

// Assumed that saved tensor lists are never inplace outputs
inline std::vector<SavedVariable> make_saved_variable_list(TensorList tensors) {
  return fmap(tensors, [](const Tensor& tensor) -> SavedVariable {
      return SavedVariable{tensor, false /* is output */}; });
}
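// The helpers above are consumed by the code-generated `VariableType` methods
// rather than called directly by users. As a rough, hypothetical sketch only
// (the real generated code also handles unpacking, tracing, and saving inputs;
// `AddBackward0` is just an example node name), an in-place op composes them
// roughly like this:
//
//   Tensor& VariableType::add_(Tensor& self, const Tensor& other, Scalar alpha) {
//     check_inplace(self);                       // reject in-place on a leaf that requires grad
//     std::shared_ptr<Node> grad_fn;
//     if (compute_requires_grad(self, other)) {
//       grad_fn = std::make_shared<AddBackward0>();
//       grad_fn->set_next_edges(collect_next_edges(self, other));
//     }
//     /* ... dispatch to the underlying ATen kernel ... */
//     increment_version(self);                   // bump the version counter for saved-tensor checks
//     rebase_history(as_variable_ref(self), grad_fn);  // point self's autograd history at grad_fn
//     return self;
//   }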
// NOTE: For now, there is no guarantee that the tensors returned from
// out-of-place ATen ops are not Variables. For example, the following operators:
//
// 1. `coalesce()` (called from `VariableType::coalesce()`)
// 2. `_embedding_bag_cpu()` (called from `VariableType::_embedding_bag()`)
//
// can return their input or tensors created using the input's options, which can
// potentially be Variables because inputs to ATen ops can be Variables.
//
// In the near future, once we make every tensor a Variable, these two
// `as_variable()` functions will be no-ops and we can remove them.
inline Tensor as_variable(Tensor tensor) {
  return tensor.is_variable() ?
    tensor : make_variable(std::move(tensor), /*requires_grad=*/false);
}

inline std::vector<Tensor> as_variable(TensorList tl) {
  return fmap(tl, [](const Tensor& t) -> Tensor {
      return t.is_variable() ? t : make_variable(t, /*requires_grad=*/false);
  });
}

template <typename... Tensors, size_t... Is>
std::tuple<Tensors...> as_variable_impl(
    std::tuple<Tensors...> tensors,
    Indices<Is...>) {
  // Expand the integer parameter pack into a sequence of Variable
  // constructions. This turns into (boolean omitted):
  // Variable(std::get<0>(tensors)), Variable(std::get<1>(tensors)), ...
  return std::tuple<Tensors...>(
      as_variable(std::get<Is>(tensors))...);
}

// NB: Because this was not forward declared, recursive std::tuple won't work.
// You can probably rejigger this to make it supported if you really need it.
template <typename... Tensors>
std::tuple<Tensors...> as_variable(std::tuple<Tensors...> tensors) {
  // `sizeof...(Tensors)` gets us the size of the `Tensors` parameter pack at
  // compile time. We use it to parameterize a `MakeIndices` class, which will
  // expand into an Indices object containing the numbers 0 to
  // sizeof...(Tensors) - 1.
  return as_variable_impl(
      tensors, typename MakeIndices<sizeof...(Tensors)>::indices());
}

inline std::vector<std::vector<int64_t>> to_args_sizes(TensorList tensors) {
  std::vector<std::vector<int64_t>> args_sizes(tensors.size());
  for (size_t i = 0; i < tensors.size(); ++i) {
    args_sizes[i] = tensors[i].sizes().vec();
  }
  return args_sizes;
}

}} // namespace torch::autograd
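// Illustrative sketch: for an op that returns several tensors, the tuple
// overload of `as_variable()` wraps every element in one call. Assuming a
// hypothetical base-type dispatch that yields a std::tuple<Tensor, Tensor>,
// e.g.
//
//   auto result = as_variable(baseType->kthvalue(self_, k, dim, keepdim));
//
// the call expands, via as_variable_impl and MakeIndices<2>, into
//
//   std::tuple<Tensor, Tensor>(as_variable(std::get<0>(tensors)),
//                              as_variable(std::get<1>(tensors)));
//
// so each returned tensor becomes a Variable with requires_grad=false until
// the caller attaches a grad_fn (e.g. via rebase_history above).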