code/train_hopenet.py | ●●●●● 补丁 | 查看 | 原始文档 | blame | 历史 |
code/train_hopenet.py
@@ -184,7 +184,7 @@
         loss_roll += alpha * loss_reg_roll

         loss_seq = [loss_yaw, loss_pitch, loss_roll]
-        grad_seq = [torch.Tensor(1).cuda(gpu) for _ in range(len(loss_seq))]
+        grad_seq = [torch.ones(1).cuda(gpu) for _ in range(len(loss_seq))]
         optimizer.zero_grad()
         torch.autograd.backward(loss_seq, grad_seq)
         optimizer.step()