Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
4 changes: 2 additions & 2 deletions modules/control/proc/leres/pix2pix/models/base_model.py
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
import gc
import torch
import os
from abc import ABC, abstractmethod
from collections import OrderedDict
Expand Down Expand Up @@ -155,7 +155,7 @@ def save_networks(self, epoch):
save_path = os.path.join(self.save_dir, save_filename)
net = getattr(self, 'net' + name)

if len(self.gpu_ids) > 0 and torch.cuda.is_available():
if len(self.gpu_ids) > 0 and self.device.type == 'cuda':
torch.save(net.module.cpu().state_dict(), save_path)
net.cuda(self.gpu_ids[0])
else:
Expand Down
19 changes: 15 additions & 4 deletions modules/control/proc/leres/pix2pix/models/pix2pix4depth_model.py
Original file line number Diff line number Diff line change
@@ -1,5 +1,7 @@
import torch
from .base_model import BaseModel
import os
import torch
from . import networks


Expand Down Expand Up @@ -72,8 +74,8 @@ def __init__(self, opt):
self.criterionGAN = networks.GANLoss(opt.gan_mode).to(self.device)
self.criterionL1 = torch.nn.L1Loss()
# initialize optimizers; schedulers will be automatically created by function <BaseModel.setup>.
self.optimizer_G = torch.optim.Adam(self.netG.parameters(), lr=1e-4, betas=(opt.beta1, 0.999))
self.optimizer_D = torch.optim.Adam(self.netD.parameters(), lr=2e-06, betas=(opt.beta1, 0.999))
self.optimizer_G = torch.optim.Adam(self.netG.parameters(), lr=0.0001, betas=(opt.beta1, 0.999))
self.optimizer_D = torch.optim.Adam(self.netD.parameters(), lr=0.000002, betas=(opt.beta1, 0.999))
self.optimizers.append(self.optimizer_G)
self.optimizers.append(self.optimizer_D)

Expand Down Expand Up @@ -103,6 +105,15 @@ def set_input(self, outer, inner):
inner = self.normalize(inner)
outer = self.normalize(outer)

outer = torch.from_numpy(outer).unsqueeze(0).unsqueeze(0)
inner = torch.from_numpy(inner).unsqueeze(0).unsqueeze(0)

inner = (inner - torch.min(inner))/(torch.max(inner)-torch.min(inner))
outer = (outer - torch.min(outer))/(torch.max(outer)-torch.min(outer))

inner = self.normalize(inner)
outer = self.normalize(outer)

self.real_A = torch.cat((outer, inner), 1).to(self.device)


Expand Down Expand Up @@ -134,11 +145,11 @@ def backward_G(self):
# First, G(A) should fake the discriminator
fake_AB = torch.cat((self.real_A, self.fake_B), 1)
pred_fake = self.netD(fake_AB)
self.loss_G_GAN = self.criterionGAN(pred_fake, True)
self.loss_G_GAN = self.criterionGAN(pred_fake, True)
# Second, G(A) = B
self.loss_G_L1 = self.criterionL1(self.fake_B, self.real_B) * self.opt.lambda_L1
# combine loss and calculate gradients
self.loss_G = self.loss_G_L1 + self.loss_G_GAN
self.loss_G = self.loss_G_GAN + self.loss_G_L1
self.loss_G.backward()

def optimize_parameters(self):
Expand Down