sdfvae/trainer.py (+4 −3)

@@ -59,9 +59,10 @@ class Trainer(object):
         # See https://arxiv.org/pdf/1606.05908.pdf, Page 9, Section 2.2 for details.
         # The constant terms in the loss function (not the coefficients) can be any number here, or even omitted,
         # because they have no impact on gradient propagation during training. The same holds in testing.
-        # log(N(x|mu,var))
-        # = log{1/(sqrt(2*pi)*var)exp{-(x-mu)^2/(2*var^2)}}
-        # = -0.5*{log(2*pi)+2*log(var)+[(x-mu)/exp{log(var)}]^2}
+        # log(N(x|mu,sigma^2))
+        # = log{1/(sqrt(2*pi)*sigma)exp{-(x-mu)^2/(2*sigma^2)}}
+        # = -0.5*{log(2*pi)+2*log(sigma)+[(x-mu)/exp{log(sigma)}]^2}
+        # Note that var = sigma^2, so "recon_seq_logvar" would more accurately be called "recon_seq_logsigma", but the name does not matter.
         loglikelihood = -0.5 * torch.sum(torch.pow(((original_seq.float()-recon_seq_mu.float())/torch.exp(recon_seq_logvar.float())), 2) + 2 * recon_seq_logvar.float() + np.log(np.pi*2))
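As a sanity check on the derivation in the new comment, here is a minimal, self-contained sketch. The tensor shapes and random values are made up for illustration; the variable names are borrowed from the diff, and `recon_seq_logvar` is treated as log(sigma) exactly as the added comment prescribes. It evaluates the closed form and compares it against `torch.distributions.Normal`:

```python
import numpy as np
import torch

torch.manual_seed(0)

# Hypothetical stand-ins for the tensors used in the diff: a batch of
# observations plus the decoder's Gaussian parameters. Despite its name,
# recon_seq_logvar holds log(sigma), as the new comment in the diff notes.
original_seq = torch.randn(4, 10)
recon_seq_mu = torch.randn(4, 10)
recon_seq_logvar = 0.1 * torch.randn(4, 10)  # log(sigma), not log(sigma^2)

# Closed form from the comment:
# log N(x|mu,sigma^2) = -0.5*(log(2*pi) + 2*log(sigma) + ((x-mu)/sigma)^2)
loglikelihood = -0.5 * torch.sum(
    torch.pow((original_seq - recon_seq_mu) / torch.exp(recon_seq_logvar), 2)
    + 2 * recon_seq_logvar
    + np.log(np.pi * 2)
)

# Cross-check: Normal's scale parameter is sigma itself, so we pass
# exp(log(sigma)); the summed log_prob should match the closed form.
reference = torch.distributions.Normal(
    recon_seq_mu, torch.exp(recon_seq_logvar)
).log_prob(original_seq).sum()

assert torch.allclose(loglikelihood, reference, atol=1e-4)
print(float(loglikelihood), float(reference))
```

The two values agree because `Normal` parameterizes the distribution by sigma directly, so passing `torch.exp(recon_seq_logvar)` reproduces the `exp{log(sigma)}` term in the comment. If the tensor actually stored log-variance, the scale would instead be `torch.exp(0.5 * recon_seq_logvar)`, which is why the naming note in the diff matters for anyone reusing this code.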