sdfvae/trainer.py +2 −0

@@ -57,6 +57,8 @@ class Trainer(object):
                   s_logvar, d_post_mean, d_post_logvar, d_prior_mean, d_prior_logvar):
         batch_size = original_seq.size(0)
         # See https://arxiv.org/pdf/1606.05908.pdf, Page 9, Section 2.2 for details.
+        # The constant terms in the loss function (not the coefficients) can be set to any value, or even
+        # omitted, because they have no impact on gradient propagation during training; the same holds for testing.
         # log(N(x|mu,sigma^2))
         # = log{1/(sqrt(2*pi)*sigma)*exp{-(x-mu)^2/(2*sigma^2)}}
         # = -0.5*{log(2*pi)+2*log(sigma)+[(x-mu)/exp{log(sigma)}]^2}
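For reference, the identity in the comments can be checked with a short PyTorch snippet. This is a minimal sketch, not the repository's actual loss code; it assumes, as the comment's `exp{log(var)}` denominator suggests, that the `*_logvar` tensors hold log standard deviations. The function name `gaussian_log_likelihood` and its arguments are hypothetical.

```python
import math

import torch


def gaussian_log_likelihood(x: torch.Tensor, mu: torch.Tensor,
                            log_sigma: torch.Tensor) -> torch.Tensor:
    """Elementwise log N(x | mu, sigma^2), with sigma = exp(log_sigma).

    Implements the identity from the comment:
        log N(x|mu,sigma^2) = -0.5*(log(2*pi) + 2*log(sigma) + ((x-mu)/sigma)^2)
    """
    log_2pi = math.log(2.0 * math.pi)  # additive constant: carries no gradient
    return -0.5 * (log_2pi + 2.0 * log_sigma
                   + ((x - mu) / torch.exp(log_sigma)) ** 2)


# Quick check against PyTorch's built-in Normal distribution.
x = torch.randn(4, 3)
mu = torch.zeros(4, 3)
log_sigma = torch.zeros(4, 3)  # sigma = 1
manual = gaussian_log_likelihood(x, mu, log_sigma)
builtin = torch.distributions.Normal(mu, torch.exp(log_sigma)).log_prob(x)
assert torch.allclose(manual, builtin, atol=1e-6)
```

Dropping the `log_2pi` term (or replacing it with any constant) shifts the loss value but leaves its gradients unchanged, which is exactly the point the added comment makes.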