README.md +1 −1

```diff
@@ -8,7 +8,7 @@ This implementation is based on the paper
 this is UNOFFICIAL implementation.
 
 # Requirements
-- python 3
+- python >= 3.5
 - Tensorflow
 - Numpy
 - sklearn
```
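The requirement bump matches the code changes below: f-strings are Python 3.6+ syntax (PEP 498), so a module containing one fails with a SyntaxError at import time on 3.5 even if that line never runs, while `str.format()` is an ordinary method call available on 3.5. A minimal sketch of the substitution this PR applies throughout, using a throwaway `n_layer` variable as in the diffs below:

```python
# Minimal sketch (not from the repository) of the f-string -> str.format()
# substitution. The commented-out line is a SyntaxError on Python 3.5;
# the .format() form works on 3.5.
n_layer = 1

# name = f"layer_{n_layer}"         # Python 3.6+ only (PEP 498)
name = "layer_{}".format(n_layer)   # Python >= 3.5

assert name == "layer_1"
```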
README_ja.md +1 −1

```diff
@@ -9,7 +9,7 @@ DAGMM (Deep Autoencoding Gaussian Mixture Model) の Tensorflow 実装です。
 ※この実装は論文著者とは無関係です。
 
 # 動作要件
-- python 3
+- python >= 3.5
 - Tensorflow
 - Numpy
 - sklearn
```
dagmm/compression_net.py +4 −4

```diff
@@ -35,12 +35,12 @@ class CompressionNet:
         for size in self.hidden_layer_sizes[:-1]:
             n_layer += 1
             z = tf.layers.dense(z, size, activation=self.activation,
-                name=f"layer_{n_layer}")
+                name="layer_{}".format(n_layer))
 
         # activation function of last layer is linear
         n_layer += 1
         z = tf.layers.dense(z, self.hidden_layer_sizes[-1],
-            name=f"layer_{n_layer}")
+            name="layer_{}".format(n_layer))
 
         return z
@@ -50,12 +50,12 @@ class CompressionNet:
         for size in self.hidden_layer_sizes[:-1][::-1]:
             n_layer += 1
             z = tf.layers.dense(z, size, activation=self.activation,
-                name=f"layer_{n_layer}")
+                name="layer_{}".format(n_layer))
 
         # activation function of last layer is linear
         n_layer += 1
         x_dash = tf.layers.dense(z, self.input_size,
-            name=f"layer_{n_layer}")
+            name="layer_{}".format(n_layer))
 
         return x_dash
```
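To confirm the renamed layers keep their numbering, here is a TensorFlow-free sketch of the counter-based naming loop in `CompressionNet`; `hidden_layer_sizes` is an illustrative value, not the repository's default, and `tf.layers.dense` is replaced by simply collecting the names.

```python
# Pure-Python stand-in for the encoder's naming pattern; runs anywhere.
hidden_layer_sizes = [60, 30, 10]  # assumed for illustration

n_layer = 0
names = []
for size in hidden_layer_sizes[:-1]:
    n_layer += 1
    names.append("layer_{}".format(n_layer))  # hidden layers

# the last layer is linear but shares the same naming scheme
n_layer += 1
names.append("layer_{}".format(n_layer))

print(names)  # -> ['layer_1', 'layer_2', 'layer_3']
```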
dagmm/dagmm.py +1 −1

```diff
@@ -153,7 +153,7 @@ class DAGMM:
             if (epoch + 1) % 100 == 0:
                 loss_val = self.sess.run(loss, feed_dict={input:x, drop:0})
-                print(f" epoch {epoch+1}/{self.epoch_size} : loss = {loss_val:.3f}")
+                print(" epoch {}/{} : loss = {:.3f}".format(epoch + 1, self.epoch_size, loss_val))
 
         # Fix GMM parameter
         fix = self.gmm.fix_op()
```
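Note that the format spec inside the f-string (`{loss_val:.3f}`) carries over unchanged into `str.format()`. A small sketch of the resulting log line, with made-up values in place of the training loop's state:

```python
# Values are illustrative only; in the repository they come from the
# training loop and self.epoch_size.
epoch, epoch_size, loss_val = 99, 1000, 1.23456

print(" epoch {}/{} : loss = {:.3f}".format(epoch + 1, epoch_size, loss_val))
# -> " epoch 100/1000 : loss = 1.235"
```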
dagmm/estimation_net.py +2 −2

```diff
@@ -46,10 +46,10 @@ class EstimationNet:
         for size in self.hidden_layer_sizes[:-1]:
             n_layer += 1
             z = tf.layers.dense(z, size, activation=self.activation,
-                name=f"layer_{n_layer}")
+                name="layer_{}".format(n_layer))
             if dropout_ratio is not None:
                 z = tf.layers.dropout(z, dropout_ratio,
-                    name=f"drop_{n_layer}")
+                    name="drop_{}".format(n_layer))
 
         # Last layer uses linear function (=logits)
         size = self.hidden_layer_sizes[-1]
```
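`EstimationNet` pairs each hidden layer with an optional dropout op that reuses the same counter, so `layer_N` and `drop_N` stay aligned after the rename. A TensorFlow-free sketch of that pairing, under assumed sizes:

```python
# Pure-Python stand-in; hidden_layer_sizes and dropout_ratio are
# assumptions for illustration, not the repository's defaults.
hidden_layer_sizes = [10, 4]
dropout_ratio = 0.5

n_layer = 0
names = []
for size in hidden_layer_sizes[:-1]:
    n_layer += 1
    names.append("layer_{}".format(n_layer))
    if dropout_ratio is not None:
        names.append("drop_{}".format(n_layer))  # dropout shares the index

print(names)  # -> ['layer_1', 'drop_1']
```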