未验证 提交 b339e6d1 编辑于 作者: Toshihiro NAKAE's avatar Toshihiro NAKAE 提交者: GitHub
浏览文件

Merge pull request #20 from tnakae/feature_support-python35

Supported python 3.5
加载中
加载中
加载中
加载中
+1 −1
原始行号 差异行号 差异行
@@ -8,7 +8,7 @@ This implementation is based on the paper
this is UNOFFICIAL implementation.

# Requirements
- python 3
- python >= 3.5
- Tensorflow
- Numpy
- sklearn
+1 −1
原始行号 差异行号 差异行
@@ -9,7 +9,7 @@ DAGMM (Deep Autoencoding Gaussian Mixture Model) の Tensorflow 実装です。
※この実装は論文著者とは無関係です。

# 動作要件
- python 3
- python >= 3.5
- Tensorflow
- Numpy
- sklearn
+4 −4
原始行号 差异行号 差异行
@@ -35,12 +35,12 @@ class CompressionNet:
            for size in self.hidden_layer_sizes[:-1]:
                n_layer += 1
                z = tf.layers.dense(z, size, activation=self.activation,
                    name=f"layer_{n_layer}")
                    name="layer_{}".format(n_layer))

            # activation function of last layer is linear
            n_layer += 1
            z = tf.layers.dense(z, self.hidden_layer_sizes[-1],
                name=f"layer_{n_layer}")
                name="layer_{}".format(n_layer))

        return z

@@ -50,12 +50,12 @@ class CompressionNet:
            for size in self.hidden_layer_sizes[:-1][::-1]:
                n_layer += 1
                z = tf.layers.dense(z, size, activation=self.activation,
                    name=f"layer_{n_layer}")
                    name="layer_{}".format(n_layer))

            # activation function of last layer is linear
            n_layer += 1
            x_dash = tf.layers.dense(z, self.input_size,
                name=f"layer_{n_layer}")
                name="layer_{}".format(n_layer))

        return x_dash

+1 −1
原始行号 差异行号 差异行
@@ -153,7 +153,7 @@ class DAGMM:

                if (epoch + 1) % 100 == 0:
                    loss_val = self.sess.run(loss, feed_dict={input:x, drop:0})
                    print(f" epoch {epoch+1}/{self.epoch_size} : loss = {loss_val:.3f}")
                    print(" epoch {}/{} : loss = {:.3f}".format(epoch + 1, self.epoch_size, loss_val))

            # Fix GMM parameter
            fix = self.gmm.fix_op()
+2 −2
原始行号 差异行号 差异行
@@ -46,10 +46,10 @@ class EstimationNet:
            for size in self.hidden_layer_sizes[:-1]:
                n_layer += 1
                z = tf.layers.dense(z, size, activation=self.activation,
                    name=f"layer_{n_layer}")
                    name="layer_{}".format(n_layer))
                if dropout_ratio is not None:
                    z = tf.layers.dropout(z, dropout_ratio,
                        name=f"drop_{n_layer}")
                        name="drop_{}".format(n_layer))

            # Last layer uses linear function (=logits)
            size = self.hidden_layer_sizes[-1]