Skip to content

Commit

Permalink
Fix a bug
Browse files Browse the repository at this point in the history
  • Loading branch information
Zhongchuan Sun committed Dec 28, 2019
1 parent 7182ea3 commit bef6114
Showing 1 changed file with 9 additions and 4 deletions.
13 changes: 9 additions & 4 deletions model/general_recommender/MLP.py
Original file line number Diff line number Diff line change
Expand Up @@ -51,6 +51,9 @@ def _create_variables(self):
self.mlp_embedding_item = tf.Variable(initializer([self.num_items, int(self.layers[0]/2)]),
name="mlp_embedding_item", dtype=tf.float32)

self.dense_layer = [tf.layers.Dense(units=n_units, activation=tf.nn.relu, name="layer%d" % idx)
for idx, n_units in enumerate(self.layers)]

def _create_inference(self, item_input):
with tf.name_scope("inference"):
# Crucial to flatten an embedding vector!
Expand All @@ -59,9 +62,11 @@ def _create_inference(self, item_input):
# The 0-th layer is the concatenation of embedding layers
mlp_vector = tf.concat([mlp_user_latent, mlp_item_latent], axis=1)
# MLP layers
for idx in np.arange(len(self.layers)):
mlp_vector = tf.layers.dense(mlp_vector, units=self.layers[idx],
activation=tf.nn.relu, name="layer%d" % idx)
for layer in self.dense_layer:
mlp_vector = layer(mlp_vector)
# for idx in np.arange(len(self.layers)):
# mlp_vector = tf.layers.dense(mlp_vector, units=self.layers[idx],
# activation=tf.nn.relu, name="layer%d" % idx)

# Final prediction layer
predict = tf.reduce_sum(mlp_vector, 1)
Expand Down Expand Up @@ -123,7 +128,7 @@ def train_model(self):
if epoch % self.verbose == 0:
logger.info("epoch %d:\t%s" % (epoch, self.evaluate()))

@timer
# @timer
def evaluate(self):
    """Delegate evaluation of this model to the configured evaluator.

    Returns whatever ``self.evaluator.evaluate(self)`` produces; the caller
    (``train_model``) formats the result with ``%s``, so it is expected to
    be printable.
    """
    evaluator = self.evaluator
    return evaluator.evaluate(self)

Expand Down

0 comments on commit bef6114

Please sign in to comment.