@@ -51,6 +51,9 @@ def _create_variables(self):
         self.mlp_embedding_item = tf.Variable(initializer([self.num_items, int(self.layers[0]/2)]),
                                               name="mlp_embedding_item", dtype=tf.float32)
 
+        self.dense_layer = [tf.layers.Dense(units=n_units, activation=tf.nn.relu, name="layer%d" % idx)
+                            for idx, n_units in enumerate(self.layers)]
+
     def _create_inference(self, item_input):
         with tf.name_scope("inference"):
             # Crucial to flatten an embedding vector!
@@ -59,9 +62,11 @@ def _create_inference(self, item_input):
             # The 0-th layer is the concatenation of embedding layers
             mlp_vector = tf.concat([mlp_user_latent, mlp_item_latent], axis=1)
             # MLP layers
-            for idx in np.arange(len(self.layers)):
-                mlp_vector = tf.layers.dense(mlp_vector, units=self.layers[idx],
-                                             activation=tf.nn.relu, name="layer%d" % idx)
+            for layer in self.dense_layer:
+                mlp_vector = layer(mlp_vector)
+            # for idx in np.arange(len(self.layers)):
+            #     mlp_vector = tf.layers.dense(mlp_vector, units=self.layers[idx],
+            #                                  activation=tf.nn.relu, name="layer%d" % idx)
 
             # Final prediction layer
             predict = tf.reduce_sum(mlp_vector, 1)
@@ -123,7 +128,7 @@ def train_model(self):
             if epoch % self.verbose == 0:
                 logger.info("epoch %d:\t %s" % (epoch, self.evaluate()))
 
-    @timer
+    # @timer
     def evaluate(self):
        return self.evaluator.evaluate(self)
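Why pre-build the Dense layers in _create_variables instead of calling tf.layers.dense inside _create_inference? Below is a minimal TF1-style sketch of the behavior the refactor appears to rely on (the sizes, placeholder shapes, and variable names here are hypothetical, not from this repo): a tf.layers.Dense object creates its kernel and bias on the first call and reuses them on every later call, so the same MLP tower can be applied to several input tensors with shared weights.

# Minimal sketch, assuming TF 1.x (tf.placeholder and tf.layers are removed in TF2).
# Sizes and names are hypothetical stand-ins, not taken from this repository.
import tensorflow as tf

layers = [64, 32, 16]  # stand-in for self.layers

dense_layer = [tf.layers.Dense(units=n_units, activation=tf.nn.relu, name="layer%d" % idx)
               for idx, n_units in enumerate(layers)]

vec_a = tf.placeholder(tf.float32, [None, 64])  # e.g. a training path
vec_b = tf.placeholder(tf.float32, [None, 64])  # e.g. a prediction path

for layer in dense_layer:
    vec_a = layer(vec_a)  # first call builds layer.kernel and layer.bias
    vec_b = layer(vec_b)  # later calls reuse those same variables

# One (kernel, bias) pair per layer, shared by both paths.
assert len(tf.trainable_variables()) == 2 * len(layers)

This would also explain why the old tf.layers.dense loop is kept only as a comment: in TF1, calling tf.layers.dense a second time with the same name raises a duplicate-variable error unless reuse is passed explicitly, whereas the pre-built layer objects handle reuse implicitly.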