def model_fn():
    """Build a NeuMF-style (MF + MLP fusion) rating model graph (TF1 graph mode).

    Combines a matrix-factorization branch (elementwise product of user/item
    embeddings) with an MLP branch (concatenated embeddings through two ReLU
    layers with dropout), then fuses both into a single sigmoid logit.

    Returns:
        dict with graph endpoints:
            "score":        sigmoid probability tensor, shape (batch,)
            "loss":         mean sigmoid cross-entropy + L2 regularization
            "uid":          int32 placeholder for user ids, shape (batch,)
            "i":            int32 placeholder for item ids, shape (batch,)
            "y":            float32 placeholder for binary labels, shape (batch,)
            "dropout_rate": scalar float32 placeholder (fraction to DROP)

    Requires module-level constants: USER_COUNT, MOVIE_COUNT, MF_EMB_SIZE,
    MLP_EMB_SIZE, L2_LAMBDA.
    """
    uid = tf.placeholder(tf.int32, shape=[None])
    item = tf.placeholder(tf.int32, shape=[None])
    y = tf.placeholder(tf.float32, shape=[None])
    dropout_rate = tf.placeholder(tf.float32)

    # +1 row so ids may be 1-indexed (row 0 unused) — TODO confirm against the
    # id encoding used by the data pipeline.
    emb_init = tf.truncated_normal_initializer(mean=0, stddev=0.1)
    mf_user_emb = tf.get_variable(name='mf_user_emb',
                                  initializer=emb_init,
                                  shape=[USER_COUNT + 1, MF_EMB_SIZE],
                                  trainable=True,
                                  regularizer=None)
    mf_movie_emb = tf.get_variable(name='mf_movie_emb',
                                   initializer=emb_init,
                                   shape=[MOVIE_COUNT + 1, MF_EMB_SIZE],
                                   trainable=True,
                                   regularizer=None)
    mlp_user_emb = tf.get_variable(name='mlp_user_emb',
                                   initializer=emb_init,
                                   shape=[USER_COUNT + 1, MLP_EMB_SIZE],
                                   trainable=True,
                                   regularizer=None)
    mlp_movie_emb = tf.get_variable(name='mlp_movie_emb',
                                    initializer=emb_init,
                                    shape=[MOVIE_COUNT + 1, MLP_EMB_SIZE],
                                    trainable=True,
                                    regularizer=None)

    # MF branch: elementwise product of user/item embeddings (GMF).
    user_mf_vec = tf.nn.embedding_lookup(mf_user_emb, uid)
    item_mf_vec = tf.nn.embedding_lookup(mf_movie_emb, item)
    mf_layer = tf.multiply(user_mf_vec, item_mf_vec)

    # MLP branch: concatenated embeddings through a shrinking ReLU tower.
    mlp_user_vec = tf.nn.embedding_lookup(mlp_user_emb, uid)
    mlp_movie_vec = tf.nn.embedding_lookup(mlp_movie_emb, item)
    mlp_out = tf.concat([mlp_user_vec, mlp_movie_vec], axis=1)

    size = MLP_EMB_SIZE
    for _ in range(2):
        dense = tf.keras.layers.Dense(
            size,
            activation=tf.nn.relu,
            bias_regularizer=tf.keras.regularizers.l2(L2_LAMBDA),
            kernel_regularizer=tf.keras.regularizers.l2(L2_LAMBDA),
            kernel_initializer=tf.contrib.layers.xavier_initializer())
        mlp_out = dense(mlp_out)
        # keep_prob = 1 - dropout_rate: the placeholder holds the DROP fraction.
        mlp_out = tf.nn.dropout(mlp_out, keep_prob=1 - dropout_rate)
        # Integer division: `size / 2` would yield a float under Python 3,
        # which is not a valid unit count for Dense.
        size = size // 2

    # Fuse both branches and project to a single logit.
    fused = tf.concat([mf_layer, mlp_out], axis=1)
    last_layer = tf.keras.layers.Dense(
        1,
        activation=None,
        kernel_regularizer=tf.keras.regularizers.l2(L2_LAMBDA),
        kernel_initializer=tf.contrib.layers.xavier_initializer(),
        bias_regularizer=tf.keras.regularizers.l2(L2_LAMBDA))
    logits = tf.reshape(last_layer(fused), shape=(-1,))

    # Collect L2 penalties registered by the kernel/bias regularizers above.
    l2_loss = tf.losses.get_regularization_loss()
    score = tf.sigmoid(logits)
    loss = tf.reduce_mean(
        tf.nn.sigmoid_cross_entropy_with_logits(labels=y, logits=logits)) + l2_loss

    return {"score": score,
            "loss": loss,
            'uid': uid,
            "i": item,
            "y": y,
            "dropout_rate": dropout_rate,
            }