Friday, 15 March 2019

How to convert the following TensorFlow code into PyTorch

I want to re-implement this word-embedding network in PyTorch.

Here is the original TensorFlow code (version 0.12.1):


import tensorflow as tf


class Network(object):
    def __init__(
            self, user_length, item_length, num_classes, user_vocab_size, item_vocab_size,
            fm_k, n_latent, user_num, item_num,
            embedding_size, filter_sizes, num_filters, l2_reg_lambda=0.0, l2_reg_V=0.0):

        # Skip the embedding
        pooled_outputs_u = []

        for i, filter_size in enumerate(filter_sizes):
            with tf.name_scope("user_conv-maxpool-%s" % filter_size):
                # Convolution Layer
                filter_shape = [filter_size, embedding_size, 1, num_filters]
                W = tf.Variable(tf.truncated_normal(filter_shape, stddev=0.1), name="W")
                b = tf.Variable(tf.constant(0.1, shape=[num_filters]), name="b")
                conv = tf.nn.conv2d(
                    self.embedded_users,
                    W,
                    strides=[1, 1, 1, 1],
                    padding="VALID",
                    name="conv")
                # Apply nonlinearity
                h = tf.nn.relu(tf.nn.bias_add(conv, b), name="relu")
                # Maxpooling over the outputs
                pooled = tf.nn.max_pool(
                    h,
                    ksize=[1, user_length - filter_size + 1, 1, 1],
                    strides=[1, 1, 1, 1],
                    padding='VALID',
                    name="pool")
                pooled_outputs_u.append(pooled)
        num_filters_total = num_filters * len(filter_sizes)
        self.h_pool_u = tf.concat(3, pooled_outputs_u)
        self.h_pool_flat_u = tf.reshape(self.h_pool_u, [-1, num_filters_total])

        with tf.name_scope("dropout"):
            self.h_drop_u = tf.nn.dropout(self.h_pool_flat_u, 1.0)
            self.h_drop_i = tf.nn.dropout(self.h_pool_flat_i, 1.0)
        with tf.name_scope("get_fea"):
            Wu = tf.get_variable(
                "Wu",
                shape=[num_filters_total, n_latent],
                initializer=tf.contrib.layers.xavier_initializer())
            bu = tf.Variable(tf.constant(0.1, shape=[n_latent]), name="bu")
            self.u_fea = tf.matmul(self.h_drop_u, Wu) + bu
            # self.u_fea = tf.nn.dropout(self.u_fea, self.dropout_keep_prob)
            Wi = tf.get_variable(
                "Wi",
                shape=[num_filters_total, n_latent],
                initializer=tf.contrib.layers.xavier_initializer())
            bi = tf.Variable(tf.constant(0.1, shape=[n_latent]), name="bi")
            self.i_fea = tf.matmul(self.h_drop_i, Wi) + bi
            # self.i_fea = tf.nn.dropout(self.i_fea, self.dropout_keep_prob)

        with tf.name_scope('fm'):
            self.z = tf.nn.relu(tf.concat(1, [self.u_fea, self.i_fea]))

            # self.z = tf.nn.dropout(self.z, self.dropout_keep_prob)

            WF1 = tf.Variable(
                tf.random_uniform([n_latent * 2, 1], -0.1, 0.1), name='fm1')
            Wf2 = tf.Variable(
                tf.random_uniform([n_latent * 2, fm_k], -0.1, 0.1), name='fm2')
            one = tf.matmul(self.z, WF1)

            inte1 = tf.matmul(self.z, Wf2)
            inte2 = tf.matmul(tf.square(self.z), tf.square(Wf2))

            inter = (tf.square(inte1) - inte2) * 0.5

            inter = tf.nn.dropout(inter, self.dropout_keep_prob)

            inter = tf.reduce_sum(inter, 1, keep_dims=True)
            print inter
            b = tf.Variable(tf.constant(0.1), name='bias')

And here is the PyTorch (version 1.0) code that I have tried:


import torch


class Network(torch.nn.Module):
    def __init__(
            self, user_length, item_length, num_classes, user_vocab_size, item_vocab_size,
            fm_k, n_latent, user_num, item_num,
            embedding_size, filter_sizes, num_filters, l2_reg_lambda=0.0, l2_reg_V=0.0):
        super(Network, self).__init__()  # required so that submodules get registered
        # Note: the embedding table size should probably be user_vocab_size rather than user_length
        self.embedded_users = torch.nn.Embedding(num_embeddings=user_length, embedding_dim=embedding_size)

        pooled_outputs_u = []
I am mainly having trouble converting the convolutional layers.

PyTorch is much easier to use since we can skip a lot of the explicit W and b definitions. Can someone help me with the rest? Thanks.
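To make the question more concrete, here is a rough sketch of how I think the user-side conv-maxpool branch might map to PyTorch. The class and variable names (UserCNN, user_ids, dropout_keep_prob) are placeholders I made up; only the hyper-parameters (filter_sizes, num_filters, embedding_size, n_latent) come from the code above, so please treat this as a starting point rather than a verified translation.


import torch
import torch.nn as nn
import torch.nn.functional as F


class UserCNN(nn.Module):
    def __init__(self, user_vocab_size, embedding_size, filter_sizes,
                 num_filters, n_latent, dropout_keep_prob=1.0):
        super(UserCNN, self).__init__()
        self.embedding = nn.Embedding(user_vocab_size, embedding_size)
        # One Conv2d per filter size; the TF filter shape
        # [filter_size, embedding_size, 1, num_filters] becomes a kernel of
        # (filter_size, embedding_size) on a single input channel, so the
        # width dimension collapses to 1 (same effect as padding="VALID").
        self.convs = nn.ModuleList([
            nn.Conv2d(in_channels=1, out_channels=num_filters,
                      kernel_size=(fs, embedding_size))
            for fs in filter_sizes
        ])
        num_filters_total = num_filters * len(filter_sizes)
        # TF dropout takes a keep probability, PyTorch takes a drop probability
        self.dropout = nn.Dropout(p=1.0 - dropout_keep_prob)
        self.fc = nn.Linear(num_filters_total, n_latent)  # plays the role of Wu / bu

    def forward(self, user_ids):
        # user_ids: (batch, user_length) matrix of word ids
        x = self.embedding(user_ids)                # (batch, user_length, embedding_size)
        x = x.unsqueeze(1)                          # (batch, 1, user_length, embedding_size)
        pooled_outputs_u = []
        for conv in self.convs:
            h = F.relu(conv(x))                     # (batch, num_filters, user_length - fs + 1, 1)
            # max-pool over the whole remaining length, like tf.nn.max_pool
            # with ksize=[1, user_length - filter_size + 1, 1, 1]
            p = F.max_pool2d(h, kernel_size=(h.size(2), 1))
            pooled_outputs_u.append(p.view(p.size(0), -1))    # (batch, num_filters)
        h_pool_flat_u = torch.cat(pooled_outputs_u, dim=1)    # (batch, num_filters_total)
        h_drop_u = self.dropout(h_pool_flat_u)
        return self.fc(h_drop_u)                    # (batch, n_latent), i.e. u_fea


If that is roughly right, the item branch would be the same module built with item_vocab_size and item_length, and the fm part looks like it could be written directly with tensor ops, e.g. inter = 0.5 * ((z @ Wf2) ** 2 - (z ** 2) @ (Wf2 ** 2)) with Wf2 kept as an nn.Parameter of shape (2 * n_latent, fm_k). Please correct me if I have the layout wrong.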


