【Reverse】Masking Padding in TensorFlow: Bidirectional LSTMs over Variable-Length Sequences
0x00 Preface

When we work with bidirectional LSTMs, the sequences in a batch usually have different lengths, so padding is unavoidable, e.g.:

array([[1, 2, 0],
       [2, 1, 3],
       [3, 5, 4]], dtype=int32)
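For a padded batch like the one above, the true lengths (the cell_lens used throughout this post) can be recovered with a common trick. A minimal sketch, assuming the padding id is 0 (this snippet is not part of the original code):

import tensorflow as tf

padded = tf.constant([[1, 2, 0],
                      [2, 1, 3],
                      [3, 5, 4]], dtype=tf.int32)
# tf.sign maps every real token id (> 0) to 1 and the padding 0 to 0,
# so summing along the time axis yields the true length of each row.
lengths = tf.reduce_sum(tf.sign(padded), axis=1)

with tf.Session() as sess:
    print(sess.run(lengths))   # [2 3 3]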
When a bidirectional LSTM is run over such padded data, the backward pass reads the padding zeros before it ever reaches the real tokens, so the padding leaks into the backward hidden states.

0x01 Brief explanation

The approach here is to use a unidirectional LSTM together with a cell_len tensor that marks where each sequence stops: when sequence_length is passed to the RNN, outputs past each length are zeroed and the state is simply copied through, so the final state corresponds to the last real token. The "backward" direction is obtained by first flipping only the valid prefix of each sequence with tf.reverse_sequence and then running the same kind of forward LSTM.

0x02 Function introduction: tf.reverse_sequence()
import tensorflow as tf

sess = tf.Session()
inp = tf.placeholder(tf.int32, [None, 5])
cell_lens = tf.placeholder(tf.int32, [None])
rseq = tf.reverse_sequence(
    inp, cell_lens,         # input matrix and the per-row lengths to reverse
    name='reverse_data',    # name, to make the op easy to locate in the graph
    seq_dim=1, batch_dim=0  # the axis to reverse along and the batch axis
)
sess.run(rseq, {
    inp: [list(range(5)), list(range(5)), list(range(5))],
    cell_lens: [2, 3, 4]
})
"""
# the first 2, 3 and 4 elements of each row are reversed, the rest is copied through
array([[1, 0, 2, 3, 4],
       [2, 1, 0, 3, 4],
       [3, 2, 1, 0, 4]], dtype=int32)
"""
0x03 Excerpt from a real-world application
class LstmLayer(object):
    """ LSTM layer class """
    def __init__(self, num_units, bidirection=False, sequence_length=None, name="lstm"):
        self.num_units = num_units
        self.bidirection = bidirection
        # flatten to a 1-D [batch] vector of true lengths (may be None)
        self.sequence_length = tf.reshape(sequence_length, [-1]) if sequence_length is not None else None
        self.name = name

    def __call__(self, inputs, time_major=False):
        with tf.name_scope('{}_cal'.format(self.name)):
            with tf.variable_scope(self.name, reuse=tf.AUTO_REUSE):
                if self.bidirection:
                    lstm_fw_cell = tf.nn.rnn_cell.BasicLSTMCell(self.num_units, state_is_tuple=True)
                    lstm_bw_cell = tf.nn.rnn_cell.BasicLSTMCell(self.num_units, state_is_tuple=True)
                    outputs, output_states = tf.nn.bidirectional_dynamic_rnn(
                        lstm_fw_cell, lstm_bw_cell, inputs,
                        sequence_length=self.sequence_length,
                        time_major=time_major, dtype=tf.float32)
                else:
                    lstm_fw_cell = tf.nn.rnn_cell.BasicLSTMCell(self.num_units, state_is_tuple=True)
                    outputs, output_states = tf.nn.dynamic_rnn(
                        lstm_fw_cell, inputs,
                        sequence_length=self.sequence_length,
                        time_major=time_major, dtype=tf.float32)
        return outputs, output_states
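A hypothetical usage sketch of the layer above (placeholder names and dimensions are assumptions, not taken from the original post):

import tensorflow as tf

emb = tf.placeholder(tf.float32, [None, None, 64], name="emb")  # [batch, seq_len, emb_dim]
lens = tf.placeholder(tf.int32, [None], name="lens")            # true length of each sequence

layer = LstmLayer(num_units=128, bidirection=False, sequence_length=lens, name="forward_lstm")
outputs, (c_state, h_state) = layer(emb)  # outputs: [batch, seq_len, 128], h_state: [batch, 128]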
class Network(object):
    """ Network for model testing """
    # NOTE: this is an excerpt -- `feed_options` and `self.cell_lens` (the [batch] tensor
    # of true sequence lengths) are defined elsewhere in the full model.
    def __init__(self, options=feed_options, is_training=True):
        self.options = options
        self.is_training = is_training
        # LSTM layers: one for the original order, one for the reversed sequences
        self.forward_lstm = LstmLayer(num_units=options.get('lstm_dim'),
                                      sequence_length=self.cell_lens, name="forward_lstm")
        self.backward_lstm = LstmLayer(num_units=options.get('lstm_dim'), name="backward_lstm")
    def get_reverse(self, input, seq_dim=1, batch_dim=0):
        """
        Example from the tf.reverse_sequence docs:
        # Given this:
        batch_dim = 2
        seq_dim = 0
        input.dims = (8, ?, 4, ...)
        seq_lengths = [7, 2, 3, 5]
        # then slices of input are reversed on seq_dim, but only up to seq_lengths:
        output[0:7, :, 0, :, ...] = input[7:0:-1, :, 0, :, ...]
        output[0:2, :, 1, :, ...] = input[2:0:-1, :, 1, :, ...]
        output[0:3, :, 2, :, ...] = input[3:0:-1, :, 2, :, ...]
        output[0:5, :, 3, :, ...] = input[5:0:-1, :, 3, :, ...]
        # while entries past seq_lens are copied through:
        output[7:, :, 0, :, ...] = input[7:, :, 0, :, ...]
        output[2:, :, 1, :, ...] = input[2:, :, 1, :, ...]
        output[3:, :, 2, :, ...] = input[3:, :, 2, :, ...]
        output[5:, :, 3, :, ...] = input[5:, :, 3, :, ...]
        """
        return tf.reverse_sequence(
            input, self.cell_lens, seq_dim=seq_dim, batch_dim=batch_dim)
    def lstm_layer(self, forward_emb):
        # [batch, seq_len + 2, emb_dim] -> [batch, seq_len + 2, emb_dim]
        # flip only the valid prefix of each sequence; the padding stays at the tail
        backward_emb = self.get_reverse(forward_emb)
        # [batch, seq_len + 2, emb_dim] x 2 -> final hidden states [batch, lstm_dim] x 2
        _, (c1, f_hidden) = self.forward_lstm(forward_emb)
        _, (c2, b_hidden) = self.backward_lstm(backward_emb)
        # [batch, lstm_dim] x 2 -> [batch, lstm_dim * 2]
        return tf.concat([f_hidden, self.get_reverse(b_hidden)], -1)
This use case also shows off a small trick of staggered, reversed addition, using …
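The post does not show the code for that trick, so the sketch below is a guess at one common form of it rather than the author's exact implementation: flip the backward LSTM's per-timestep outputs back with the same reverse_sequence call so that step t of both directions describes the same token, then add the backward outputs shifted one step to the left, so no token is counted twice.

import tensorflow as tf

def staggered_add(fw_outputs, bw_outputs_on_flipped, lengths):
    """Hypothetical helper, not from the original post.

    fw_outputs:            [batch, seq_len, dim] forward LSTM outputs
    bw_outputs_on_flipped: [batch, seq_len, dim] LSTM outputs over the flipped input
    lengths:               [batch] true sequence lengths
    """
    # Flipping the valid prefix a second time restores the original time order,
    # so step t of both tensors now refers to the same token.
    bw_aligned = tf.reverse_sequence(bw_outputs_on_flipped, lengths,
                                     seq_dim=1, batch_dim=0)
    # Staggered addition: shift the backward outputs one step to the left (pad the
    # tail with zeros), so position t combines the forward state over tokens 0..t
    # with the backward state over tokens t+1..end.
    bw_shifted = tf.pad(bw_aligned[:, 1:, :], [[0, 0], [0, 1], [0, 0]])
    return fw_outputs + bw_shifted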