-
Notifications
You must be signed in to change notification settings - Fork 1
/
Copy pathseq2seq_model.py
102 lines (81 loc) · 3.38 KB
/
seq2seq_model.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
'''
Seq2seq models implemented in PyTorch.
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
MAX_LENGTH=10
class EncoderRNN(nn.Module):
    """LSTM encoder: maps a (batch, seq, input_size) sequence to a single
    (batch, output_size) vector projected from the last time step."""
    def __init__(self, input_size, hidden_size, output_size, num_layers, batch_size):
        """
        Args:
            input_size: number of input features per time step.
            hidden_size: LSTM hidden dimension.
            output_size: dimension of the projected output vector.
            num_layers: number of stacked LSTM layers.
            batch_size: batch size assumed by initHidden().
        """
        super(EncoderRNN, self).__init__()
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.batch_size = batch_size
        # BUG FIX: the nn.LSTM keyword is `batch_first`, not `batch_size_first`
        # (the original raised TypeError on construction).
        self.lstm = nn.LSTM(
            input_size=input_size,
            hidden_size=hidden_size,
            batch_first=True,
            num_layers=num_layers
        )
        self.out = nn.Linear(hidden_size, output_size)
    def forward(self, input, hidden):
        """Run the whole sequence through the LSTM and project the last step.

        Args:
            input: (batch, seq, input_size) tensor.
            hidden: (h_0, c_0) tuple, each (num_layers, batch, hidden_size).
        Returns:
            (output, hidden) where output is (batch, output_size) computed
            from the final time step and hidden is the LSTM's (h_n, c_n).
        """
        output, hidden = self.lstm(input, hidden)
        output = self.out(output[:, -1, :])  # keep only the last time step
        return output, hidden
    def initHidden(self):
        """Return a zeroed (h_0, c_0) pair for the LSTM.

        BUG FIX: nn.LSTM expects a tuple of two tensors as its initial
        state; the original returned a single tensor, which the LSTM
        rejects at call time.
        """
        zeros = torch.zeros(self.num_layers, self.batch_size, self.hidden_size, device=device)
        return (zeros, zeros.clone())
class DecoderRNN(nn.Module):
    """LSTM decoder: maps a (batch, seq, input_size) sequence to
    per-step logits of shape (batch, seq, output_size)."""
    def __init__(self, input_size, hidden_size, output_size, num_layers, batch_size):
        """
        Args:
            input_size: number of input features per time step.
            hidden_size: LSTM hidden dimension.
            output_size: dimension of the per-step output projection.
            num_layers: number of stacked LSTM layers.
            batch_size: batch size assumed by initHidden().
        """
        super(DecoderRNN, self).__init__()
        self.input_size = input_size
        self.hidden_size = hidden_size
        # BUG FIX: num_layers was never stored, so initHidden() raised
        # AttributeError.
        self.num_layers = num_layers
        self.batch_size = batch_size
        # BUG FIX: the nn.LSTM keyword is `batch_first`, not `batch_size_first`
        # (the original raised TypeError on construction).
        self.lstm = nn.LSTM(
            input_size=input_size,
            hidden_size=hidden_size,
            batch_first=True,
            num_layers=num_layers
        )
        self.out = nn.Linear(hidden_size, output_size)
    def forward(self, input, hidden):
        """Run the sequence through the LSTM and project every time step.

        Args:
            input: (batch, seq, input_size) tensor.
            hidden: (h_0, c_0) tuple, each (num_layers, batch, hidden_size).
        Returns:
            (output, hidden) where output is (batch, seq, output_size)
            and hidden is the LSTM's (h_n, c_n).
        """
        output, hidden = self.lstm(input, hidden)
        output = self.out(output)
        return output, hidden
    def initHidden(self):
        """Return a zeroed (h_0, c_0) pair for the LSTM.

        BUG FIX: nn.LSTM expects a tuple of two tensors as its initial
        state; the original returned a single tensor.
        """
        zeros = torch.zeros(self.num_layers, self.batch_size, self.hidden_size, device=device)
        return (zeros, zeros.clone())
class AttnDecoderRNN(nn.Module):
    """GRU decoder with attention over the encoder outputs (the PyTorch
    seq2seq translation tutorial architecture).

    Decodes one token per forward() call with an effective batch size of 1.
    """
    def __init__(self, hidden_size, output_size, dropout_p=0.1, max_length=MAX_LENGTH):
        """
        Args:
            hidden_size: embedding / GRU hidden dimension.
            output_size: target vocabulary size.
            dropout_p: dropout probability applied to the embedding.
            max_length: number of encoder positions attended over.
        """
        super(AttnDecoderRNN, self).__init__()
        self.hidden_size = hidden_size
        self.output_size = output_size
        self.dropout_p = dropout_p
        self.max_length = max_length
        self.embedding = nn.Embedding(self.output_size, self.hidden_size)
        # BUG FIX: attention scores need one entry per encoder position
        # (max_length), not hidden_size, so they can be bmm'd against the
        # (max_length, hidden_size) encoder outputs.
        self.attn = nn.Linear(self.hidden_size * 2, self.max_length)
        # BUG FIX: attn_combine was used in forward() but never defined,
        # which raised AttributeError on the first decode step.
        self.attn_combine = nn.Linear(self.hidden_size * 2, self.hidden_size)
        self.dropout = nn.Dropout(self.dropout_p)
        self.gru = nn.GRU(self.hidden_size, self.hidden_size)
        self.out = nn.Linear(self.hidden_size, self.output_size)
    def forward(self, input, hidden, encoder_outputs):
        """Decode one step with attention.

        Args:
            input: (1, 1) tensor holding the current target token index.
            hidden: (1, 1, hidden_size) previous GRU hidden state.
            encoder_outputs: (max_length, hidden_size) encoder states.
        Returns:
            (output, hidden, attn_weights): log-probabilities over the
            vocabulary (1, output_size), the new hidden state, and the
            (1, max_length) attention distribution.
        """
        embedded = self.embedding(input).view(1, 1, -1)
        embedded = self.dropout(embedded)
        # Score each encoder position from the embedding + previous hidden.
        attn_weights = F.softmax(self.attn(torch.cat((embedded[0], hidden[0]), 1)), dim=1)
        # Weighted sum of encoder outputs: (1, 1, hidden_size).
        attn_applied = torch.bmm(attn_weights.unsqueeze(0), encoder_outputs.unsqueeze(0))
        output = torch.cat((embedded[0], attn_applied[0]), 1)
        output = self.attn_combine(output).unsqueeze(0)
        output = F.relu(output)
        output, hidden = self.gru(output, hidden)
        output = F.log_softmax(self.out(output[0]), dim=1)
        return output, hidden, attn_weights
    def initHidden(self):
        """Return a zeroed (1, 1, hidden_size) GRU initial state."""
        return torch.zeros(1, 1, self.hidden_size, device=device)
def test():
    """Smoke test: construct one of each model and print its structure.

    BUG FIX: the original called EncoderRNN/DecoderRNN with only two
    positional arguments, but their constructors require
    (input_size, hidden_size, output_size, num_layers, batch_size),
    so test() raised TypeError before printing anything.
    """
    encoder = EncoderRNN(100, 1024, 1024, 1, 1)
    decoder = DecoderRNN(1024, 1024, 100, 1, 1)
    attn_decoder = AttnDecoderRNN(1024, 100)
    print('encoder: ', encoder)
    print('decoder: ', decoder)
    print('attention decoder: ', attn_decoder)
# Run the smoke test only when executed as a script, not on import.
if __name__ == '__main__':
    test()