N-Gram Model Implementation
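An n-gram model predicts the next word from the previous n - 1 words: with a context size of 2 it estimates P(w_i | w_{i-2}, w_{i-1}). The script below builds such a trigram model in PyTorch, using Shakespeare's Sonnet 2 as the training text: each pair of consecutive words is the input, and the word that follows them is the target.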

import torch
import torch.nn.functional as F
from torch import nn, optim

CONTEXT_SIZE = 2     # number of context words used to predict the next word
EMBEDDING_DIM = 100  # dimensionality of the word embeddings
# We will use Shakespeare Sonnet 2
test_sentence = """When forty winters shall besiege thy brow,
And dig deep trenches in thy beauty's field,
Thy youth's proud livery so gazed on now,
Will be a totter'd weed of small worth held:
Then being asked, where all thy beauty lies,
Where all the treasure of thy lusty days;
To say, within thine own deep sunken eyes,
Were an all-eating shame, and thriftless praise.
How much more praise deserv'd thy beauty's use,
If thou couldst answer 'This fair child of mine
Shall sum my count, and make my old excuse,'
Proving his beauty by succession thine!
This were to be new made when thou art old,
And see thy blood warm when thou feel'st it cold.""".split()
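# Note: .split() tokenizes on whitespace only, so punctuation stays attached
# to its word ('brow,' and 'brow' would count as different vocabulary entries).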

trigram = [((test_sentence[i], test_sentence[i + 1]), test_sentence[i + 2])
           for i in range(len(test_sentence) - 2)]
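# Each training example pairs two context words with the word that follows
# them, e.g. trigram[0] is (('When', 'forty'), 'winters').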

vocb = set(test_sentence)
word_to_idx = {word: i for i, word in enumerate(vocb)}
idx_to_word = {word_to_idx[word]: word for word in word_to_idx}
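# word_to_idx assigns each unique word an integer id (the input the embedding
# layer expects); idx_to_word inverts it so predictions can be decoded.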


class NgramModel(nn.Module):
    def __init__(self, vocb_size, context_size, n_dim):  # e.g. 97, 2, 100
        super(NgramModel, self).__init__()
        self.n_word = vocb_size  # vocabulary size, 97 for this sonnet
        self.embedding = nn.Embedding(self.n_word, n_dim)  # (97, 100)
        self.linear1 = nn.Linear(context_size * n_dim, 128)  # (2*100, 128)
        self.linear2 = nn.Linear(128, self.n_word)  # (128, 97)

    def forward(self, x):
        emb = self.embedding(x)  # (2,) --> (2, 100)
        emb = emb.view(1, -1)  # (2, 100) --> (1, 200)
        out = self.linear1(emb)  # (1, 200) --> (1, 128)
        out = F.relu(out)
        out = self.linear2(out)  # (1, 128) --> (1, 97)
        log_prob = F.log_softmax(out, dim=1)  # (1, 97)
        return log_prob


ngrammodel = NgramModel(len(word_to_idx), CONTEXT_SIZE, EMBEDDING_DIM)
criterion = nn.NLLLoss()
optimizer = optim.SGD(ngrammodel.parameters(), lr=1e-3)
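# NLLLoss expects log-probabilities as input, which is why forward() ends with
# log_softmax; the combination is equivalent to CrossEntropyLoss on raw logits.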

for epoch in range(100):
    print('epoch: {}'.format(epoch + 1))
    print('*' * 10)
    running_loss = 0
    for data in trigram:
        word, label = data  # e.g. data is (('When', 'forty'), 'winters'): word is ('When', 'forty'), label is 'winters'
        word = torch.LongTensor([word_to_idx[i] for i in word])  # e.g. tensor([34, 12]), shape (2,)
        label = torch.LongTensor([word_to_idx[label]])  # e.g. tensor([23]), shape (1,)
        # forward
        out = ngrammodel(word)  # out has shape (1, 97)
        loss = criterion(out, label)  # label has shape (1,)
        running_loss += loss.item()
        # backward
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
    print('Loss: {:.6f}'.format(running_loss / len(trigram)))

# Check whether the trained model predicts sensibly
word, label = trigram[3]
word = torch.LongTensor([word_to_idx[i] for i in word])
with torch.no_grad():
    out = ngrammodel(word)
_, predict_label = torch.max(out, 1)  # predict_label has shape (1,)
predict_word = idx_to_word[predict_label.item()]
print('real word is {}, predict word is {}'.format(label, predict_word))
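
Beyond checking a single trigram, the trained model can also be run autoregressively. The following is a minimal greedy-generation sketch, not part of the original script: the names seed and generated are just illustrative, and it assumes ngrammodel, word_to_idx, idx_to_word, and CONTEXT_SIZE from above are in scope. Each predicted word is appended and the last two words become the next context.

seed = ['When', 'forty']  # two seed words taken from the training text
generated = list(seed)
with torch.no_grad():
    for _ in range(8):  # greedily generate eight more words
        context = generated[-CONTEXT_SIZE:]
        x = torch.LongTensor([word_to_idx[w] for w in context])
        log_prob = ngrammodel(x)  # (1, 97) log-probabilities
        next_idx = torch.argmax(log_prob, dim=1).item()
        generated.append(idx_to_word[next_idx])
print(' '.join(generated))

Trained on a single sonnet, the model will largely reproduce memorized text, which is enough to confirm the pipeline works end to end.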