NLP Basics: Word2Vec (CBOW) in Practice


1. Model Introduction

1.1 Word2Vec

Word2Vec is a model for learning word embeddings. Compared with sparse one-hot encoding, it learns the semantic features of words and can therefore encode each word as a more reasonable dense, low-dimensional vector. The model comes in two variants: CBOW and Skip-gram.
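As a minimal illustration (the toy vocabulary and the numbers below are made up, not taken from this post): a one-hot code is as long as the vocabulary and carries no notion of similarity, while an embedding is a short dense vector learned from data.

import numpy as np

vocab = ["我", "是", "一名", "中国", "人"]     # toy vocabulary
one_hot = np.zeros(len(vocab))
one_hot[vocab.index("中国")] = 1               # sparse: [0. 0. 0. 1. 0.]
embedding = np.random.randn(4)                 # dense and low-dimensional; learned in practice, random here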

1.2 CBOW (Continuous Bag-of-Words)

The continuous bag-of-words model uses the surrounding context words to predict the target word in the middle. So what exactly counts as a context word, and what is the target word? See the figure below:

[Figure: the embeddings of the context words (e.g. "一名", "中国") are averaged, and the mean vector is used to predict the masked target word "?".]

As shown above, if we segment the sentence "我是一名中国人" ("I am a Chinese person") with jieba, we get a list of length 5: ["我", "是", "一名", "中国", "人"]. If "一名" is taken as the target word, the n words around it are called its context (n is a value you choose yourself). The model's objective is to predict the target word from these context words, which is what forces it to learn semantic features.
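A quick sketch of how a (context, target) pair is built from that segmentation (window size 1 here purely for illustration; the code later uses window = 5):

tokens = ["我", "是", "一名", "中国", "人"]     # the segmentation given above
window = 1
target_idx = tokens.index("一名")
context = tokens[target_idx - window:target_idx] + tokens[target_idx + 1:target_idx + 1 + window]
print(context, "->", tokens[target_idx])        # ['是', '中国'] -> 一名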

1.3 Skip-gram

Skip-gram works the other way around: the target word is used to predict its surrounding context words, as shown below:

[Figure: the target word "一名" is used to predict its context words, e.g. "中国".]
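For comparison, skip-gram generates one (target, context) training pair per neighbouring word. A rough sketch on the same toy sentence (again window size 1, for illustration only):

tokens = ["我", "是", "一名", "中国", "人"]
window = 1
pairs = [(tokens[i], tokens[j])
         for i in range(len(tokens))
         for j in range(max(0, i - window), min(len(tokens), i + window + 1))
         if j != i]
# [('我', '是'), ('是', '我'), ('是', '一名'), ('一名', '是'), ('一名', '中国'), ...]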

2. Implementation

2.1 Hyperparameters and Data Preprocessing

import os
import numpy as np
import torch
import jieba
jieba.setLogLevel(jieba.logging.INFO)
from collections import Counter
from tqdm.notebook import tqdm

embed_size = 128    # dimensionality of the word vectors
window = 5          # number of context words on each side of the target
nb_negative = 50    # number of randomly drawn negative samples per target word
min_count = 10      # words occurring fewer than min_count times are dropped as noise
nb_epoch = 10       # number of training epochs
data_dir = './CBOW_Data'

words, corpus = [], []
for file in tqdm(os.listdir(data_dir)[:3000]):
    with open(os.path.join(data_dir, file), 'r', encoding='utf-8') as f:
        for line in f:
            tokens = jieba.lcut(line)   # segment each line once
            words += tokens             # flat list of tokens, for frequency counting
            corpus.append(tokens)       # list of tokenized sentences


words = dict(Counter(words))                                   # word -> frequency
words = {i: j for i, j in words.items() if j >= min_count}     # drop low-frequency words
id2word = {i + 2: j for i, j in enumerate(words)}              # ids 0 and 1 are reserved below
id2word[0] = 'PAD'
id2word[1] = 'UNK'
word2id = {j: i for i, j in id2word.items()}
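A quick sanity check of the mapping (whether a given word survives the min_count filter depends on your corpus, so unknown words fall back to UNK):

example = ["我", "是", "一名", "中国", "人"]
ids = [word2id.get(w, 1) for w in example]     # id 1 is UNK for words not in the vocabulary
print(ids, [id2word[i] for i in ids])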

During CBOW training, the middle word is predicted from the words around it. Take the sentence ["我", "爱", "你", "中国", "。"], which contains five tokens: we feed ["我", "爱", "中国", "。"] as input, i.e. the middle word is left out, and we want training to recover that missing word. The label of each supervised sample is a vector of length "number of negative samples + 1": its first entry is the positive sample (the true middle word) and the rest are randomly drawn negative samples.
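For instance, if "你" were the target word, one training sample would look roughly like this (a sketch only: it assumes the context words are in the vocabulary, and the Dataset class below additionally pads the context to exactly 2 * window ids and does the real negative sampling):

import random

context  = [word2id.get(w, 1) for w in ["我", "爱", "中国", "。"]]            # input: the surrounding words
positive = [word2id.get("你", 1)]                                            # 1 positive sample: the middle word
negative = [random.randrange(2, len(id2word)) for _ in range(nb_negative)]   # nb_negative random word ids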

2.2 The Dataset

from torch.utils.data import Dataset, DataLoader
import random

class CBOW_Dataset(Dataset):
    def __init__(self, words, corpus,window=5, nb_negative=50, min_count=10):
        super(CBOW_Dataset, self).__init__()
        self.words = words
        self.corpus = corpus
        self.window = window
        self.nb_negative = nb_negative
        self.min_count = min_count
        self.words, self.id2word, self.word2id = self._build_members()
        self.nb_word = len(self.id2word)
        self.nb_sentence = len(self.corpus)
        self.context, self.pos_data, self.neg_data = self._build_set()
        
    def _build_members(self):
        words = {i:j for i,j in self.words.items() if j >= self.min_count}
        id2word = {i+2:j for i,j in enumerate(words)}
        id2word[0] = 'PAD'
        id2word[1] = 'UNK'
        word2id = {j:i for i,j in id2word.items()}
        print("构建字典成功...")
        return words, id2word, word2id
    
    def _get_negative_sample(self, x):
        # uniformly draw nb_negative distinct word ids, skipping PAD/UNK (ids 0 and 1) and the positive word x
        negs = []
        while True:
            rand = random.randrange(2, self.nb_word)
            if rand != x and rand not in negs:
                negs.append(rand)
            if len(negs) == self.nb_negative:
                return negs
            
    def _build_set(self):
        context, pos_data, neg_data = [],[],[]
        for sentence in tqdm(self.corpus):
            # pad both ends with PAD (id 0) so every position has a full window of context
            sentence = [0]*self.window + [self.word2id[w] for w in sentence if w in self.word2id] + [0]*self.window
            for i in range(self.window, len(sentence)-self.window):
                context.append(sentence[i-self.window: i] + sentence[i+1: self.window+i+1])  # 2*window context ids
                pos_data.append([sentence[i]])                                               # the target word
                neg_data.append(self._get_negative_sample(sentence[i]))                      # nb_negative word ids
        print("构建数据集成功...")
        print("共{}单词,{}语料,{}数据".format(len(self.words), len(self.corpus), len(context)))
        return context, pos_data, neg_data
    
    def __getitem__(self, index):
        return torch.LongTensor(self.context[index]), torch.LongTensor(self.pos_data[index]), torch.LongTensor(self.neg_data[index])
    def __len__(self):
        return len(self.context)
    
    def get_dict(self):
        return self.id2word, self.word2id
        
        
    
data_set = CBOW_Dataset(words, corpus)


data_loader = DataLoader(data_set, batch_size=32, shuffle=True)
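A quick look at one batch (the shapes below assume window = 5, nb_negative = 50, and at least 32 samples in the dataset):

context_batch, pos_batch, neg_batch = next(iter(data_loader))
print(context_batch.shape)   # torch.Size([32, 10]): 2 * window context ids per sample
print(pos_batch.shape)       # torch.Size([32, 1]):  the target word
print(neg_batch.shape)       # torch.Size([32, 50]): the negative samples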

2.3 The Model

import torch.nn as nn
import torch.nn.functional as F

class CBOW_Model(nn.Module):
    def __init__(self, vocab_size, embed_size):
        super(CBOW_Model, self).__init__()
        self.vocab_size = vocab_size
        self.embed_size = embed_size
        self.in_embed = nn.Embedding(self.vocab_size, self.embed_size)
        self.out_embed = nn.Embedding(self.vocab_size, self.embed_size)
    
    def forward(self, input_labels, pos_labels, neg_labels):
        # input_labels: [batch, 2*window]    context word ids
        # pos_labels:   [batch, 1]           the true middle word
        # neg_labels:   [batch, nb_negative] negative samples
        input_embedding = self.in_embed(input_labels)                    # [batch, 2*window, embed_size]
        input_embedding = input_embedding.mean(dim=1)                    # average the context vectors
        pos_embedding = self.out_embed(pos_labels)                       # [batch, 1, embed_size]
        neg_embedding = self.out_embed(neg_labels)                       # [batch, nb_negative, embed_size]
        input_embedding = input_embedding.unsqueeze(2)                   # [batch, embed_size, 1]
        pos_dot = torch.bmm(pos_embedding, input_embedding).squeeze(2)   # [batch, 1]
        neg_dot = torch.bmm(neg_embedding, -input_embedding).squeeze(2)  # [batch, nb_negative]
        log_pos = F.logsigmoid(pos_dot).sum(1)
        log_neg = F.logsigmoid(neg_dot).sum(1)
        loss = log_pos + log_neg
        return -loss    # per-sample negative-sampling loss

    def input_embedding(self):
        return self.in_embed.weight.detach().cpu().numpy()
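A quick shape check with dummy inputs (the vocabulary size of 1000 and the batch size of 4 are arbitrary):

dummy_model = CBOW_Model(vocab_size=1000, embed_size=embed_size)
ctx = torch.randint(0, 1000, (4, 2 * window))     # fake context ids
pos = torch.randint(0, 1000, (4, 1))              # fake target ids
neg = torch.randint(0, 1000, (4, nb_negative))    # fake negative ids
print(dummy_model(ctx, pos, neg).shape)           # torch.Size([4]): one loss value per sample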

2.4 Training

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
device = torch.device('cpu')
model = CBOW_Model(len(data_set.id2word), embed_size).to(device)
optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
embedding_weights_pre = model.input_embedding()
loss_plot = []
for epoch in tqdm(range(nb_epoch)):
    np_loss = 0
    num_of_data = 0
    for input_labels, pos_labels, neg_labels in data_loader:
        input_labels = input_labels.to(device)   # context word ids, [batch, 2*window]
        pos_labels = pos_labels.to(device)       # target word id, [batch, 1]
        neg_labels = neg_labels.to(device)       # negative samples, [batch, nb_negative]
        num_of_data += len(input_labels)
        optimizer.zero_grad()
        loss = model(input_labels, pos_labels, neg_labels).mean()
        np_loss += loss.detach().cpu().numpy()
        loss.backward()
        optimizer.step()
    loss_plot.append(np_loss/num_of_data)


embedding_weights = model.input_embedding()
%matplotlib notebook
import matplotlib.pyplot as plt
plt.rcParams['font.sans-serif'] = ['SimHei']   # a font that can render Chinese labels
plt.rcParams['axes.unicode_minus'] = False     # keep minus signs readable with that font


plt.plot(loss_plot)
plt.show()

[Figure: training loss curve over the epochs.]

3. Application

Here we only need to keep the model's first embedding layer (in_embed), i.e. the input word vectors.

embedding_weights = model.input_embedding()
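To reuse the vectors outside this notebook, a minimal sketch for persisting them (the file names are arbitrary):

import json

np.save('cbow_embedding.npy', embedding_weights)            # vocab_size x embed_size matrix
with open('word2id.json', 'w', encoding='utf-8') as f:
    json.dump(data_set.get_dict()[1], f, ensure_ascii=False)   # keep the word -> id mapping alongside it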

At inference time we simply look a word up in this table. The helper below returns the 10 words whose vectors are closest to the query word by cosine distance (the query word itself comes back first, since its distance to itself is 0).

id2word, word2id = data_set.get_dict()
from scipy.spatial.distance import cosine

def find_nearest(word, weights):
    index = word2id[word]
    embedding = weights[index]
    # cosine distance between the query vector and every row of the embedding matrix
    cos_dis = np.array([cosine(e, embedding) for e in weights])
    return [id2word[i] for i in cos_dis.argsort()[:10]]

find_nearest('北京', embedding_weights)

4. Limitations

This model cannot handle lexical ambiguity. For example, "苹果" (apple) in "我买了一个苹果" ("I bought an apple") and in "乔布斯创办了苹果" ("Steve Jobs founded Apple") is the same token, yet the two meanings are far apart; Word2Vec, however, can only map each word to a single fixed vector.
