NLP with word2vec: Training Simplified Chinese word vectors with the Wikipedia Text (Chinese Wikipedia) corpus and the Word2vec tool
Output
To be updated later…
Final model
word2vec_wiki.model.rar
Design approach
To be updated later…
1. Source of the Wikipedia Text corpus
Source and download of the Wikipedia Text corpus: the zhwiki dump of 20190120 ("zhwiki dump progress on 20190120", from https://dumps.wikimedia.org/zhwiki/).
The file zhwiki-latest-pages-articles.xml.bz2 contains the article titles and body text. The compressed archive is roughly 1.3 GB and expands to about 5.7 GB, which is still much smaller than the English Wikipedia dump.
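If you prefer to script the download, the dump can be fetched directly from the Wikimedia dumps server. The following is a minimal sketch using only the standard library; it assumes the usual dumps.wikimedia.org/zhwiki/latest/ layout, so adjust the URL if you want a dated dump such as the 20190120 one.
# Hypothetical download helper (standard library only)
import urllib.request

url = 'https://dumps.wikimedia.org/zhwiki/latest/zhwiki-latest-pages-articles.xml.bz2'
urllib.request.urlretrieve(url, 'zhwiki-latest-pages-articles.xml.bz2')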
2. Parsing the Wikipedia dump
The downloaded wiki dump is in XML format, so the article text has to be extracted from it. There are several mature tools for parsing Wikipedia dumps (e.g., gensim and Wikipedia Extractor). Wikipedia Extractor in particular is a simple and convenient Python script.
T1. The Wikipedia Extractor tool
Wikipedia Extractor homepage: http://medialab.di.unipi.it/wiki/Wikipedia_Extractor
Usage: after downloading WikiExtractor.py, run the command below.
Here -cb 1200M splits the output into 1200 MB chunks, -o is followed by the output directory, and the last argument is the input file.
WikiExtractor.py -cb 1200M -o extracted zhwiki-latest-pages-articles.xml.bz2
T2. Python implementation
Convert the compressed XML dump into a plain-text file with the script listed in full further below (process.py):
python process_wiki.py zhwiki-latest-pages-articles.xml.bz2 wiki.zh.text
3. Traditional-to-Simplified Chinese conversion
Much of the Chinese Wikipedia content is written in Traditional Chinese, so it needs to be converted to Simplified Chinese. This can be done either with the conversion tool developed by the Xiamen University NLP Lab or with OpenCC.
T1. The Xiamen University NLP Lab conversion tool
Download: http://jf.cloudtranslation.cc/
Usage: download the standalone version and run the following command in a Windows command prompt,
where file1.txt is the Traditional Chinese source file, file2.txt is the output file for the converted result, and lm_s2t.txt is the language-model file.
jf -fj file1.txt file2.txt -lm lm_s2t.txt
T2. OpenCC
Run opencc -i wiki.zh.text -o wiki.zh.text.jian -c zht2zhs.ini to convert the Traditional Chinese characters to Simplified Chinese.
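The later commands work on wiki.zh.text.jian.seg, which implies a word-segmentation step between the simplification and the encoding fix; that step is not shown in this post. A minimal sketch of it with jieba (which word2vec_wiki.py below also imports) might look as follows; the file names simply follow the ones used in the rest of the pipeline.
# Hypothetical segmentation step producing wiki.zh.text.jian.seg
import codecs
import jieba

with codecs.open('wiki.zh.text.jian', 'r', encoding='utf-8', errors='ignore') as fin, \
     codecs.open('wiki.zh.text.jian.seg', 'w', encoding='utf-8') as fout:
    for line in fin:
        # jieba.cut returns a generator of tokens; join them with spaces so that
        # word2vec can later consume the file line by line
        fout.write(' '.join(jieba.cut(line.strip())) + '\n')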
4. Converting non-UTF-8 characters to UTF-8
iconv -c -t UTF-8 < wiki.zh.text.jian.seg > wiki.zh.text.jian.seg.utf-8
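If iconv is not available (for example on Windows), the same cleanup can be approximated with a few lines of Python. This is only a sketch of an alternative, not part of the original pipeline; it drops undecodable bytes, which is what iconv's -c option does.
# Drop bytes that are not valid UTF-8, mirroring iconv -c -t UTF-8
with open('wiki.zh.text.jian.seg', 'rb') as fin, \
     open('wiki.zh.text.jian.seg.utf-8', 'w', encoding='utf-8') as fout:
    for raw_line in fin:
        fout.write(raw_line.decode('utf-8', errors='ignore'))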
5. Running word2vec
Train the model on the segmented, UTF-8 corpus (a sketch of train_word2vec_model.py follows the command):
python train_word2vec_model.py wiki.zh.text.jian.seg.utf-8 wiki.zh.text.model wiki.zh.text.vector
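The train_word2vec_model.py script itself is not included in this post. Below is a minimal sketch of what such a script typically looks like, using gensim's LineSentence plus Word2Vec with the older parameter names (size, iter-era API) that the rest of this post also uses; the vector size and window here are placeholder values, not this post's settings.
# train_word2vec_model.py -- minimal sketch, assuming a pre-4.0 gensim API
import logging
import multiprocessing
import sys

from gensim.models import Word2Vec
from gensim.models.word2vec import LineSentence

if __name__ == '__main__':
    logging.basicConfig(format='%(asctime)s: %(levelname)s: %(message)s', level=logging.INFO)
    if len(sys.argv) < 4:
        print("usage: python train_word2vec_model.py <segmented corpus> <model file> <vector file>")
        sys.exit(1)
    inp, outp_model, outp_vectors = sys.argv[1:4]

    # LineSentence streams one whitespace-separated line (one article) at a time
    model = Word2Vec(LineSentence(inp), size=400, window=5, min_count=5,
                     workers=multiprocessing.cpu_count())

    model.save(outp_model)  # full model, can be loaded and trained further
    # plain-text vectors; in very old gensim versions this was model.save_word2vec_format
    model.wv.save_word2vec_format(outp_vectors, binary=False)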
Implementation code
Being updated…
Notes on the code files below
# We build the word2vec model from wiki text such as https://dumps.wikimedia.org/zhwiki/20161001/zhwiki-20161001-pages-articles-multistream.xml.bz2
## parameters:
=================================
feature_size = 500
content_window = 5
freq_min_count = 3
# threads_num = 4
negative = 3  # number of negative samples; negative sampling tends to work better for frequent words, hierarchical softmax for rare words
iter = 20
## process.py: converts the wiki*.xml dump to plain text
## word2vec_wiki.py: builds and loads the word2vec model
1. process.py
# process.py
import logging
import os.path
import sys
from gensim.corpora import WikiCorpus
# run: python process_wiki.py ../data/zhwiki-latest-pages-articles.xml.bz2 wiki.zh.text
if __name__ == '__main__':
    program = os.path.basename(sys.argv[0])
    logger = logging.getLogger(program)
    logging.basicConfig(format='%(asctime)s: %(levelname)s: %(message)s')
    logging.root.setLevel(level=logging.INFO)
    logger.info("running %s" % ' '.join(sys.argv))

    # check and process input arguments
    if len(sys.argv) < 3:
        print("usage: python process_wiki.py <zhwiki-*.xml.bz2> <output text file>")
        sys.exit(1)
    inp, outp = sys.argv[1:3]

    space = " "
    i = 0
    output = open(outp, 'w')
    wiki = WikiCorpus(inp, lemmatize=False, dictionary={})
    for text in wiki.get_texts():
        # each text is the token list of one article; write it as one space-separated line
        output.write(space.join(text) + "\n")
        i = i + 1
        if i % 10000 == 0:
            logger.info("Saved " + str(i) + " articles")
    output.close()
    logger.info("Finished: saved " + str(i) + " articles in total")
2. word2vec_wiki.py
# word2vec_wiki.py (written for Python 2 and an older gensim API)
# -*- coding:utf-8 -*-
from __future__ import print_function
import numpy as np
import os
import sys
import jieba
import time
import jieba.posseg as pseg
import codecs
import multiprocessing
import json
# from gensim.models import Word2Vec,Phrases
from gensim import models,corpora
import logging
# auto_brand = codecs.open("Automotive_Brand.txt", encoding='utf-8').read()
sys.path.append("../../")
sys.path.append("../../langconv/")
sys.path.append("../../parser/")
# import xmlparser
# from xmlparser import *
# from langconv import *
# logger = logging.getLogger(program)
logging.basicConfig(format='%(asctime)s: %(levelname)s: %(message)s')
logging.root.setLevel(level=logging.INFO)
# logger.info("running %s" % ' '.join(sys.argv))
def json_dict_from_file(json_file, fieldnames=None, isdelwords=True):
    """
    Load a JSON-lines file and return its records as a list.
    If fieldnames is given, keep only records containing all of those fields,
    returned as lists of the selected field values; the 'content' field is
    additionally cleaned with delNOTNeedWords when isdelwords is True.
    :param json_file: path to a file with one JSON object per line
    """
    obj_s = []
    with open(json_file) as f:
        for line in f:
            object_dict = json.loads(line)
            if fieldnames is None:
                obj_s.append(object_dict)
            else:
                if set(fieldnames).issubset(set(object_dict.keys())):
                    one = []
                    for fieldname in fieldnames:
                        if isdelwords and fieldname == 'content':
                            one.append(delNOTNeedWords(object_dict[fieldname])[1])
                        else:
                            one.append(object_dict[fieldname])
                    obj_s.append(one)
    return obj_s
def delNOTNeedWords(content, customstopwords=None):
    # words = jieba.lcut(content)
    if customstopwords is None:
        customstopwords = "stopwords.txt"
    import os
    if os.path.exists(customstopwords):
        stop_words = codecs.open(customstopwords, encoding='UTF-8').read().split(u'\n')
        customstopwords = stop_words
    result = ''
    return_words = []
    # for w in words:
    #     if w not in stopwords:
    #         result += w.encode('utf-8')  # +"/"+str(w.flag)+" "  # remove stopwords
    words = pseg.lcut(content)
    for word, flag in words:
        # print word.encode('utf-8')
        tempword = word.encode('utf-8').strip(' ')
        # keep the word only if it is not a stopword, is non-empty, and its POS tag is in the
        # whitelist (nouns, verbs, adjectives, numerals, time/locality words, etc.)
        if (word not in customstopwords and len(tempword) > 0 and flag in [u'n', u'nr', u'ns', u'nt', u'nz', u'ng', u't', u'tg', u'f', u'v', u'vd', u'vn', u'vf', u'vx', u'vi', u'vl', u'vg', u'a', u'an', u'ag', u'al', u'm', u'mq', u'o', u'x']):
            # and flag[0] in [u'n', u'f', u'a', u'z']):
            # ["/x","/zg","/uj","/ul","/e","/d","/uz","/y"]): # filter stopwords and unwanted POS tags
            result += tempword  # +"/"+str(w.flag)+" "
            return_words.append(tempword)
    return result, return_words
def get_save_wikitext(wiki_filename, text_filename):
    output = open(text_filename, 'w')
    wiki = corpora.WikiCorpus(wiki_filename, lemmatize=False, dictionary={})
    i = 0
    for text in wiki.get_texts():
        # text = delNOTNeedWords(text,"../../stopwords.txt")[1]
        output.write(" ".join(text) + "\n")
        i = i + 1
        if i % 10000 == 0:
            logging.info("Saved " + str(i) + " articles")
    output.close()
def load_save_word2vec_model(line_words, model_filename):
    # model parameters
    feature_size = 500
    content_window = 5
    freq_min_count = 3
    # threads_num = 4
    negative = 3  # number of negative samples; negative sampling favors frequent words, hierarchical softmax favors rare words
    iter = 20

    print("word2vec...")
    tic = time.time()
    if os.path.isfile(model_filename):
        model = models.Word2Vec.load(model_filename)
        print(model.vocab)
        print("Loaded word2vec model")
    else:
        bigram_transformer = models.Phrases(line_words)
        model = models.Word2Vec(bigram_transformer[line_words], size=feature_size, window=content_window, iter=iter, min_count=freq_min_count, negative=negative, workers=multiprocessing.cpu_count())
        toc = time.time()
        print("Word2vec completed! Elapsed time is %s." % (toc - tic))
        model.save(model_filename)
        # model.save_word2vec_format(save_model2, binary=False)
        print("Word2vec Saved!")
    return model
if __name__ == '__main__':
    limit = -1  # how many texts to take from the wiki dump; -1 means all
    wiki_filename = '/home/wac/data/zhwiki-20160203-pages-articles-multistream.xml'
    wiki_text = './wiki_text.txt'
    wikimodel_filename = './word2vec_wiki.model'

    s_list = []
    # if you want to regenerate the wiki text and training sentences, uncomment these lines
    # get_save_wikitext(wiki_filename, wiki_text)
    # for i, text in enumerate(open(wiki_text, 'r')):
    #     s_list.append(delNOTNeedWords(text, "../../stopwords.txt")[1])
    #     print(i)
    #     if i == limit:  # take only the first `limit` lines; -1 means all
    #         break

    # build (or load) the model
    model = load_save_word2vec_model(s_list, wikimodel_filename)

    # query similar words interactively from the command line
    while 1:
        print("Enter a word to test (type quit to exit): ", end='')
        t_word = sys.stdin.readline()
        if "quit" in t_word:
            break
        try:
            results = model.most_similar(t_word.decode('utf-8').strip('\n').strip('\r').strip(' ').split(' '), topn=30)
        except:
            continue
        for t_w, t_sim in results:
            print(t_w, " ", t_sim)
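The script above targets the older gensim API (model.vocab, model.most_similar, the size and iter keyword arguments). In gensim 4.x, word lookups move to the model.wv KeyedVectors object and the training arguments are renamed to vector_size and epochs. A small sketch, assuming the model file was saved with a compatible gensim version, looks like this:
# gensim 4.x sketch for loading the saved model and querying similar words
from gensim.models import Word2Vec

model = Word2Vec.load('./word2vec_wiki.model')
# similarity queries live on the KeyedVectors object model.wv
for word, similarity in model.wv.most_similar(u'中国', topn=10):
    print(word, similarity)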