[Python] jieba word segmentation: adding a custom dictionary, removing stopwords and single-character tokens (Python, 2020.2.10)

laityc 2020-02-10

The source code is as follows:

import jieba
import re

#jieba.load_userdict("E:/xinxi2.txt")  # alternative: load the dictionary in one call
pattern = re.compile(r'..')  # matches any two characters; single-character tokens yield no match

# Load the custom dictionary: tell jieba to keep each entry as a single word
def add_dict():
    f = open("E:/xinxi2.txt", "r", encoding="utf-8")  # dictionary scraped from Baidu
    for line in f:
        jieba.suggest_freq(line.rstrip("\n"), True)
    f.close()

# Segment each line of the input file
def cut():
    number = 0
    f = open("E:/luntan.txt", "r", encoding="utf-8")  # input to process: scraped CSDN forum titles
    for line in f:
        line = seg_sentence(line.rstrip("\n"))  # strip stopwords first
        seg_list = jieba.cut(line)
        for i in seg_list:
            print(i)  # print each token
            m = pattern.findall(i)
            #print(len(m))  # number of two-character matches
            if len(m) != 0:  # non-empty only for tokens of two or more characters
                write(i.strip() + " ")
        line = line.strip()
        print(len(line))  # print sentence length
        if len(line) > 1:
            write("\n")
        number += 1
        print("Processed", number, "lines")

# Append segmented tokens to the output file
def write(contents):
    f = open("E://luntan_cut2.txt", "a+", encoding="utf-8")  # output file
    f.write(contents)
    #print("Write succeeded!")
    f.close()

# Build the stopword list
def stopwordslist(filepath):
    stopwords = [line.strip() for line in open(filepath, 'r', encoding='utf-8').readlines()]
    return stopwords

# Remove stopwords from a sentence
def seg_sentence(sentence):
    sentence_seged = jieba.cut(sentence.strip())
    stopwords = stopwordslist('E://stop.txt')  # path to the stopword file
    outstr = ''
    for word in sentence_seged:
        if word not in stopwords:
            if word != '\t':
                outstr += word
                #outstr += " "
    return outstr

# Strip stopwords from a whole file, line by line (currently unused)
def cut_all():
    inputs = open('E://luntan_cut.txt', 'r', encoding='utf-8')
    outputs = open('E://luntan_stop.txt', 'a', encoding='utf-8')
    for line in inputs:
        line_seg = seg_sentence(line)  # the return value is a string
        outputs.write(line_seg + '\n')
    outputs.close()
    inputs.close()

if __name__=="__main__":
    add_dict()
    cut()
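
To see what add_dict() buys you, here is a minimal sketch of how jieba.suggest_freq changes segmentation, and of how the two-character regex drops single-character tokens. It uses 台中, the example word from jieba's own documentation, rather than an actual entry from xinxi2.txt:

import jieba
import re

pattern = re.compile(r'..')

# depending on the jieba version, "台中" may be split into "台"/"中" by default
print("/".join(jieba.cut("我去台中看朋友")))
jieba.suggest_freq("台中", True)   # raise its frequency so it is kept whole
print("/".join(jieba.cut("我去台中看朋友")))

# the same filter cut() uses: a single character never matches r'..'
for token in ["台中", "去"]:
    print(token, "kept" if pattern.findall(token) else "dropped")

Note that cut() first runs seg_sentence(), which concatenates the surviving words back into a single string with no separators before jieba segments it again; also, a simpler equivalent of the regex filter would be checking len(i) >= 2.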

The origin of luntan.txt is explained here: https://www.cnblogs.com/zlc364624/p/12285055.html

You can download a stopword list from Baidu, or create your own txt file and add words to it yourself, one per line.
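
For illustration, a minimal stop.txt might begin like this (common Chinese function words, one per line):

的
了
是
在
和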

The dictionary scraped from Baidu can be found in an earlier post: https://www.cnblogs.com/zlc364624/p/12289008.html

The result: each token is printed to the console as it is processed, and tokens of two or more characters are written, separated by spaces, to luntan_cut2.txt.

