"""Preprocess Japanese news articles for NLP: load article bodies, tokenize
with MeCab, drop stop words, and normalize the text.

Expects article files under data/text/**/*.txt where the first three lines of
each file are metadata (line index 2 is the title) and the body starts at
line index 3.
"""
# lunar1.py header
import pandas as pd
import glob
# lunar2.py header
import MeCab
import mojimoji
import os
import urllib.request  # fix: `import urllib` alone does not expose urllib.request

tagger = MeCab.Tagger("-Ochasen")

# ---------------- lunar1.py: load article bodies into a Series ----------------
text_paths = glob.glob('data/text/**/*.txt')
texts = []
for text_path in text_paths:
    # Context manager guarantees the handle is closed; pin the encoding
    # rather than relying on the platform default (articles are Japanese).
    with open(text_path, 'r', encoding='utf-8') as f:
        lines = f.read().split('\n')
    # Lines 0-2 are metadata; join the body (line 3 onward) into one document.
    texts.append(' '.join(lines[3:]))
news_ss = pd.Series(texts)


# ---------------- lunar2.py: stop words and tokenization ----------------------
def load_jp_stopwords(path="data/jp_stop_words.txt"):
    """Return the SlothLib Japanese stop-word list, downloading on first use.

    Parameters
    ----------
    path : str
        Local cache path for the stop-word file.

    Returns
    -------
    list[str]
        One stop word per element.
    """
    url = 'http://svn.sourceforge.jp/svnroot/slothlib/CSharp/Version1/SlothLib/NLP/Filter/StopWord/word/Japanese.txt'
    if os.path.exists(path):
        print('File already exists.')
    else:
        print('Downloading...')
        urllib.request.urlretrieve(url, path)
    return pd.read_csv(path, header=None)[0].tolist()


def preprocess_jp(series):
    """Tokenize and normalize a Series of Japanese documents.

    Keeps the base forms of nouns, independent verbs, and independent
    adjectives; drops unparseable ('*'), single-character, and stop-word
    tokens; then lower-cases and converts full-width characters to
    half-width.

    Parameters
    ----------
    series : pd.Series of str

    Returns
    -------
    pd.Series of str
        Space-joined kept tokens per document.
    """
    # A set gives O(1) membership tests in the per-token loop below.
    stop_words = set(load_jp_stopwords())

    def tokenizer_func(text):
        tokens = []
        node = tagger.parseToNode(str(text))
        while node:
            features = node.feature.split(',')
            surface = features[6]  # base (dictionary) form in -Ochasen output
            # Skip unparseable tokens, single characters, and stop words.
            if surface != '*' and len(surface) >= 2 and surface not in stop_words:
                pos, subpos = features[0], features[1]
                # Keep all nouns (proper nouns are already nouns, so the
                # original's separate proper-noun branch was redundant) plus
                # independent verbs and independent adjectives.
                if pos == '名詞' or (pos in ('動詞', '形容詞') and subpos == '自立'):
                    tokens.append(surface)
            node = node.next
        return " ".join(tokens)

    series = series.map(tokenizer_func)
    # ---------------Normalization-----------#
    series = series.map(lambda x: x.lower())
    series = series.map(mojimoji.zen_to_han)
    return series


processed_news_ss = preprocess_jp(news_ss)
# `display` only exists inside IPython/Jupyter (its import was commented out,
# so the original raised NameError here); print works in any environment.
print(processed_news_ss.head())