"""Generate a plain-text training corpus from a Wikipedia Cirrussearch content
dump, replacing hyperlink anchors in each article with ##Entity## tokens.

Forked from singletongue/WikiEntVec.
"""
import re
import json
import gzip
import argparse
from collections import OrderedDict

from logzero import logger

from tokenization import RegExpTokenizer, NLTKTokenizer, MeCabTokenizer

# Runs of whitespace (used to normalize spacing in the article text)
regex_spaces = re.compile(r'\s+')
# Trailing disambiguation parenthetical in a page title, e.g. ' (film)'
regex_title_paren = re.compile(r' \([^()].+?\)$')
# Internal wiki link in the article source text, i.e. [[...]]
regex_hyperlink = re.compile(r'\[\[(.+?)\]\]')
# Entity token of the form ##...## (kept intact by the tokenizers)
regex_entity = re.compile(r'##[^#]+?##')
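
# For reference, wiki links in the source text take these forms (illustrative
# examples, not drawn from an actual dump):
#   [[United Kingdom]]                        -> anchor 'United Kingdom', entity 'United Kingdom'
#   [[Python (programming language)|Python]]  -> anchor 'Python', entity 'Python (programming language)'
#   [[London#History|history of London]]      -> the '#History' section suffix is dropped in main()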


def main(args):
    logger.info('initializing a tokenizer')
    # Every tokenizer leaves spans matching regex_entity (##...## tokens) intact
    if args.tokenizer == 'regexp':
        tokenizer = RegExpTokenizer(do_lower_case=args.do_lower_case,
                                    preserved_pattern=regex_entity)
    elif args.tokenizer == 'nltk':
        tokenizer = NLTKTokenizer(do_lower_case=args.do_lower_case,
                                  preserved_pattern=regex_entity)
    elif args.tokenizer == 'mecab':
        tokenizer = MeCabTokenizer(mecab_option=args.tokenizer_option,
                                   do_lower_case=args.do_lower_case,
                                   preserved_pattern=regex_entity)
    else:
        raise RuntimeError(f'Invalid tokenizer: {args.tokenizer}')

    # Map each redirect source title to its destination title; destination
    # titles also map to themselves so lookups are uniform
    redirects = dict()
    if args.do_resolve_redirects:
        logger.info('loading redirect information')
        with gzip.open(args.cirrus_file, 'rt') as fi:
            for line in fi:
                json_item = json.loads(line)
                if 'title' not in json_item:
                    continue
                if 'redirect' not in json_item:
                    continue

                dst_title = json_item['title']
                redirects[dst_title] = dst_title
                for redirect_item in json_item['redirect']:
                    # namespace 0 = the main (article) namespace
                    if redirect_item['namespace'] == 0:
                        src_title = redirect_item['title']
                        redirects.setdefault(src_title, dst_title)
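
    # After this pass, redirects looks like (illustrative example):
    #   {'United Kingdom': 'United Kingdom', 'UK': 'United Kingdom', ...}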

    logger.info('generating corpus for training')
    n_processed = 0
    with gzip.open(args.cirrus_file, 'rt') as fi, \
            open(args.output_file, 'wt') as fo:
        for line in fi:
            json_item = json.loads(line)
            if 'title' not in json_item:
                continue

            title = json_item['title']
            # Collapse whitespace runs in the plain text to single spaces
            text = regex_spaces.sub(' ', json_item['text'])

            # Collect anchor-to-entity mappings for this article, seeded with
            # the article's own title (minus any disambiguation parenthetical)
            hyperlinks = dict()
            title_without_paren = regex_title_paren.sub('', title)
            hyperlinks.setdefault(title_without_paren, title)
            for match in regex_hyperlink.finditer(json_item['source_text']):
                # A link is either [[entity|anchor]] or just [[entity]]
                if '|' in match.group(1):
                    (entity, anchor) = match.group(1).split('|', maxsplit=1)
                else:
                    entity = anchor = match.group(1)

                # Drop any section suffix, e.g. 'London#History' -> 'London'
                if '#' in entity:
                    entity = entity[:entity.find('#')]

                anchor = anchor.strip()
                entity = entity.strip()
                if args.do_resolve_redirects:
                    # Unresolvable titles become '' and are skipped below
                    entity = redirects.get(entity, '')

                if len(anchor) > 0 and len(entity) > 0:
                    hyperlinks.setdefault(anchor, entity)
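
            # hyperlinks now maps anchor strings to entity titles, e.g.
            # (illustrative): {'Python': 'Python (programming language)'}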

            # Replace longer anchors first so that a short anchor never
            # clobbers a substring of a longer one
            hyperlinks_sorted = OrderedDict(sorted(
                hyperlinks.items(), key=lambda t: len(t[0]), reverse=True))

            # replacement_flags marks character positions already consumed by
            # an entity token, so replacements never overlap
            replacement_flags = [0] * len(text)
            for (anchor, entity) in hyperlinks_sorted.items():
                cursor = 0
                while cursor < len(text) and anchor in text[cursor:]:
                    start = text.index(anchor, cursor)
                    end = start + len(anchor)
                    if not any(replacement_flags[start:end]):
                        entity_token = f'##{entity}##'.replace(' ', '_')
                        text = text[:start] + entity_token + text[end:]
                        replacement_flags = replacement_flags[:start] \
                            + [1] * len(entity_token) + replacement_flags[end:]
                        assert len(text) == len(replacement_flags)
                        cursor = start + len(entity_token)
                    else:
                        cursor = end
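
            # At this point anchors have been rewritten as entity tokens,
            # e.g. (illustrative): 'The ##United_Kingdom## is a country ...'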
            text = ' '.join(tokenizer.tokenize(text))
            print(text, file=fo)

            n_processed += 1
            # Log the first ten processed articles as examples
            if n_processed <= 10:
                logger.info('*** Example ***')
                example_text = text[:400] + '...' if len(text) > 400 else text
                logger.info(example_text)

            if n_processed % 10000 == 0:
                logger.info(f'processed: {n_processed}')

    # Log the final count unless it was just logged above
    if n_processed % 10000 != 0:
        logger.info(f'processed: {n_processed}')


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--cirrus_file', type=str, required=True,
                        help='Wikipedia Cirrussearch content dump file (.json.gz)')
    parser.add_argument('--output_file', type=str, required=True,
                        help='output corpus file (.txt)')
    parser.add_argument('--tokenizer', default='regexp',
                        help='tokenizer type [regexp]')
    parser.add_argument('--do_lower_case', action='store_true',
                        help='lowercase words (not applied to named entities)')
    parser.add_argument('--do_resolve_redirects', action='store_true',
                        help='resolve redirects of entity names')
    parser.add_argument('--tokenizer_option', type=str, default='',
                        help='option string passed to the tokenizer')
    args = parser.parse_args()
    main(args)
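
# Example invocation (a sketch; the dump file name is hypothetical):
#   python make_corpus.py \
#       --cirrus_file enwiki-20190520-cirrussearch-content.json.gz \
#       --output_file corpus.txt \
#       --tokenizer nltk \
#       --do_lower_case \
#       --do_resolve_redirects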