forked from faraday/wikiprep-esa
-
Notifications
You must be signed in to change notification settings - Fork 0
/
scanLinks.py
226 lines (161 loc) · 5.34 KB
/
scanLinks.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
#!/usr/bin/python
'''
Copyright (C) 2010 Cagatay Calli <ccalli@gmail.com>

Scans XML output (gum.xml / hgw.xml) from Wikiprep and records the page-link
structure of Main-namespace articles. Creates/overwrites these tables:

TABLE: namespace COLUMNS: id INT                          (Main-namespace page ids)
TABLE: pagelinks COLUMNS: source_id INT, target_id INT    (article-to-article links)
TABLE: inlinks   COLUMNS: target_id INT, inlink INT       (incoming-link counts)
TABLE: outlinks  COLUMNS: source_id INT, outlink INT      (outgoing-link counts)

USAGE: scanLinks.py <hgw.xml/gum.xml file from Wikiprep> [<read chunk size>]

IMPORTANT: If you use XML output from a recent version of Wikiprep
(e.g. Zemanta fork), then set FORMAT to 'Zemanta-legacy' or 'Zemanta-modern'.
'''
import sys
import re
import MySQLdb
import signal

# NOTE(review): appears unused in this file — possibly a leftover from a
# sibling script (scanData.py); confirm before removing.
LINK_LOAD_THRES = 100000

# Wikiprep output dialects: 1) Gabrilovich 2) Zemanta-legacy 3) Zemanta-modern
FORMAT = 'Gabrilovich'
# Open the MySQL connection shared by the whole script; abort on failure.
# NOTE(review): credentials are hard-coded — consider externalizing them.
try:
    conn = MySQLdb.connect(host='localhost',user='root',passwd='123456',db='wiki',charset = "utf8", use_unicode = True)
except MySQLdb.Error, e:
    print "Error %d: %s" % (e.args[0], e.args[1])
    sys.exit(1)
# (Re)create the two tables this scan fills:
#   namespace -- ids of pages that belong to the Main (article) namespace
#   pagelinks -- directed (source_id, target_id) link pairs between pages
try:
    cursor = conn.cursor()
    cursor.execute("DROP TABLE IF EXISTS namespace")
    cursor.execute("""
CREATE TABLE namespace
(
id INT(10),
KEY (id)
) DEFAULT CHARSET=binary
""")
    cursor.execute("DROP TABLE IF EXISTS pagelinks")
    cursor.execute("""
CREATE TABLE pagelinks
(
source_id INT(10),
target_id INT(10),
KEY (source_id),
KEY (target_id)
) DEFAULT CHARSET=binary
""")
except MySQLdb.Error, e:
    print "Error %d: %s" % (e.args[0], e.args[1])
    sys.exit (1)
## handler for SIGTERM ###
def signalHandler(signum, frame):
    """Release the database handles and abort when the process is terminated."""
    # `global` is unnecessary here: the handles are only read, never rebound.
    cursor.close()
    conn.close()
    sys.exit(1)

signal.signal(signal.SIGTERM, signalHandler)
#####
#####
# Patterns for the <page> wrapper emitted by Wikiprep: capture the 'id' and
# 'newlength' attributes plus the raw body between the tags.
# FIX: use raw strings — the originals were plain strings, so '\d' relied on
# Python passing unknown escapes through (a DeprecationWarning, and a
# SyntaxWarning/error in newer Pythons).
rePageLegacy = re.compile(r'<page id="(?P<id>\d+)".+?newlength="(?P<len>\d+)".+?>(?P<page>.+?)</page>', re.MULTILINE | re.DOTALL)
rePageModern = re.compile(r'<page id="(?P<id>\d+)".+?newlength="(?P<len>\d+)".+?>(?P<page>.+?)</page>', re.MULTILINE | re.DOTALL)
# Inside a page body: the article title and the whitespace-separated target
# ids inside <links>...</links>.
reContent = re.compile(r'<title>(?P<title>.+?)</title>.+?<links>(?P<links>.*?)</links>', re.MULTILINE | re.DOTALL)
# Titles with any of these prefixes are outside the Main (article) namespace.
reOtherNamespace = re.compile(r"^(User|Wikipedia|File|MediaWiki|Template|Help|Category|Portal|Book|Talk|Special|Media|WP|User talk|Wikipedia talk|File talk|MediaWiki talk|Template talk|Help talk|Category talk|Portal talk):.+", re.DOTALL)
# Select the <page> pattern matching the configured Wikiprep dialect.
if FORMAT == 'Zemanta-modern':
    rePage = rePageModern
else:
    rePage = rePageLegacy

RSIZE = 10000000 # read chunk size = 10 MB (overridable via 2nd CLI argument)

# Insert buffers, flushed to MySQL in batches to limit round-trips.
linkBuffer = []   # pending (source_id, target_id) rows for `pagelinks`
linkBuflen = 0
nsBuffer = []     # pending Main-namespace page ids for `namespace`
nsBuflen = 0
# NOTE(review): mainNS appears unused in this file — confirm before removing.
mainNS = []
# pageContent - <page>..content..</page>
# pageDict - stores page attribute dict
def recordArticle(pageDict):
    '''Record one <page> element from the Wikiprep dump.

    Keeps only Main-namespace articles: queues the page id for the
    `namespace` table and one (source_id, target_id) row per outgoing link
    for the `pagelinks` table. Buffers are flushed to MySQL in batches of
    10000 rows.

    pageDict -- groupdict of a rePage match: 'id', 'len' (newlength
    attribute) and 'page' (raw text between the <page> tags).
    '''
    global linkBuffer, linkBuflen, nsBuffer, nsBuflen

    # a simple check for content
    if int(pageDict['len']) < 10:
        return

    mContent = reContent.search(pageDict['page'])
    if not mContent:
        return
    contentDict = mContent.groupdict()

    pageId = int(pageDict['id'])
    title = contentDict['title']

    # only keep articles of the Main namespace
    if reOtherNamespace.match(title):
        return

    # BUGFIX: append a 1-tuple, not a bare int — `(id)` is just `id`, and
    # DB-API executemany expects a sequence of parameter sequences.
    nsBuffer.append((pageId,))
    nsBuflen += 1
    # BUGFIX: flush when the *namespace* buffer is full; the original tested
    # linkBuflen here, so nsBuffer was never flushed on its own size.
    if nsBuflen >= 10000:
        cursor.executemany("""
INSERT INTO namespace (id)
VALUES (%s)
""", nsBuffer)
        nsBuffer = []
        nsBuflen = 0

    # write links: <links> holds whitespace-separated target page ids
    for target in contentDict['links'].split():
        linkBuffer.append((pageId, target))  # source, target
        linkBuflen += 1
        if linkBuflen >= 10000:
            cursor.executemany("""
INSERT INTO pagelinks (source_id,target_id)
VALUES (%s,%s)
""", linkBuffer)
            linkBuffer = []
            linkBuflen = 0
    return
args = sys.argv[1:]
# scanLinks.py <hgw/gum file> [<read chunk size in bytes>]
if len(args) < 1:
    sys.exit()
if len(args) == 2:
    RSIZE = int(args[1])

f = open(args[0], 'r')

# Skip the dump preamble: content starts after </siteinfo> (Gabrilovich
# format) or after the opening <gum> tag (Zemanta formats).
firstRead = f.read(10000)
if FORMAT == 'Gabrilovich':
    documentStart = firstRead.find('</siteinfo>') + len('</siteinfo>')
else:
    documentStart = firstRead.find('<gum>') + len('<gum>')
prevText = firstRead[documentStart:]

# Stream the file in RSIZE chunks; a <page> element may straddle a chunk
# boundary, so the unconsumed tail is carried over into the next round.
while True:
    newText = f.read(RSIZE)
    if not newText:
        break
    text = prevText + newText
    # BUGFIX: start at 0, not -1 — with -1, a chunk containing no complete
    # <page> collapsed the carry-over to its final character (text[-1:]),
    # silently dropping all buffered input.
    endIndex = 0
    for page in rePage.finditer(text):
        recordArticle(page.groupdict())
        endIndex = page.end()
    prevText = text[endIndex:]
f.close()
# Flush whatever is left in the insert buffers after the scan completes.
if nsBuflen > 0:
    cursor.executemany("""
INSERT INTO namespace (id)
VALUES (%s)
""",nsBuffer)
    nsBuffer = []
    nsBuflen = 0
if linkBuflen > 0:
    cursor.executemany("""
INSERT INTO pagelinks (source_id,target_id)
VALUES (%s,%s)
""",linkBuffer)
    linkBuffer = []
    linkBuflen = 0
# Keep only links whose target is a Main-namespace article: rebuild
# `pagelinks` through a temporary table filtered against `namespace`.
cursor.execute("DROP TABLE IF EXISTS tmppagelinks")
cursor.execute("CREATE TABLE tmppagelinks LIKE pagelinks")
cursor.execute("INSERT tmppagelinks SELECT p.* FROM pagelinks p WHERE EXISTS (SELECT * FROM namespace n WHERE p.target_id = n.id)")
cursor.execute("DROP TABLE pagelinks")
cursor.execute("RENAME TABLE tmppagelinks TO pagelinks")

# inlinks: incoming-link count per target article
cursor.execute("DROP TABLE IF EXISTS inlinks")
cursor.execute("CREATE TABLE inlinks AS SELECT p.target_id, COUNT(p.source_id) AS inlink FROM pagelinks p GROUP BY p.target_id")
cursor.execute("CREATE INDEX idx_target_id ON inlinks (target_id)")

# outlinks: outgoing-link count per source article
cursor.execute("DROP TABLE IF EXISTS outlinks")
cursor.execute("CREATE TABLE outlinks AS SELECT p.source_id, COUNT(p.target_id) AS outlink FROM pagelinks p GROUP BY p.source_id")
cursor.execute("CREATE INDEX idx_source_id ON outlinks (source_id)")

cursor.close()
conn.close()