#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
USAGE: %(program)s WIKI_XML_DUMP OUTPUT
Converts the articles in a Wikipedia XML dump into a plain-text file with one
article per line, tokens separated by single spaces.
Note: doesn't support lemmatization.
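Example (the dump filename below is illustrative; any pages-articles dump
from https://dumps.wikimedia.org works):
    python process_wiki.py enwiki-latest-pages-articles.xml.bz2 wiki.en.txt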
Adapted from:
- http://textminingonline.com/training-word2vec-model-on-english-wikipedia-by-gensim
See also:
- https://github.com/piskvorky/gensim/blob/develop/gensim/scripts/make_wikicorpus.py
"""
import logging
import os.path
import sys
from gensim.corpora import WikiCorpus
if __name__ == '__main__':
    program = os.path.basename(sys.argv[0])
    logger = logging.getLogger(program)
    logging.basicConfig(format='%(asctime)s: %(levelname)s: %(message)s')
    logging.root.setLevel(level=logging.INFO)
    logger.info("Running %s", ' '.join(sys.argv))

    # Check and process input arguments.
    if len(sys.argv) < 3:
        print(globals()['__doc__'] % locals())
        sys.exit(1)
    inp, outp = sys.argv[1:3]

    # Lemmatization is only available for English, so skip it. Pass an empty
    # dictionary so WikiCorpus doesn't spend time building one; we only need
    # the token streams from get_texts(). (The lemmatize argument was removed
    # in gensim 4.0; omit it on newer versions.)
    wiki = WikiCorpus(inp, lemmatize=False, dictionary={})

    n = 0  # Guard against an empty dump.
    with open(outp, 'w') as output:
        for n, text in enumerate(wiki.get_texts(), start=1):
            if sys.version_info.major < 3:
                # Python 2: join the unicode tokens, then encode as UTF-8
                # before writing bytes.
                output.write(" ".join(text).encode('utf-8') + "\n")
            else:
                output.write(" ".join(text) + "\n")
            if n % 10000 == 0:
                logger.info("Saved %s articles", n)
    logger.info("Finished saving %s articles", n)