import os
import re
import time
import shutil
import cPickle as cp
import numpy as np
import math
import glob
from multiprocessing import Process
from nltk.tokenize import sent_tokenize
from bluemix_key import *
from TED_data_location import ted_data_path
from list_of_talks import all_valid_talks
'''
This module extracts the bluemix scores from the IBM Watson Tone
Analyzer. The code and its core assumptions were revised on October
30th to make them consistent with the new crawler format and the
overall folder structure. It is used to extract the bluemix scores
for the new TED talks.
Please note that this module assumes the existence of a working
credential in the bluemix_key file.
'''
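# For reference: this module expects bluemix_key to define a ready
# `tone_analyzer` object. A minimal sketch of such a file, assuming
# the old watson_developer_cloud SDK and placeholder credentials:
#
#     from watson_developer_cloud import ToneAnalyzerV3
#     tone_analyzer = ToneAnalyzerV3(
#         username='YOUR_BLUEMIX_USERNAME',  # placeholder
#         password='YOUR_BLUEMIX_PASSWORD',  # placeholder
#         version='2016-05-19')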
# Use the bluemix api to extract tones
def fetch_partial_annotations(startidx,endidx):
    # Create all paths
    metafolder = os.path.join(ted_data_path,'TED_meta/')
    outfolder = os.path.join(ted_data_path,\
        'TED_feature_bluemix_scores/')
    partfolder = os.path.join(ted_data_path,'TED_bm_partial/')
    if not os.path.exists(partfolder):
        os.mkdir(partfolder)
    # List existing full and partial data
    full_score = [int(afile[:-4]) for afile in \
        os.listdir(outfolder) if afile.endswith('.pkl')]
    part_score = [int(afile[:-4]) for afile in \
        os.listdir(partfolder) if afile.endswith('.pkl')]
    # Start processing
    for atalk in all_valid_talks:
        if atalk<startidx or atalk>endidx or atalk in full_score\
                or atalk in part_score:
            print 'skipping:',atalk
            continue
        __part_process__(atalk,metafolder,partfolder)
def __part_process__(atalk,metafolder,partfolder):
    filename = os.path.join(metafolder,str(atalk)+'.pkl')
    print filename
    data = cp.load(open(filename,'rb'))
    txt = ' '.join([aline.encode('ascii','ignore') for apara \
        in data['talk_transcript'] for aline in apara])
    # Remove parenthesized tags such as (Laughter) or (Applause)
    txt = re.sub('\([\w ]*?\)','',txt)
    response = tone_analyzer.tone(text=txt)
    print response
    with open(os.path.join(partfolder,str(atalk)+'.pkl'),'wb') as f:
        cp.dump(response,f)
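# For orientation, a Tone Analyzer response is a dict that typically
# looks like the following (abridged; the field values are purely
# illustrative):
#
#     {'document_tone': {'tone_categories': [...]},
#      'sentences_tone': [{'sentence_id': 0, 'input_from': 0,
#                          'input_to': 41, 'text': '...',
#                          'tone_categories': [...]}, ...]}
#
# The parsing helpers below consume these two structures.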
# Use the bluemix api to extract tones. It takes an explicit list
# of talk IDs.
def fetch_partial_annotations_by_list(list_of_talks):
    # Create all paths
    metafolder = os.path.join(ted_data_path,'TED_meta/')
    outfolder = os.path.join(ted_data_path,\
        'TED_feature_bluemix_scores/')
    partfolder = os.path.join(ted_data_path,'TED_bm_partial/')
    if not os.path.exists(partfolder):
        os.mkdir(partfolder)
    # List existing full and partial data
    full_score = [int(afile[:-4]) for afile in \
        os.listdir(outfolder) if afile.endswith('.pkl')]
    part_score = [int(afile[:-4]) for afile in \
        os.listdir(partfolder) if afile.endswith('.pkl')]
    # Start processing
    for atalk in list_of_talks:
        if atalk in full_score or atalk in part_score:
            print 'skipping:',atalk
            continue
        __part_process__(atalk,metafolder,partfolder)
# Segment a list into chunks of at most 100 items
def segment100(alist):
    m = len(alist)
    # Round the length up to the next multiple of 100
    m_100 = math.ceil(float(m)/100.)*100
    segm = [alist[i:min(j,len(alist))] for i,j in \
        zip(range(0,int(m_100),100),range(100,int(m_100)+1,100))]
    return segm
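# Example: a list of 250 sentences is split into three chunks of
# sizes 100, 100, and 50:
#
#     >>> [len(s) for s in segment100(range(250))]
#     [100, 100, 50]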
# Parse the tone_categories data structure coming from bluemix
def parse_tone_categories(categ_list):
    header=[]
    scores=[]
    for acat in categ_list:
        for atone in acat['tones']:
            header.append(acat['category_id']+'_'+atone['tone_id'])
            scores.append(atone['score'])
    return header,scores
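# Example (illustrative input, abridged from a bluemix response):
#
#     >>> cats = [{'category_id': 'emotion_tone',
#     ...          'tones': [{'tone_id': 'anger', 'score': 0.12}]}]
#     >>> parse_tone_categories(cats)
#     (['emotion_tone_anger'], [0.12])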
# Parse the sentences_tone data structure coming from bluemix
def parse_sentence_tone(senttone_list):
    frm_idx=[]
    to_idx=[]
    sentences=[]
    header=[]
    scores=[]
    for asent in senttone_list:
        if asent['sentence_id']==0:
            header,score = parse_tone_categories(asent['tone_categories'])
        else:
            _,score = parse_tone_categories(asent['tone_categories'])
        # Skip sentences without scores before appending anything, so
        # the index, sentence, and score lists stay aligned
        if not score:
            continue
        frm_idx.append(asent['input_from'])
        to_idx.append(asent['input_to'])
        sentences.append(asent['text'])
        scores.append(score)
    scores = np.array(scores)
    return scores,header,sentences,frm_idx,to_idx
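# A hypothetical usage sketch, assuming a previously saved score file:
#
#     data = cp.load(open(os.path.join(ted_data_path,
#         'TED_feature_bluemix_scores/1.pkl'),'rb'))
#     scores,header,sents,frm,to = \
#         parse_sentence_tone(data['sentences_tone'])
#     # scores is a (num_scored_sentences x len(header)) numpy array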
# Bluemix gives tones only for the first 100 sentences.
# This function gets the remaining annotations.
def fetch_remaining_annotations(startidx,endidx,
        talksdir='TED_meta/',
        outdir='TED_feature_bluemix_scores/',
        partdir='TED_bm_partial/'):
    # Create all paths
    metafolder = os.path.join(ted_data_path,talksdir)
    outfolder = os.path.join(ted_data_path,outdir)
    partfolder = os.path.join(ted_data_path,partdir)
    if not os.path.exists(partfolder):
        os.mkdir(partfolder)
    # List existing full and partial data
    full_score = [int(afile[:-4]) for afile in \
        os.listdir(outfolder) if afile.endswith('.pkl')]
    part_score = [int(afile[:-4]) for afile in \
        os.listdir(partfolder) if afile.endswith('.pkl')]
    # Restrict to the requested index range so that parallel workers
    # do not repeat each other's work
    for atalk in all_valid_talks:
        if atalk<startidx or atalk>endidx or atalk in full_score \
                or atalk not in part_score:
            print 'skipping:',atalk
            continue
        __process_remaining__(atalk,partfolder,outfolder,metafolder)
# Bluemix gives tones only for the first 100 sentences.
# This function gets the remaining annotations. It takes an explicit
# list of talk IDs.
def fetch_remaining_annotations_by_list(list_of_talks,
        talksdir='TED_meta/',
        outdir='TED_feature_bluemix_scores/',
        partdir='TED_bm_partial/'):
    # Create all paths
    metafolder = os.path.join(ted_data_path,talksdir)
    outfolder = os.path.join(ted_data_path,outdir)
    partfolder = os.path.join(ted_data_path,partdir)
    if not os.path.exists(partfolder):
        os.mkdir(partfolder)
    # List existing full and partial data
    full_score = [int(afile[:-4]) for afile in \
        os.listdir(outfolder) if afile.endswith('.pkl')]
    part_score = [int(afile[:-4]) for afile in \
        os.listdir(partfolder) if afile.endswith('.pkl')]
    for atalk in list_of_talks:
        if atalk in full_score or atalk not in part_score:
            print 'skipping:',atalk
        else:
            __process_remaining__(atalk,partfolder,outfolder,metafolder)
def __process_remaining__(atalk,partfolder,outfolder,metafolder):
    print atalk
    # Source and destination files
    src = os.path.join(partfolder,str(atalk)+'.pkl')
    dst = os.path.join(outfolder,str(atalk)+'.pkl')
    # Read the current file and check the transcript length
    filename = os.path.join(metafolder,str(atalk)+'.pkl')
    data = cp.load(open(filename,'rb'))
    txt = ' '.join([aline.encode('ascii','ignore') for apar in\
        data['talk_transcript'] for aline in apar])
    # Remove parenthesized tags such as (Laughter) or (Applause)
    txt = re.sub('\([\w ]*?\)','',txt)
    sentences = sent_tokenize(txt)
    if len(sentences)<=100:
        # The partial annotation already covers the full talk. So skip.
        print 'No more than 100 sentences. Copying directly:',atalk
        shutil.copyfile(src,dst)
    else:
        # This is the partial score data
        existingdata = cp.load(open(src,'rb'))
        # Mark pickles without sentence-wise scores
        if not existingdata.get('sentences_tone'):
            print 'Sentence-wise annotation not found.'\
                ' Marking it and skipping ...'
            shutil.copyfile(src,os.path.join(outfolder,str(atalk)+\
                '_no_sentence.pkl'))
        else:
            # Process sentence-wise scores (add the missing scores)
            old_to = existingdata['sentences_tone'][-1]['input_to']
            old_sentid = existingdata['sentences_tone'][-1]['sentence_id']
            # Segment the talk into chunks of 100 sentences
            segments = segment100(sentences)
            # Collect annotations for the rest of the talk
            for asegm in segments[1:]:
                txt = ' '.join(asegm)
                result = tone_analyzer.tone(text=txt)
                try:
                    output = result['sentences_tone']
                except KeyError:
                    # There was only one sentence in txt, so bluemix
                    # returned only a document-level tone
                    output=[{'input_from':0,'input_to':len(txt),
                        'sentence_id':0,'text':txt,
                        'tone_categories':result['document_tone']['tone_categories']}]
                # Shift the input_from, input_to, and sentence_id fields
                # so they continue from the existing annotation (e.g. if
                # old_to is 5000, a sentence with input_from 0 becomes 5001)
                for i in range(len(output)):
                    output[i]['input_from']+=old_to+1
                    output[i]['input_to']+=old_to+1
                    output[i]['sentence_id']+=old_sentid+1
                # Add the new content to the existing data
                existingdata['sentences_tone'].extend(output)
                # Update old_to and old_sentid to the most recent values
                old_to = output[-1]['input_to']
                old_sentid = output[-1]['sentence_id']
            cp.dump(existingdata,open(dst,'wb'))
def pipeline(st,en):
    fetch_partial_annotations(st,en)
    fetch_remaining_annotations(st,en)
# Entry point for running on the BlueHive cluster as a SLURM array job.
# Each array task annotates one talk that has meta data but no bluemix
# scores yet.
def process_bluehive():
    bm_path = os.path.join(ted_data_path,'TED_feature_bluemix_scores/*.pkl')
    bluemixlist = set([int(item.split('/')[-1].split('.')[0]) \
        for item in glob.glob(bm_path)])
    meta_path = os.path.join(ted_data_path,'TED_meta/*.pkl')
    metalist = set([int(item.split('/')[-1].split('.')[0]) \
        for item in glob.glob(meta_path)])
    filelist = list(metalist.difference(bluemixlist))
    # Pick the talk corresponding to this SLURM array task
    task_id = int(os.environ['SLURM_ARRAY_TASK_ID'])
    # Random sleeps stagger the API calls across concurrent tasks
    time.sleep(5.*np.random.rand())
    fetch_partial_annotations_by_list([filelist[task_id]])
    time.sleep(5.*np.random.rand())
    fetch_remaining_annotations_by_list([filelist[task_id]])
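# process_bluehive is intended to run as a SLURM array job, one missing
# talk per task. A hypothetical submission (the wrapper script name is
# an assumption):
#
#     sbatch --array=0-499 run_bluemix_array.sh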
if __name__=='__main__':
    # Annotate the talks in four parallel chunks. Both startidx and
    # endidx are inclusive, so the ranges must not overlap.
    p1 = Process(target=pipeline,args=(1,724))
    p1.start()
    p2 = Process(target=pipeline,args=(725,1449))
    p2.start()
    p3 = Process(target=pipeline,args=(1450,2174))
    p3.start()
    p4 = Process(target=pipeline,args=(2175,np.inf))
    p4.start()