-
Notifications
You must be signed in to change notification settings - Fork 2
/
verbmanager.py
228 lines (206 loc) · 8.35 KB
/
verbmanager.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
import vozbase
import collections
import util
def add_children_mentions_to_list(lst, lst_):
    """Depth-first flatten of mentions and all their descendants into lst_.

    Every entry of lst is appended (including falsy placeholders such as
    None), but only truthy mentions are descended into via child_mentions.
    """
    for mention in lst:
        lst_.append(mention)
        if mention:
            add_children_mentions_to_list(mention.child_mentions, lst_)
class Dependency(object):
    """A single labeled dependency edge from a governor to a dependent."""
    def __init__(self, label, governor, dependent):
        self.label = label
        self.governor = governor
        self.dependent = dependent
class Verb(vozbase.VozTextContainer):
    """A verb occurrence in a sentence, with its labeled arguments.

    Arguments use PropBank-style labels (A0, A1, ..., AM-*) and/or
    dependency relations (nsubj, dobj, ...). Subject and object mention
    sets are cached by _compute_caches.
    """
    def __init__(self, id, offset, len, token, frame, arguments):
        super(Verb, self).__init__(id, offset, len)
        self.token = token
        self.frame = frame  # unused
        self.arguments = arguments  # type: {} -- maps argument label -> tokens
        # caches populated by _compute_caches
        self._subjects = None
        self._objects = None
        self._sentence = None
    def set_negated(self):
        # BUG FIX: the original stored the key as 'Am-NEG', which
        # is_negated (checking 'AM-NEG') could never see.
        self.arguments['AM-NEG'] = None
    def is_negated(self):
        return 'AM-NEG' in self.arguments
    def _filter_mentions(self, mentions, filter_character_field):
        # Keep independent mentions only; when filter_character_field is
        # given, additionally require that attribute of the mention to
        # report is_character().
        return [i for i in mentions if i and i.is_independent and
                (filter_character_field is None or getattr(i, filter_character_field).is_character())]
    def get_subjects(self, filter_character_field=None):
        """Return cached subject mentions; call _compute_caches first."""
        return self._filter_mentions(self._subjects, filter_character_field)
    def get_objects(self, filter_character_field=None):
        """Return cached object mentions; call _compute_caches first."""
        return self._filter_mentions(self._objects, filter_character_field)
    def _clear_caches(self, sentence):
        del self._subjects
        del self._objects
        del self._sentence
    def _compute_caches(self, sentence=None):
        """Resolve argument tokens to mentions (plus their children) and
        split them into subject and object sets by argument label."""
        if sentence:
            self._sentence = sentence
            subjects = set()
            objects = set()
            for arg, tokens in self.arguments.items():
                if not tokens:
                    # e.g. the AM-NEG marker stored by set_negated carries
                    # no tokens; the original crashed iterating None here
                    continue
                matches = [sentence._parent_document.get_mention_by_token_id(i.id) for i in tokens]
                matches_ = []
                add_children_mentions_to_list(matches, matches_)
                matches = set(matches_)
                if not matches:
                    # TODO add the children of non-independent mentions by checking mention.contains(mention)
                    continue
                if arg.startswith('A0') or arg in ('nsubj', 'expl'):
                    subjects.update(matches)
                elif arg.startswith(('A1', 'A2', 'A3', 'AM')) or arg in ('nsubjpass', 'dobj', 'iobj', 'pobj'):
                    objects.update(matches)
            self._subjects = subjects
            self._objects = objects
    def get_tokens(self):
        return [self.token]
    def get_text(self):
        return self.token.get_text()
    def __str__(self):
        return "Verb %d" % (self.id)
class VerbMapper(object):
    """Maps verb lemmas to coarser classes (FrameNet frame, Levin class,
    WordNet root hypernym), caching lookups per mode.
    """
    MODE_NO_MAP = 'NoMap'
    MODE_FRAMENET_TEXT = 'FNT'
    MODE_LEVIN_TEXT = 'LVT'
    MODE_WORDNET_TEXT = 'WNT'
    # lazily-initialized shared resources
    _fn = None
    _wn = None
    _verb_mapping_levin = None
    _verb_mapping_100 = None
    _verb_mapping_50 = None
    def __init__(self, mode=None):
        self._verb_mapping_cache = {}  # mode -> {verb: mapped form}
        self._default_mode = mode or VerbMapper.MODE_NO_MAP
    def map(self, verb, mode=None, fallback=True):
        """Return the mapped form of verb for mode.

        When no mapping is found, returns verb itself if fallback is
        true, otherwise None.
        """
        if not mode:
            mode = self._default_mode
        if mode == VerbMapper.MODE_NO_MAP:
            return verb
        if mode not in self._verb_mapping_cache:
            self._load_cache(mode)
        if verb not in self._verb_mapping_cache[mode]:
            if mode == VerbMapper.MODE_FRAMENET_TEXT:
                self._verb_mapping_cache[mode][verb] = VerbMapper.verb_mapping_framenet(verb)
            elif mode == VerbMapper.MODE_WORDNET_TEXT:
                self._verb_mapping_cache[mode][verb] = VerbMapper.verb_mapping_wordnet(verb)
            elif mode == VerbMapper.MODE_LEVIN_TEXT:
                # the Levin table is loaded whole up front; a cache miss
                # means the verb is simply not in the file
                pass
        verb_return = self._verb_mapping_cache[mode].get(verb, None)
        if verb_return:
            return verb_return
        return verb if fallback else None
    def _load_cache(self, mode):
        """Populate the cache for mode, from disk where available."""
        if mode == VerbMapper.MODE_LEVIN_TEXT:
            self._verb_mapping_cache[mode] = self._verb_mapping_levin_load()
        else:
            # BUG FIX: a leftover '0/0' debug statement forced the except
            # branch, so the serialized cache was never actually loaded.
            # NOTE(review): hard-coded user path; should be configurable.
            try:
                cache = vozbase.unserialize_file('/Users/josepvalls/temp/voz2/VerbMapperCache-%s.json' % mode)
            except Exception:
                cache = {}
            self._verb_mapping_cache[mode] = cache
    def save_cache(self, mode=None):
        """Persist the cache for mode (default: the instance's mode)."""
        if not mode:
            mode = self._default_mode
        vozbase.serialize_to_file(self._verb_mapping_cache[mode], '/Users/josepvalls/temp/voz2/VerbMapperCache-%s.json' % mode)
    @classmethod
    def verb_mapping_framenet(cls, verb):
        """Return the name of the first FrameNet frame whose lexical
        units contain verb.v, or None when unavailable/no match."""
        if not cls._fn:
            try:
                from nltk.corpus import framenet as fn
                cls._fn = fn
            except ImportError:
                print("Cannot load NLTK.CORPUS")
        try:
            return [f for f in cls._fn.frames() if any(luName == verb + '.v' for luName in f.lexUnit)][0].name
        except Exception:
            # no frame found, or the corpus could not be loaded
            return None
    @classmethod
    def verb_mapping_wordnet(cls, verb):
        """Return the most common root hypernym lemma for the verb's
        WordNet synsets, or None when unavailable/no match."""
        if not cls._wn:
            try:
                from nltk.corpus import wordnet as wn
                cls._wn = wn
            except ImportError:
                print("Cannot load NLTK.CORPUS")
        try:
            return collections.Counter(util.flatten([i.root_hypernyms() for i in cls._wn.synsets(verb, 'v')])).most_common()[0][0].lemma_names()[0]
        except Exception:
            # no synsets found, or the corpus could not be loaded
            return None
    @classmethod
    def verb_mapping_100(cls, verb):
        """Look up verb in the 100-cluster table (loaded on first use)."""
        if not cls._verb_mapping_100:
            cls._verb_mapping_100 = cls._verb_mapping_load('clustered-verbs-100.txt')
        return cls._verb_mapping_100.get(verb, None)
    @classmethod
    def verb_mapping_50(cls, verb):
        """Look up verb in the 50-cluster table (loaded on first use)."""
        if not cls._verb_mapping_50:
            cls._verb_mapping_50 = cls._verb_mapping_load('clustered-verbs-50.txt')
        return cls._verb_mapping_50.get(verb, None)
    @classmethod
    def _verb_mapping_load(cls, filename):
        """Load a cluster file: each line is 'key verb verb ...'; every
        verb on the line (including the key) maps to the key.

        BUG FIX: the original omitted the 'cls' parameter of this
        classmethod (every call raised TypeError) and did not strip the
        newline, leaving the last verb of each line unmatchable.
        """
        verb_mapping_data = {}
        with open(filename) as fh:
            for line in fh:
                tokens = line.strip().split(' ')
                if not tokens or not tokens[0]:
                    continue
                key = tokens[0]
                for token in tokens:
                    if token:
                        verb_mapping_data[token] = key
        return verb_mapping_data
    def _verb_mapping_levin_load(self):
        """Parse data/levin.verbs.txt into {verb: levin-class}.

        Class-header lines start at column 0 ('<number>.<...> ...');
        member verbs follow. When a verb appears in several classes,
        the smaller class wins.
        """
        c = {}
        current = ''
        cd = 0
        data = open('data/levin.verbs.txt').readlines()
        verbs = []
        for line in data:
            if line.strip() and not line.startswith(' '):
                # new class header: first token is the class id
                current = line.strip().split()[0]
                try:
                    cd = int(current.split('.')[0])
                except ValueError:
                    cd = -1
                c[current] = []
                if '"' in line:
                    # quoted example verb on the header line itself
                    c[current] = [line.split('"')[1]]
            elif cd > 8:
                # member line of a class we keep (top-level id > 8)
                c[current] += line.strip().split()
                for i in line.strip().split():
                    verbs.append((i, current))
        verbmap = {}
        for verb, group in verbs:
            if verb in verbmap:
                # prefer the smaller (more specific) class
                if len(c[verbmap[verb]]) > len(c[group]):
                    verbmap[verb] = group
            else:
                verbmap[verb] = group
        return verbmap
def main():
    """Exercise the mapper on a few motion verbs and dump its cache."""
    mapper = VerbMapper()
    # NOTE: FrameNet and Levin modes also available:
    # [VerbMapper.MODE_FRAMENET_TEXT, VerbMapper.MODE_LEVIN_TEXT, VerbMapper.MODE_WORDNET_TEXT]
    for mode in [VerbMapper.MODE_WORDNET_TEXT]:
        for verb in ('walk', 'run', 'crawl', 'jump', 'leap'):
            mapper.map(verb, mode)
    import pprint
    pprint.pprint(mapper._verb_mapping_cache)
if __name__ == "__main__":
    main()