# dmv reestimation and inside-outside probabilities using loc_h
# 1. Grammar-class and related functions
# 2. P_INSIDE / inner() and inner_sent()
# 3. P_OUTSIDE / outer()
# 4. Reestimation v.1: sentences as outer loop
# 5. Reestimation v.2: head-types as outer loop
# 6. Most Probable Parse
# 7. Testing functions
from common_dmv import *

### todo: debug with @accepts once in a while, but it's SLOW
# from typecheck import accepts, Any

if __name__ == "__main__":
    print "loc_h_dmv module tests:"
def adj(middle, loc_h):
    "middle is eg. k when rewriting for i<k<j (inside probabilities)."
    return middle == loc_h or middle == loc_h+1 # ADJ == True
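# A minimal sketch (not part of the original module) of how adj() reads:
# adjacency is relative to the head's left between-word position, so for
# loc_h = 2 both position 2 and position 3 count as adjacent.
def _example_adj():
    return [adj(k, 2) for k in (2, 3, 4)] # -> [True, True, False]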
def make_GO_AT(p_STOP, p_ATTACH):
    p_GO_AT = {} # assumed initialization; the surrounding lines are not preserved
    for (a,h,dir), p_ah in p_ATTACH.iteritems():
        p_GO_AT[a,h,dir, NON] = p_ah * (1-p_STOP[h, dir, NON])
        p_GO_AT[a,h,dir, ADJ] = p_ah * (1-p_STOP[h, dir, ADJ])
    return p_GO_AT # assumed; DMV_Grammar.__init__ stores the returned dict
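# A minimal sketch (assumed helper, not in the original) of the quantity the
# table entries above encode: do-not-stop times attach.
def _example_go_at(p_STOP, p_ATTACH, a, h, dir, adjacency):
    # equivalent to the precomputed entry p_GO_AT[a, h, dir, adjacency]
    return p_ATTACH[a, h, dir] * (1 - p_STOP[h, dir, adjacency])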
class DMV_Grammar(io.Grammar):
    def __str__(self): # assumed wrapper; the def-lines of __str__ and its helpers are not preserved
        def t(n): # assumed helper header
            return "%d=%s" % (n, self.numtag(n))
        def p(d, key): # assumed helper: probability lookup defaulting to 0.0
            return d.get(key, 0.0)
        def p_a(a, h): # assumed helper header; formats the attachment probabilities
            p_L = p(self.p_ATTACH,(a,h,LEFT))
            p_R = p(self.p_ATTACH,(a,h,RIGHT))
            if p_L == 0.0 and p_R == 0.0:
                return '' # assumed: skip pairs that never attach
            str = "p_ATTACH[ %s|%s,L] = %.4f" % (t(a), t(h), p_L)
            str += "p_ATTACH[ %s|%s,R] = %.4f" % (t(a), t(h), p_R)
            return str # assumed
        root, stop, att, ord = "","","",""
        for h in self.headnums():
            root += "p_ROOT[%s] = %.4f\n" % (t(h), p(self.p_ROOT, (h)))
            stop += "p_STOP[stop|%s,L,adj] = %.4f\t" % (t(h), p(self.p_STOP, (h,LEFT,ADJ)))
            stop += "p_STOP[stop|%s,R,adj] = %.4f\n" % (t(h), p(self.p_STOP, (h,RIGHT,ADJ)))
            stop += "p_STOP[stop|%s,L,non] = %.4f\t" % (t(h), p(self.p_STOP, (h,LEFT,NON)))
            stop += "p_STOP[stop|%s,R,non] = %.4f\n" % (t(h), p(self.p_STOP, (h,RIGHT,NON)))
            att += ''.join([p_a(a,h) for a in self.headnums()])
            ord += "p_ORDER[ left-first|%s ] = %.4f\t" % (t(h), p(self.p_ORDER, (GOL,h)))
            ord += "p_ORDER[right-first|%s ] = %.4f\n" % (t(h), p(self.p_ORDER, (GOR,h)))
        return root + stop + att + ord
    def __init__(self, numtag, tagnum, p_ROOT, p_STOP, p_ATTACH, p_ORDER):
        io.Grammar.__init__(self, numtag, tagnum)
        self.p_ROOT = p_ROOT     # p_ROOT[w] = p
        self.p_ORDER = p_ORDER   # p_ORDER[seals, w] = p
        self.p_STOP = p_STOP     # p_STOP[w, LEFT, NON] = p (etc. for LA,RN,RA)
        self.p_ATTACH = p_ATTACH # p_ATTACH[a, h, LEFT] = p (etc. for R)
        # p_GO_AT[a, h, LEFT, NON] = p (etc. for LA,RN,RA)
        self.p_GO_AT = make_GO_AT(self.p_STOP, self.p_ATTACH)
        # these are used in reestimate2():
        self._icharts = {} # assumed initialization; get_iochart()/set_iochart() below rely on them
        self._ocharts = {}
    def get_iochart(self, sent_nums):
        ch_key = tuple(sent_nums)
        try: # assumed guard: fall back to fresh charts for unseen sentences
            ichart = self._icharts[ch_key]
        except KeyError:
            ichart = {}
        try:
            ochart = self._ocharts[ch_key]
        except KeyError:
            ochart = {}
        return (ichart, ochart)

    def set_iochart(self, sent_nums, ichart, ochart):
        self._icharts[tuple(sent_nums)] = ichart
        self._ocharts[tuple(sent_nums)] = ochart

    def reset_iocharts(self):
        self._icharts = {} # assumed body: clear the per-sentence chart caches
        self._ocharts = {}

    def p_GO_AT_or0(self, a, h, dir, adj):
        try: # assumed, as the name suggests: missing entries count as probability 0.0
            return self.p_GO_AT[a, h, dir, adj]
        except KeyError:
            return 0.0
def locs(sent_nums, start, stop):
    '''Return the between-word locations of all words in some fragment of
    sent. We make sure to offset the locations correctly so that for
    any (loc_w, w) in the returned list, sent_nums[loc_w] == w.

    start is inclusive, stop is exclusive, as in klein-thesis and
    Python's list-slicing.'''
    for i0,w in enumerate(sent_nums[start:stop]):
        yield (i0+start, w) # assumed body: offset by start, as the docstring describes
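# Sketch (assumed data, relying on the offset above): with sent_nums = [3, 7, 3],
#   list(locs(sent_nums, 1, 3)) == [(1, 7), (2, 3)]
# i.e. loc_w indexes into the full sentence, not into the fragment.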
###################################################
#          P_INSIDE (dmv-specific)                #
###################################################
#@accepts(int, int, (int, int), int, Any(), [str], {tuple:float}, IsOneOf(None,{}))
def inner(i, j, node, loc_h, g, sent, ichart, mpptree=None):
    ''' The ichart is of this form:
    ichart[i,j,LHS, loc_h]
    where i and j are between-word positions.

    loc_h gives adjacency (along with k for attachment rules), and is
    needed in P_STOP reestimation.
    '''
    sent_nums = g.sent_nums(sent)

    def terminal(i,j,node, loc_h, tabs):
        if not i <= loc_h < j:
            if 'INNER' in DEBUG: # assumed debug guard (the guard lines are not preserved)
                print "%s*= 0.0 (wrong loc_h)" % tabs
            return 0.0 # assumed
        elif POS(node) == sent_nums[i] and node in g.p_ORDER:
            # todo: add to ichart perhaps? Although, it _is_ simple lookup..
            prob = g.p_ORDER[node]
        else:
            prob = 0.0 # assumed fallback
            if 'INNER' in DEBUG:
                print "%sLACKING TERMINAL:" % tabs
        if 'INNER' in DEBUG:
            print "%s*= %.4f (terminal: %s -> %s_%d)" % (tabs, prob, node_str(node), sent[i], loc_h)
        return prob # assumed

    def e(i,j, (s_h,h), loc_h, n_t):
        def to_mpp(p, L, R): # assumed helper header; records the best-scoring split
            if mpptree == None: return # assumed guard: only record when a tree is requested
            key = (i,j, (s_h,h), loc_h)
            if key not in mpptree:
                mpptree[key] = (p, L, R)
            elif mpptree[key][0] < p:
                mpptree[key] = (p, L, R)

        def tab():
            "Tabs for debug output"
            return "\t"*n_t # assumed body

        if (i, j, (s_h,h), loc_h) in ichart:
            if 'INNER' in DEBUG:
                print "%s*= %.4f in ichart: i:%d j:%d node:%s loc:%s" % (tab(), ichart[i, j, (s_h,h), loc_h], i, j,
                                                                         node_str((s_h,h)), loc_h)
            return ichart[i, j, (s_h,h), loc_h]

        # Either terminal rewrites, using p_ORDER:
        if i+1 == j and (s_h == GOR or s_h == GOL):
            return terminal(i, j, (s_h,h), loc_h, tab())
        else: # Or not at terminal level yet:
            if 'INNER' in DEBUG:
                print "%s%s (%.1f) from %d to %d" % (tab(), node_str((s_h,h)), loc_h, i, j)
            if s_h == SEAL: # assumed branch header (not preserved); both sealing rewrites contribute
                p_RGOL = g.p_STOP[h, LEFT, adj(i,loc_h)] * e(i,j,(RGOL,h),loc_h,n_t+1)
                p_LGOR = g.p_STOP[h, RIGHT, adj(j,loc_h)] * e(i,j,(LGOR,h),loc_h,n_t+1)
                p = p_RGOL + p_LGOR # assumed
                to_mpp(p_RGOL, STOPKEY, (i,j, (RGOL,h),loc_h))
                to_mpp(p_LGOR, (i,j, (LGOR,h),loc_h), STOPKEY) # LGOR assumed here; the preserved text had RGOL
                if 'INNER' in DEBUG:
                    print "%sp= %.4f (STOP)" % (tab(), p)
            elif s_h == RGOL or s_h == GOL:
                p = g.p_STOP[h, RIGHT, adj(j,loc_h)] * e(i,j, (GOR,h),loc_h,n_t+1)
                to_mpp(p, (i,j, (GOR,h),loc_h), STOPKEY)
                for k in xgo_left(i, loc_h): # i < k <= loc_l(h)
                    p_R = e(k, j, ( s_h,h), loc_h, n_t+1)
                    for loc_a,a in locs(sent_nums, i, k):
                        p_ah = g.p_GO_AT_or0(a, h, LEFT, adj(k,loc_h))
                        p_L = e(i, k, (SEAL,a), loc_a, n_t+1)
                        p_add = p_L * p_ah * p_R
                        p += p_add # assumed accumulation (not preserved)
                        to_mpp(p_add, # assumed call head; its argument lines follow
                               (i, k, (SEAL,a), loc_a),
                               (k, j, ( s_h,h), loc_h))
                if 'INNER' in DEBUG:
                    print "%sp= %.4f (ATTACH)" % (tab(), p)
            elif s_h == GOR or s_h == LGOR:
                p = g.p_STOP[h, LEFT, adj(i,loc_h)] * e(i,j, (GOL,h),loc_h,n_t+1)
                to_mpp(p, (i,j, (GOL,h),loc_h), STOPKEY)
                for k in xgo_right(loc_h, j): # loc_l(h) < k < j
                    p_L = e(i, k, ( s_h,h), loc_h, n_t+1)
                    for loc_a,a in locs(sent_nums,k,j):
                        p_ah = g.p_GO_AT_or0(a, h, RIGHT, adj(k,loc_h))
                        p_R = e(k, j, (SEAL,a), loc_a, n_t+1)
                        p_add = p_L * p_ah * p_R
                        p += p_add # assumed accumulation
                        to_mpp(p_add, # assumed call head
                               (i, k, ( s_h,h), loc_h),
                               (k, j, (SEAL,a), loc_a))
                if 'INNER' in DEBUG:
                    print "%sp= %.4f (ATTACH)" % (tab(), p)
            # elif s_h == GOL: # todo
            ichart[i, j, (s_h,h), loc_h] = p
            return p # assumed: memoize and return the inside probability

    inner_prob = e(i,j,node,loc_h, 0)
    if 'INNER' in DEBUG:
        print debug_ichart(g,sent,ichart)
    return inner_prob # assumed (callers use the returned value)
# end of dmv.inner(i, j, node, loc_h, g, sent, ichart,mpptree)
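# A small sketch (mirroring the regression tests at the bottom of this file):
# inner() fills the chart it is handed, so a dict can be reused within a
# sentence to avoid recomputation.
def _example_inner():
    chart = {}
    p = inner(0, 2, (SEAL, 0), 0, testgrammar_h(), 'h h'.split(), chart)
    return p, len(chart)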
def debug_ichart(g,sent,ichart):
    str = "---ICHART:---\n"
    for (s,t,LHS,loc_h),v in ichart.iteritems():
        str += "%s -> %s_%d ... %s_%d (loc_h:%s):\t%s\n" % (node_str(LHS,g.numtag),
                                                            sent[s], s, sent[s], t, loc_h, v)
    str += "---ICHART:end---\n"
    return str # assumed (callers print the returned string)
def inner_sent(g, sent, ichart):
    return sum([g.p_ROOT[w] * inner(0, len(sent), (SEAL,w), loc_w, g, sent, ichart)
                for loc_w,w in locs(g.sent_nums(sent),0,len(sent))])
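# Sketch: the probability of a sentence is the sum over all word tokens w of
# P(ROOT -> w) times the inside probability of the fully sealed w spanning
# the whole sentence (cf. the regression tests below).
def _example_inner_sent():
    return inner_sent(testgrammar_h(), 'h h h'.split(), {})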
###################################################
#          P_OUTSIDE (dmv-specific)               #
###################################################
#@accepts(int, int, (int, int), int, Any(), [str], {tuple:float}, {tuple:float})
def outer(i,j,w_node,loc_w, g, sent, ichart, ochart):
    ''' http://www.student.uib.no/~kun041/dmvccm/DMVCCM.html#outer

    w_node is a pair (seals,POS); the w in klein-thesis is made up of
    '''
    sent_nums = g.sent_nums(sent)
    if POS(w_node) not in sent_nums[i:j]:
        # sanity check, w must be able to dominate sent[i:j]
        return 0.0 # assumed

    def e(i,j,LHS,loc_h): # P_{INSIDE}
        if (i,j,LHS,loc_h) in ichart: # assumed guard (not preserved)
            return ichart[i,j,LHS,loc_h]
        else:
            return inner(i,j,LHS,loc_h,g,sent,ichart)

    def f(i,j,w_node,loc_w):
        if not (i <= loc_w < j):
            return 0.0 # assumed
        if (i,j,w_node,loc_w) in ochart:
            return ochart[i,j, w_node,loc_w]
        if i == 0 and j == len(sent):
            if w_node == ROOT: return 1.0 # assumed (not preserved)
        else: # ROOT may only be used on full sentence
            if w_node == ROOT: return 0.0 # assumed
        # but we may have non-ROOTs (stops) over full sentence too:

        s_w = seals(w_node) # assumed bindings (not preserved); w_node is (seals,POS)
        w = POS(w_node)
        p = 0.0 # assumed accumulator
        # todo: try either if p_M > 0.0: or sum(), and speed-test them
        if s_w == SEAL: # w == a
            # todo: do the i<sent<j check here to save on calls?
            p = g.p_ROOT[w] * f(i,j,ROOT,loc_w)
            for k in xgt(j, sent): # j<k<len(sent)+1
                for loc_h,h in locs(sent_nums,j,k):
                    p_wh = g.p_GO_AT_or0(w, h, LEFT, adj(j, loc_h))
                    for s_h in [RGOL, GOL]:
                        p += f(i,k,(s_h,h),loc_h) * p_wh * e(j,k,(s_h,h),loc_h)
            for k in xlt(i): # k<i
                for loc_h,h in locs(sent_nums,k,i):
                    p_wh = g.p_GO_AT_or0(w, h, RIGHT, adj(i, loc_h))
                    for s_h in [LGOR, GOR]:
                        p += e(k,i,(s_h,h), loc_h) * p_wh * f(k,j,(s_h,h), loc_h)
        elif s_w == RGOL or s_w == GOL: # w == h, left stop + left attach
            # assumed (not preserved): s_h is the seal-state of the parent that
            # produced w_node by a left stop
            if s_w == RGOL: s_h = SEAL
            else: s_h = LGOR
            p = g.p_STOP[w, LEFT, adj(i,loc_w)] * f(i,j,( s_h,w),loc_w)
            for k in xlt(i): # k<i
                for loc_a,a in locs(sent_nums,k,i):
                    p_aw = g.p_GO_AT_or0(a, w, LEFT, adj(i, loc_w))
                    p += e(k,i, (SEAL,a),loc_a) * p_aw * f(k,j,w_node,loc_w)
        elif s_w == GOR or s_w == LGOR: # w == h, right stop + right attach
            # assumed (not preserved): parent seal-state for a right stop
            if s_w == GOR: s_h = RGOL
            else: s_h = SEAL
            p = g.p_STOP[w, RIGHT, adj(j,loc_w)] * f(i,j,( s_h,w),loc_w)
            for k in xgt(j, sent): # j<k<len(sent)+1
                for loc_a,a in locs(sent_nums,j,k):
                    p_ah = g.p_GO_AT_or0(a, w, RIGHT, adj(j, loc_w))
                    p += f(i,k,w_node,loc_w) * p_ah * e(j,k,(SEAL,a),loc_a)

        ochart[i,j,w_node,loc_w] = p
        return p # assumed

    return f(i,j,w_node,loc_w)
# end outer(i,j,w_node,loc_w, g,sent, ichart,ochart)
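# Sketch (mirroring the regression tests below): outer() needs both charts;
# fresh dicts are fine, they get filled as a side effect.
def _example_outer():
    return outer(1, 3, (RGOL, 0), 2, testgrammar_h(), 'h h h'.split(), {}, {})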
###################################################
#      Reestimation v.1:                          #
#      Sentences as outer loop                    #
###################################################
def reest_zeros(h_nums):
    '''A dict to hold numerators and denominators for our 6+ reestimation
    formulas.'''
    fr = { ('ROOT','den'):0.0 } # holds sum over p_sent
    for h in h_nums: # assumed loop header (not preserved)
        fr['ROOT','num',h] = 0.0
        for s_h in [GOR,GOL,RGOL,LGOR]:
            x = (s_h,h) # assumed (x is used below)
            fr['hat_a','den',x] = 0.0 # = c()
            # not all arguments are attached to, so we just initialize
            # fr['hat_a','num',a,(s_h,h)] as they show up, in reest_freq
            for adj in [NON, ADJ]:
                for nd in ['num','den']:
                    fr['STOP',nd,x,adj] = 0.0
    return fr # assumed
def reest_freq(g, corpus):
    fr = reest_zeros(g.headnums())
    p_sent = None # 50 % speed increase on storing this locally

    # local functions altogether 2x faster than global
    def c(i,j,LHS,loc_h,sent):
        p_in = e(i,j, LHS,loc_h,sent)
        p_out = f(i,j, LHS,loc_h,sent)
        return p_in * p_out / p_sent

    def f(i,j,LHS,loc_h,sent): # P_{OUTSIDE}
        if (i,j,LHS,loc_h) in ochart: # assumed guard (not preserved)
            return ochart[i,j,LHS,loc_h]
        else:
            return outer(i,j,LHS,loc_h,g,sent,ichart,ochart)

    def e(i,j,LHS,loc_h,sent): # P_{INSIDE}
        if (i,j,LHS,loc_h) in ichart: # assumed guard (not preserved)
            return ichart[i,j,LHS,loc_h]
        else:
            return inner(i,j,LHS,loc_h,g,sent,ichart)

    def w_left(i,j, x,loc_h,sent,sent_nums):
        if not p_sent > 0.0: return
        h = POS(x) # assumed bindings (not preserved); a_k collects per-argument mass
        a_k = {}
        for k in xtween(i, j):
            p_out = f(i,j, x,loc_h, sent)
            p_R = e(k,j, x,loc_h, sent)
            for loc_a,a in locs(sent_nums, i,k): # i<=loc_l(a)<k
                p_rule = g.p_GO_AT_or0(a, h, LEFT, adj(k, loc_h))
                p_L = e(i,k, (SEAL,a), loc_a, sent)
                p = p_L * p_out * p_R * p_rule
                try: a_k[a] += p # assumed accumulation (not preserved)
                except KeyError: a_k[a] = p
        for a,p in a_k.iteritems():
            try: fr['hat_a','num',a,x] += p / p_sent
            except: fr['hat_a','num',a,x] = p / p_sent
    # end reest_freq.w_left()

    def w_right(i,j, x,loc_h,sent,sent_nums):
        if not p_sent > 0.0: return
        h = POS(x) # assumed bindings (not preserved)
        a_k = {}
        for k in xtween(i, j):
            p_out = f(i,j, x,loc_h, sent)
            p_L = e(i,k, x,loc_h, sent)
            for loc_a,a in locs(sent_nums, k,j): # k<=loc_l(a)<j
                p_rule = g.p_GO_AT_or0(a, h, RIGHT, adj(k, loc_h))
                p_R = e(k,j, (SEAL,a),loc_a, sent)
                p = p_L * p_out * p_R * p_rule
                try: a_k[a] += p # assumed accumulation
                except KeyError: a_k[a] = p
        for a,p in a_k.iteritems():
            try: fr['hat_a','num',a,x] += p / p_sent
            except: fr['hat_a','num',a,x] = p / p_sent
    # end reest_freq.w_right()
    for sent in corpus: # assumed loop header ("sentences as outer loop"; not preserved)
        ichart, ochart = {}, {} # assumed: fresh charts per sentence, read by e/f/c above
        p_sent = inner_sent(g, sent, ichart)
        fr['ROOT','den'] += p_sent

        sent_nums = g.sent_nums(sent)

        for loc_h,h in locs(sent_nums,0,len(sent)+1): # locs-stop is exclusive, thus +1
            fr['ROOT','num',h] += g.p_ROOT[h] * e(0,len(sent), (SEAL,h),loc_h, sent)
            loc_l_h, loc_r_h = loc_h, loc_h+1 # assumed (used throughout below)

            # left non-adjacent stop:
            for i in xlt(loc_l_h):
                fr['STOP','num',(GOL,h),NON] += c(loc_l_h, j, (LGOR, h),loc_h, sent)
                fr['STOP','den',(GOL,h),NON] += c(loc_l_h, j, (GOL, h),loc_h, sent)
                for j in xgteq(loc_r_h, sent):
                    fr['STOP','num',(RGOL,h),NON] += c(i, j, (SEAL, h),loc_h, sent)
                    fr['STOP','den',(RGOL,h),NON] += c(i, j, (RGOL, h),loc_h, sent)
            # left adjacent stop, i = loc_l_h
            fr['STOP','num',(GOL,h),ADJ] += c(loc_l_h, loc_r_h, (LGOR, h),loc_h, sent)
            fr['STOP','den',(GOL,h),ADJ] += c(loc_l_h, loc_r_h, (GOL, h),loc_h, sent)
            for j in xgteq(loc_r_h, sent):
                fr['STOP','num',(RGOL,h),ADJ] += c(loc_l_h, j, (SEAL, h),loc_h, sent)
                fr['STOP','den',(RGOL,h),ADJ] += c(loc_l_h, j, (RGOL, h),loc_h, sent)
            # right non-adjacent stop:
            for j in xgt(loc_r_h, sent):
                fr['STOP','num',(GOR,h),NON] += c(loc_l_h, j, (RGOL, h),loc_h, sent)
                fr['STOP','den',(GOR,h),NON] += c(loc_l_h, j, (GOR, h),loc_h, sent)
                for i in xlteq(loc_l_h):
                    fr['STOP','num',(LGOR,h),NON] += c(loc_l_h, j, (SEAL, h),loc_h, sent)
                    fr['STOP','den',(LGOR,h),NON] += c(loc_l_h, j, (LGOR, h),loc_h, sent)
            # right adjacent stop, j = loc_r_h
            fr['STOP','num',(GOR,h),ADJ] += c(loc_l_h, loc_r_h, (RGOL, h),loc_h, sent)
            fr['STOP','den',(GOR,h),ADJ] += c(loc_l_h, loc_r_h, (GOR, h),loc_h, sent)
            for i in xlteq(loc_l_h):
                fr['STOP','num',(LGOR,h),ADJ] += c(loc_l_h, j, (SEAL, h),loc_h, sent)
                fr['STOP','den',(LGOR,h),ADJ] += c(loc_l_h, j, (LGOR, h),loc_h, sent)

            if 'REEST_ATTACH' in DEBUG:
                print "Lattach %s: for i < %s"%(g.numtag(h),sent[0:loc_h+1])
            for s_h in [RGOL, GOL]:
                x = (s_h,h) # assumed (not preserved; x is used below)
                for i in xlt(loc_l_h): # i < loc_l(h)
                    if 'REEST_ATTACH' in DEBUG:
                        print "\tfor j >= %s"%sent[loc_h:len(sent)]
                    for j in xgteq(loc_r_h, sent): # j >= loc_r(h)
                        fr['hat_a','den',x] += c(i,j, x,loc_h, sent) # v_q in L&Y
                        if 'REEST_ATTACH' in DEBUG:
                            print "\t\tc( %d , %d, %s, %s, sent)=%.4f"%(i,j,node_str(x),loc_h,fr['hat_a','den',x])
                        w_left(i, j, x,loc_h, sent,sent_nums) # compute w for all a in sent

            if 'REEST_ATTACH' in DEBUG:
                print "Rattach %s: for i <= %s"%(g.numtag(h),sent[0:loc_h+1])
            for s_h in [GOR, LGOR]:
                x = (s_h,h) # assumed
                for i in xlteq(loc_l_h): # i <= loc_l(h)
                    if 'REEST_ATTACH' in DEBUG:
                        print "\tfor j > %s"%sent[loc_h:len(sent)]
                    for j in xgt(loc_r_h, sent): # j > loc_r(h)
                        fr['hat_a','den',x] += c(i,j, x,loc_h, sent) # v_q in L&Y
                        if 'REEST_ATTACH' in DEBUG:
                            print "\t\tc( %d , %d, %s, %s, sent)=%.4f"%(loc_h,j,node_str(x),loc_h,fr['hat_a','den',x])
                        w_right(i,j, x,loc_h, sent,sent_nums) # compute w for all a in sent

    return fr # assumed: reestimate() uses the returned counts
def reestimate(old_g, corpus):
    fr = reest_freq(old_g, corpus)
    p_ROOT, p_STOP, p_ATTACH = {},{},{}

    for h in old_g.headnums():
        # reest_head changes p_ROOT, p_STOP, p_ATTACH
        reest_head(h, fr, old_g, p_ROOT, p_STOP, p_ATTACH)
    p_ORDER = old_g.p_ORDER
    numtag, tagnum = old_g.get_nums_tags()

    new_g = DMV_Grammar(numtag, tagnum, p_ROOT, p_STOP, p_ATTACH, p_ORDER)
    return new_g # assumed (callers such as testreestimation() use the result)
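# Sketch (assumed usage): a short EM-style training loop over the toy corpus,
# starting from the harmonic initializer used by testgrammar() below.
def _example_em(iterations=3):
    import loc_h_harmonic
    g = loc_h_harmonic.initialize(testcorpus)
    for _ in range(iterations):
        g = reestimate(g, testcorpus)
    return g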
def reest_head(h, fr, g, p_ROOT, p_STOP, p_ATTACH):
    "Given a single head, update g with the reestimated probability."
    # remove 0-prob stuff? todo
    if fr['ROOT','den'] > 0.0: # assumed guard (not preserved)
        p_ROOT[h] = fr['ROOT','num',h] / fr['ROOT','den']
    else:
        p_ROOT[h] = fr['ROOT','den']

    for dir in [LEFT,RIGHT]:
        for adj in [ADJ, NON]: # p_STOP
            p_STOP[h, dir, adj] = 0.0
            for s_h in dirseal(dir):
                x = (s_h,h) # assumed (not preserved)
                p = fr['STOP','den', x, adj]
                if p > 0.0: # assumed guard
                    p = fr['STOP', 'num', x, adj] / p
                p_STOP[h, dir, adj] += p

        for s_h in dirseal(dir): # make hat_a for p_ATTACH
            x = (s_h,h) # assumed
            hat_a = {} # assumed
            p_c = fr['hat_a','den',x]

            for w in g.headnums():
                try: # assumed guard: 'num' entries only exist for arguments that were seen
                    hat_a[w,x] = fr['hat_a','num',w,x] / p_c
                except (KeyError, ZeroDivisionError):
                    hat_a[w,x] = 0.0

            # this has to happen after all_w hat_a[w,x] are done:
            sum_hat_a = sum([hat_a[w,x] for w in g.headnums()
                             ]) # closing of the comprehension assumed
            for a in g.headnums():
                if (a,h,dir) not in p_ATTACH:
                    p_ATTACH[a,h,dir] = 0.0
                try: # (a,x) might not be in hat_a
                    p_ATTACH[a,h,dir] += hat_a[a,x] / sum_hat_a
                except (KeyError, ZeroDivisionError): # assumed handler (not preserved)
                    pass
###################################################
#      Reestimation v.2:                          #
#      Heads as outer loop                        #
###################################################
def locs_h(h, sent_nums):
    '''Return the between-word locations of all tokens of h in sent.'''
    return [loc_w for loc_w,w in locs(sent_nums, 0, len(sent_nums))
            if w == h] # filter assumed (not preserved), per the docstring

def locs_a(a, sent_nums, start, stop):
    '''Return the between-word locations of all tokens of a in some
    fragment of sent. We make sure to offset the locations correctly
    so that for any (loc_w, w) in the returned list, sent_nums[loc_w] == w.

    start is inclusive, stop is exclusive, as in klein-thesis and
    Python's list-slicing (eg. return left-loc).'''
    return [loc_w for loc_w,w in locs(sent_nums, start, stop)
            if w == a] # filter assumed, per the docstring
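# Sketch (assumed data, relying on the filters above): with sent_nums = [3, 7, 3],
#   locs_h(3, sent_nums)       == [0, 2]
#   locs_a(7, sent_nums, 1, 3) == [1]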
def inner2(i, j, node, loc_h, g, sent):
    s_n = g.sent_nums(sent) # assumed binding (not preserved; s_n is used below)
    ichart,ochart = g.get_iochart(s_n)
    try: p = ichart[i,j,node,loc_h] # the preserved text had x here; node assumed
    except: p = inner(i,j,node,loc_h,g,sent,ichart)
    g.set_iochart(s_n,ichart,ochart)
    return p # assumed

def inner_sent2(g, sent):
    s_n = g.sent_nums(sent) # assumed binding
    ichart,ochart = g.get_iochart(s_n)
    p = inner_sent(g,sent,ichart)
    g.set_iochart(s_n,ichart,ochart)
    return p # assumed

def outer2(i, j,w_node,loc_w, g, sent):
    s_n = g.sent_nums(sent) # assumed binding
    ichart,ochart = g.get_iochart(s_n)
    try: p = ochart[i,j,w_node,loc_w]
    except: p = outer(i,j,w_node,loc_w,g,sent,ichart,ochart) # the preserved text called inner() here; outer assumed
    g.set_iochart(s_n,ichart,ochart)
    return p # assumed
, corpus
):
622 p_ROOT
, p_STOP
, p_ATTACH
= {},{},{}
624 for h
in old_g
.headnums():
625 # reest_head changes p_ROOT, p_STOP, p_ATTACH
626 reest_head2(h
, old_g
, corpus
, p_ROOT
, p_STOP
, p_ATTACH
)
627 p_ORDER
= old_g
.p_ORDER
628 numtag
, tagnum
= old_g
.get_nums_tags()
630 new_g
= DMV_Grammar(numtag
, tagnum
, p_ROOT
, p_STOP
, p_ATTACH
, p_ORDER
)
633 def hat_d2(xbar
, x
, xi
, xj
, g
, corpus
): # stop helper
634 def c(x
,loc_x
,i
,j
): return c2(x
,loc_x
,i
,j
,g
,s_n
,sent
)
637 if h
!= POS(xbar
): raise ValueError
640 for s_n
,sent
in [(g
.sent_nums(sent
),sent
) for sent
in corpus
]:
641 for loc_h
in locs_h(h
,s_n
):
642 loc_l_h
, loc_r_h
= loc_h
, loc_h
+ 1
643 for i
in xi(loc_l_h
):
644 for j
in xj(loc_r_h
, s_n
):
645 # print "s:%s %d,%d"%(sent,i,j)
646 num
+= c(xbar
,loc_h
,i
,j
)
647 den
+= c(x
,loc_h
,i
,j
)
650 return num
/den
# eg. SEAL/RGOL, xbar/x
def c2(x,loc_h,i,j,g,s_n,sent):
    ichart,ochart = g.get_iochart(s_n)

    def f(i,j,x,loc_h): # P_{OUTSIDE}
        try: return ochart[i,j,x,loc_h]
        except: return outer(i,j,x,loc_h,g,sent,ichart,ochart)
    def e(i,j,x,loc_h): # P_{INSIDE}
        try: return ichart[i,j,x,loc_h]
        except: return inner(i,j,x,loc_h,g,sent,ichart)

    p_sent = inner_sent(g, sent, ichart)

    p_in = e(i,j, x,loc_h)

    p_out = f(i,j, x,loc_h)

    g.set_iochart(s_n,ichart,ochart)
    return p_in * p_out / p_sent
def w2(a, x,loc_h, dir, i, j, g, s_n,sent):
    ichart,ochart = g.get_iochart(s_n)

    def f(i,j,x,loc_h): # P_{OUTSIDE}
        try: return ochart[i,j,x,loc_h]
        except: return outer(i,j,x,loc_h,g,sent,ichart,ochart)
    def e(i,j,x,loc_h): # P_{INSIDE}
        try: return ichart[i,j,x,loc_h]
        except: return inner(i,j,x,loc_h,g,sent,ichart)

    h = POS(x) # assumed bindings; several lines here are not preserved
    p_sent = inner_sent(g, sent, ichart)
    w_sum = 0.0 # assumed accumulator
    out = f(i,j,x,loc_h) # assumed: outside probability of x over [i,j]

    for k in xtween(i,j):
        if dir == LEFT: # assumed: the sealed argument is the left child
            start, stop = i, k
            L, R = (SEAL,a), x
        else: # dir == RIGHT
            start, stop = k, j
            L, R = x, (SEAL,a)
        for loc_a in locs_a(a, s_n, start, stop):
            if dir == LEFT: # assumed branch headers (not preserved)
                loc_L, loc_R = loc_a, loc_h
            else:
                loc_L, loc_R = loc_h, loc_a
            p = g.p_GO_AT_or0(a,h,dir,adj(k,loc_h))
            in_L = e(i,k,L,loc_L)
            in_R = e(k,j,R,loc_R)
            w_sum += p * in_L * in_R * out

    g.set_iochart(s_n,ichart,ochart)
    return w_sum # assumed
def hat_a2(a, x, dir, g, corpus): # attachment helper
    def w(a,x,loc_x,dir,i,j): return w2(a,x,loc_x,dir,i,j,g,s_n,sent)
    def c(x,loc_x,i,j): return c2(x,loc_x,i,j,g,s_n,sent)

    h = POS(x) # assumed; the lines between the helpers and the loop are not preserved
    num, den = 0.0, 0.0 # assumed accumulators
    if dir == LEFT: # assumed: span iterators chosen by direction, as in reest_freq()
        xi, xj = xlt, xgteq
    else:
        xi, xj = xlteq, xgt
    for s_n,sent in [(g.sent_nums(sent),sent) for sent in corpus]:
        for loc_h in locs_h(h,s_n):
            loc_l_h, loc_r_h = loc_h, loc_h + 1
            for i in xi(loc_l_h):
                for j in xj(loc_r_h,sent):
                    num += w(a, x,loc_h, dir, i,j)
                    den += c(x,loc_h, i,j)
    if den > 0.0: # assumed closing, mirroring the div() guard in reest_head2()
        return num / den
    return 0.0
def reest_root2(h,g,corpus):
    num, den = 0.0, 0.0 # assumed accumulators (not preserved)
    for s_n,sent in [(g.sent_nums(sent),sent) for sent in corpus]:
        ichart, ochart = g.get_iochart(s_n)
        den += inner_sent(g, sent, ichart)
        for loc_h in locs_h(h,s_n):
            # the "g.p_ROOT[h] *" factor is assumed (not preserved), as in reest_freq():
            num += g.p_ROOT[h] * \
                   inner(0, len(s_n), (SEAL,h), loc_h, g, sent, ichart)
        g.set_iochart(s_n, ichart, ochart)
    return num / den # assumed
def reest_head2(h, g, corpus, p_ROOT, p_STOP, p_ATTACH):
    print "h: %d=%s ..."%(h,g.numtag(h)),
    def hat_d(xbar,x,xi,xj): return hat_d2(xbar,x,xi,xj, g, corpus)
    def hat_a(a, x, dir ): return hat_a2(a, x, dir, g, corpus)
    def div(num, den): # assumed helper header (not preserved); div() is used below
        if not ("0.0"=="%s"%den or "1.0"=="%s"%den):
            print "in reest_head2:div, got den=%s"%den
        # return num # todo: test without division by sum_hat_a
        if den > 0.0: return num / den
        else: # assumed
            if num > 0.0: raise ValueError
            return 0.0 # assumed

    p_STOP[h, LEFT ,NON] = \
        hat_d((SEAL,h),(RGOL,h),xlt,  xgteq) + \
        hat_d((LGOR,h),( GOL,h),xlt,  xeq)
    p_STOP[h, LEFT ,ADJ] = \
        hat_d((SEAL,h),(RGOL,h),xeq,  xgteq) + \
        hat_d((LGOR,h),( GOL,h),xeq,  xeq)
    p_STOP[h,RIGHT,NON] = \
        hat_d((RGOL,h),( GOR,h),xeq,  xgt) + \
        hat_d((SEAL,h),(LGOR,h),xlteq,xgt)
    p_STOP[h,RIGHT,ADJ] = \
        hat_d((RGOL,h),( GOR,h),xeq,  xeq) + \
        hat_d((SEAL,h),(LGOR,h),xlteq,xeq)
    print "stops done...",
    p_ROOT[h] = reest_root2(h,g,corpus)
    print "root done...",
    for a in g.headnums():
        p_ATTACH[a,h,LEFT] = \
            div( hat_a(a, (GOL,h),LEFT),
                 sum([hat_a(w, (GOL,h),LEFT) for w in g.headnums()]) ) + \
            div( hat_a(a,(RGOL,h),LEFT),
                 sum([hat_a(w,(RGOL,h),LEFT) for w in g.headnums()]) )
        p_ATTACH[a,h,RIGHT] = \
            div( hat_a(a, (GOR,h),RIGHT),
                 sum([hat_a(w, (GOR,h),RIGHT) for w in g.headnums()]) ) + \
            div( hat_a(a,(LGOR,h),RIGHT),
                 sum([hat_a(w,(LGOR,h),RIGHT) for w in g.headnums()]) )

    print "attachment done"
###################################################
#      Most Probable Parse:                       #
###################################################
STOPKEY = (-1,-1,STOP,-1)
ROOTKEY = (-1,-1,ROOT,-1)
def make_mpptree(g, sent):
    '''Tell inner() to make an mpptree, connect ROOT to this. (Logically,
    this should be part of inner_sent though...)'''
    ichart = {} # assumed (not preserved)
    mpptree = { ROOTKEY:(0.0, ROOTKEY, None) }
    for loc_w,w in locs(g.sent_nums(sent),0,len(sent)):
        p = g.p_ROOT[w] * inner(0, len(sent), (SEAL,w), loc_w, g, sent, ichart, mpptree)
        L = STOPKEY # assumed (not preserved); cf. the ROOTKEY entry in testmpp_regression()
        R = (0,len(sent), (SEAL,w), loc_w)
        if mpptree[ROOTKEY][0] < p:
            mpptree[ROOTKEY] = (p, L, R)
    return mpptree # assumed
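# Sketch (cf. testmpp_regression below): after make_mpptree() the entry at
# ROOTKEY holds (best probability, STOPKEY, key of the best sealed root);
# parse_mpptree()/mpp() then turn the table into a set of dependencies.
def _example_mpp():
    return mpp(testgrammar(), testcorpus[2])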
def parse_mpptree(mpptree, sent):
    '''mpptree is a dict of the form {k:(p,L,R),...}; where k, L and R
    are `keys' of the form (i,j,node,loc).

    returns an mpp of the form [((head, loc_h),(arg, loc_a)), ...],
    where head and arg are tags.'''
    # local functions for clear access to mpptree:
    def k_node(key): # the helper def-lines are assumed (only their bodies are preserved)
        return key[2]
    def k_POS(key):
        return POS(k_node(key))
    def k_seals(key):
        return seals(k_node(key))
    def k_locnode(key):
        return (k_node(key),key[3])
    def k_locPOS(key):
        return (k_POS(key),key[3])
    def k_terminal(key):
        s_k = k_seals(key) # i+1 == j
        return key[0] + 1 == key[1] and (s_k == GOR or s_k == GOL)
    def t_L(tree_entry): # assumed accessors for the stored (p, L, R) triples
        return tree_entry[1]
    def t_R(tree_entry):
        return tree_entry[2]

    # arbitrarily, "ROOT attaches to right". We add it here to
    # avoid further complications:
    firstkey = t_R(mpptree[ROOTKEY])
    deps = set([ (k_locPOS(ROOTKEY), k_locPOS(firstkey), RIGHT) ])

    # assumed traversal (not preserved): walk the best tree from firstkey,
    # following the stored (p, L, R) triples and collecting attachments.
    queue = [firstkey]
    while queue != []:
        k = queue.pop()
        L = t_L( mpptree[k] )
        R = t_R( mpptree[k] )
        if k_locnode( k ) == k_locnode( L ): # Rattach
            deps.add((k_locPOS( k ), k_locPOS( R ), LEFT))
        elif k_locnode( k ) == k_locnode( R ): # Lattach
            deps.add((k_locPOS( k ), k_locPOS( L ), RIGHT))
        for child in (L, R): # assumed: descend into non-terminal children that have entries
            if child in mpptree and child != ROOTKEY and not k_terminal(child):
                queue.append(child)
    return deps # assumed
def mpp(g, sent): # assumed header (not preserved; cf. the commented-out use in __main__)
    tagf = g.numtag # localized function, todo: speed-test
    mpptree = make_mpptree(g, sent)
    return set([((tagf(h), loc_h), (tagf(a), loc_a))
                for (h, loc_h),(a,loc_a),dir in parse_mpptree(mpptree,sent)])
########################################################################
#                        testing functions:                            #
########################################################################
testcorpus = [s.split() for s in ['det nn vbd c vbd','vbd nn c vbd',
                                  'det nn vbd', 'det nn vbd c pp',
                                  'det nn vbd', 'det vbd vbd c pp',
                                  'det nn vbd', 'det nn vbd c vbd',
                                  'det nn vbd', 'det nn vbd c vbd',
                                  'det nn vbd', 'det nn vbd c vbd',
                                  'det nn vbd', 'det nn vbd c pp',
                                  'det nn vbd pp', 'det nn vbd', ]]
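# Sketch (assumed usage, mirroring the commented-out loop in __main__ below):
#   for s in testcorpus[:2]:
#       print "sent:%s parse:%s" % (s, mpp(testgrammar(), s))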
def testgrammar(): # assumed header (not preserved; the tests below call testgrammar())
    import loc_h_harmonic
    reload(loc_h_harmonic)

    # make sure these are the way they were when setting up the tests:
    loc_h_harmonic.HARMONIC_C = 0.0
    loc_h_harmonic.FNONSTOP_MIN = 25
    loc_h_harmonic.FSTOP_MIN = 5
    loc_h_harmonic.RIGHT_FIRST = 1.0

    return loc_h_harmonic.initialize(testcorpus)
def testreestimation2():
    g2 = testgrammar() # assumed (not preserved; g2 is used below)
    reestimate2(g2, testcorpus)

def testreestimation():
    g = testgrammar() # assumed
    g = reestimate(g, testcorpus)
    return g # assumed
def testmpp_regression(mpptree,k_n):
    mpp = {ROOTKEY: (2.877072116829971e-05, STOPKEY, (0, 3, (2, 3), 1)),
           (0, 1, (1, 1), 0): (0.1111111111111111, (0, 1, (0, 1), 0), STOPKEY),
           (0, 1, (2, 1), 0): (0.049382716049382713, STOPKEY, (0, 1, (1, 1), 0)),
           (0, 3, (1, 3), 1): (0.00027619892321567721,
                               None, None), # L and R of this entry are not preserved
           (0, 3, (2, 3), 1): (0.00012275507698474543, STOPKEY, (0, 3, (1, 3), 1)),
           (1, 3, (0, 3), 1): (0.025280986819448362,
                               None, None), # L and R of this entry are not preserved
           (1, 3, (1, 3), 1): (0.0067415964851862296, (1, 3, (0, 3), 1), STOPKEY),
           (2, 3, (1, 4), 2): (0.32692307692307693, (2, 3, (0, 4), 2), STOPKEY),
           (2, 3, (2, 4), 2): (0.037721893491124266, STOPKEY, (2, 3, (1, 4), 2))}
    for k,(v,L,R) in mpp.iteritems():
        k2 = k[0:k_n] # 3 if the new does not check loc_h
        if k2 not in mpptree:
            print "mpp regression, %s missing"%(k2,)
        else: # assumed (only the probabilities are compared below)
            vnew = mpptree[k2][0]
            if not "%.10f"%vnew == "%.10f"%v:
                print "mpp regression, wanted %s=%.5f, got %.5f"%(k2,v,vnew)
def testgrammar_a(): # assumed header; used by regression_tests() below
    h, a = 0, 1 # assumed tag numbers; these and the p_ROOT entries are not preserved
    p_ROOT, p_STOP, p_ATTACH, p_ORDER = {},{},{},{}

    p_STOP[h,LEFT,NON] = 1.0
    p_STOP[h,LEFT,ADJ] = 1.0
    p_STOP[h,RIGHT,NON] = 0.4 # RSTOP
    p_STOP[h,RIGHT,ADJ] = 0.3 # RSTOP
    p_STOP[a,LEFT,NON] = 1.0
    p_STOP[a,LEFT,ADJ] = 1.0
    p_STOP[a,RIGHT,NON] = 0.4 # RSTOP
    p_STOP[a,RIGHT,ADJ] = 0.3 # RSTOP
    p_ATTACH[a,h,LEFT] = 1.0 # not used
    p_ATTACH[a,h,RIGHT] = 1.0 # not used
    p_ATTACH[h,a,LEFT] = 1.0 # not used
    p_ATTACH[h,a,RIGHT] = 1.0 # not used
    p_ATTACH[h,h,LEFT] = 1.0 # not used
    p_ATTACH[h,h,RIGHT] = 1.0 # not used
    p_ORDER[(GOR, h)] = 1.0
    p_ORDER[(GOL, h)] = 0.0
    p_ORDER[(GOR, a)] = 1.0
    p_ORDER[(GOL, a)] = 0.0
    g = DMV_Grammar({h:'h',a:'a'}, {'h':h,'a':a}, p_ROOT, p_STOP, p_ATTACH, p_ORDER)
    # these probabilities are impossible so add them manually:
    g.p_GO_AT[a,a,LEFT,NON] = 0.4 # Lattach
    g.p_GO_AT[a,a,LEFT,ADJ] = 0.6 # Lattach
    g.p_GO_AT[h,a,LEFT,NON] = 0.2 # Lattach to h
    g.p_GO_AT[h,a,LEFT,ADJ] = 0.1 # Lattach to h
    g.p_GO_AT[a,a,RIGHT,NON] = 1.0 # Rattach
    g.p_GO_AT[a,a,RIGHT,ADJ] = 1.0 # Rattach
    g.p_GO_AT[h,a,RIGHT,NON] = 1.0 # Rattach to h
    g.p_GO_AT[h,a,RIGHT,ADJ] = 1.0 # Rattach to h
    g.p_GO_AT[h,h,LEFT,NON] = 0.2 # Lattach
    g.p_GO_AT[h,h,LEFT,ADJ] = 0.1 # Lattach
    g.p_GO_AT[a,h,LEFT,NON] = 0.4 # Lattach to a
    g.p_GO_AT[a,h,LEFT,ADJ] = 0.6 # Lattach to a
    g.p_GO_AT[h,h,RIGHT,NON] = 1.0 # Rattach
    g.p_GO_AT[h,h,RIGHT,ADJ] = 1.0 # Rattach
    g.p_GO_AT[a,h,RIGHT,NON] = 1.0 # Rattach to a
    g.p_GO_AT[a,h,RIGHT,ADJ] = 1.0 # Rattach to a
    return g # assumed (the closing lines are not preserved)
def testgrammar_h(): # assumed header; used by regression_tests() below
    h = 0 # assumed tag number; this and the p_ROOT entry are not preserved
    p_ROOT, p_STOP, p_ATTACH, p_ORDER = {},{},{},{}

    p_STOP[h,LEFT,NON] = 1.0
    p_STOP[h,LEFT,ADJ] = 1.0
    p_STOP[h,RIGHT,NON] = 0.4
    p_STOP[h,RIGHT,ADJ] = 0.3
    p_ATTACH[h,h,LEFT] = 1.0 # not used
    p_ATTACH[h,h,RIGHT] = 1.0 # not used
    p_ORDER[(GOR, h)] = 1.0
    p_ORDER[(GOL, h)] = 0.0
    g = DMV_Grammar({h:'h'}, {'h':h}, p_ROOT, p_STOP, p_ATTACH, p_ORDER)
    g.p_GO_AT[h,h,LEFT,NON] = 0.6 # these probabilities are impossible
    g.p_GO_AT[h,h,LEFT,ADJ] = 0.7 # so add them manually...
    g.p_GO_AT[h,h,RIGHT,NON] = 1.0
    g.p_GO_AT[h,h,RIGHT,ADJ] = 1.0
    return g # assumed
999 reestimate(g
,['h h h'.split()])
1002 def test(wanted
, got
):
1003 if not wanted
== got
:
1004 raise Warning, "Regression! Should be %s: %s" % (wanted
, got
)
1006 def regression_tests():
1007 testmpp_regression(make_mpptree(testgrammar(), testcorpus
[2]),4)
1011 "%.3f" % inner(0, 2, (SEAL
,h
), 0, testgrammar_h(), 'h h'.split(),{}))
1013 "%.3f" % inner(0, 2, (SEAL
,h
), 1, testgrammar_h(), 'h h'.split(),{}))
1015 "%.4f" % inner_sent(testgrammar_h(), 'h h h'.split(),{}))
1018 "%.4f" % inner(0, 3, (SEAL
,0), 0, testgrammar_h(), 'h h h'.split(),{}))
1020 "%.4f" % inner(0, 3, (SEAL
,0), 1, testgrammar_h(), 'h h h'.split(),{}))
1022 "%.4f" % inner(0, 3, (SEAL
,h
), 2, testgrammar_h(), 'h h h'.split(),{}))
1025 "%.2f" % outer(1, 3, (RGOL
,h
), 2, testgrammar_h(),'h h h'.split(),{},{}))
1026 test("0.61" , # ftw? can't be right... there's an 0.4 shared between these two...
1027 "%.2f" % outer(1, 3, (RGOL
,h
), 1, testgrammar_h(),'h h h'.split(),{},{}))
1030 "%.2f" % outer(1, 3, (RGOL
,h
), 0, testgrammar_h(),'h h h'.split(),{},{}))
1032 "%.2f" % outer(1, 3, (RGOL
,h
), 3, testgrammar_h(),'h h h'.split(),{},{}))
1035 "%.4f" % outer(0, 1, (GOR
,h
), 0,testgrammar_a(),'h a'.split(),{},{}))
1037 "%.4f" % outer(0, 2, (GOR
,h
), 0,testgrammar_a(),'h a'.split(),{},{}))
1039 "%.4f" % outer(0, 3, (GOR
,h
), 0,testgrammar_a(),'h a'.split(),{},{}))
1041 # todo: add more of these tests...
def compare_grammars(g1,g2):
    result = "" # assumed (not preserved)
    for d1,d2 in [(g1.p_ATTACH,g2.p_ATTACH),(g1.p_STOP,g2.p_STOP),
                  (g1.p_ORDER, g2.p_ORDER), (g1.p_ROOT,g2.p_ROOT) ]:
        for k,v in d1.iteritems():
            if k not in d2: # assumed guard
                result += "\nreestimate1[%s]=%s missing from reestimate2"%(k,v)
            elif "%s"%d2[k] != "%s"%v:
                result += "\nreestimate1[%s]=%s while \nreestimate2[%s]=%s."%(k,v,k,d2[k])
        for k,v in d2.iteritems():
            if k not in d1: # assumed guard
                result += "\nreestimate2[%s]=%s missing from reestimate1"%(k,v)
    return result # assumed
def testNVNgrammar():
    from loc_h_harmonic import initialize
    g = initialize(['n v n'.split()])
    return g # assumed (__main__ below uses the returned grammar)
def testIO(): # assumed wrapper; only the list comprehension below is preserved
    g = testgrammar() # assumed
    inners = [(sent, inner_sent(g, sent, {})) for sent in testcorpus]
    return inners # assumed
if __name__ == "__main__":
    # profile.run('testreestimation()')

    # print timeit.Timer("loc_h_dmv.testreestimation()",'''import loc_h_dmv
    # reload(loc_h_dmv)''').timeit(1)

    # for s in testcorpus:
    #     print "sent:%s\nparse:set(\n%s)"%(s,pprint.pformat(list(mpp(testgrammar(), s)),

    # g1 = testreestimation()
    # g2 = testreestimation2()
    # print compare_grammars(g1,g2)
    g = testNVNgrammar()
    q_sent = inner_sent(g,'n v n'.split(),{})
    q_tree = {} # assumed initialization (not preserved)
    q_tree[1] = 2.7213e-06 # n_0 -> v, n_0 -> n_2
    q_tree[2] = 9.738e-06 # n -> v -> n
    q_tree[3] = 2.268e-06 # n_0 -> n_2 -> v
    q_tree[4] = 2.7213e-06 # same as 1-3
    q_tree[5] = 9.738e-06
    q_tree[6] = 2.268e-06
    q_tree[7] = 1.086e-06 # n <- v -> n
    f_T_q = {} # assumed initialization
    for i,q_t in q_tree.iteritems():
        f_T_q[i] = q_t / q_sent
    import pprint # assumed import (pprint is used below)
    pprint.pprint(q_tree)
    pprint.pprint(f_T_q)
    print sum([f for f in f_T_q.values()])
    def treediv(num,den):
        return sum([f_T_q[i] for i in num]) / \
               sum([f_T_q[i] for i in den])
    g2 = {} # assumed initialization (not preserved)
    g2['root --> _n_'] = treediv( (1,2,3,4,5,6), (1,2,3,4,5,6,7) )
    g2['root --> _v_'] = treediv( (7,), (1,2,3,4,5,6,7) )
    g2['_n_ --> STOP n><'] = treediv( (1,2,3,4,5,6,7,1,2,3,4,5,6,7),
                                      (1,2,3,4,5,6,7,1,2,3,4,5,6,7))
    # the remaining tree-index tuples for the entries below are not preserved,
    # so these lines are left commented out rather than guessed at:
    # g2['_v_ --> STOP v><'] = treediv( (1,2,3,4,5,6,7), ... )
    # nlrtrees = (1,2,3,4,5,6,7,1,2,3,4,5,6,7, ...
    # g2['n>< --> _n_ n><'] = treediv( (4,6), nlrtrees )
    # g2['n>< --> _v_ n><'] = treediv( (3,4,5), nlrtrees )
    # g2['n>< --> n> STOP'] = treediv( (1,2,3,4,5,6,7,1,2,3,4,5,6,7), ... )
    # vlrtrees = (1,2,3,4,5,6,7, ...
    # g2['v>< --> _n_ v><'] = treediv( (5,7), vlrtrees )
    # g2['v>< --> v> STOP'] = treediv( (1,2,3,4,5,6,7), vlrtrees )
    # nrtrees = (1,2,3,4,5,6,7,1,2,3,4,5,6,7, ...
    # g2['n> --> n> _n_'] = treediv( (1,3), nrtrees )
    # g2['n> --> n> _v_'] = treediv( (1,2,6), nrtrees )
    # vrtrees = (1,2,3,4,5,6,7, ...
    # g2['v> --> v> _n_'] = treediv( (2,7), vrtrees )
    # g2[' v|n,R '] = treediv( (1, 2, 6), ... )
    # g2[' n|n,R '] = treediv( (1, 3), ... )

    g3 = reestimate2(g, ['n v n'.split()])