# cython: language_level=3
# cython: profile=True
# Time-stamp: <2019-10-30 16:33:42 taoliu>

"""Module Description: Build shifting model

This code is free software; you can redistribute it and/or modify it
under the terms of the BSD License (see the file LICENSE included with
the distribution).
"""
import sys, time, random
import numpy as np
cimport numpy as np
from cpython cimport array
import array
from MACS2.Constants import *
from cpython cimport bool
from libc.stdint cimport uint32_t, uint64_t, int32_t, int64_t
ctypedef np.float32_t float32_t

cpdef median (nums):
    """Calculate Median.

    Parameters:
        nums: list of numbers
    Return Value:
        median value
    """
    p = sorted(nums)
    l = len(p)
    if l%2 == 0:
        return (p[l//2]+p[l//2-1])/2
    else:
        return p[l//2]
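# Illustrative examples of the two branches above (editor's sketch, not from
# the original source). With language_level=3, "/" is true division, so an
# even-length list can return a non-integer median:
#   median([1, 2, 3, 4])  ->  2.5   (mean of the two middle values)
#   median([3, 1, 2])     ->  2     (the single middle value)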

class NotEnoughPairsException(Exception):
    def __init__ (self,value):
        self.value = value
    def __str__ (self):
        return repr(self.value)

cdef class PeakModel:
    """Peak Model class.
    """
    cdef:
        object treatment
        double gz
        int max_pairnum
        int umfold
        int lmfold
        int bw
        int d_min
        int tag_expansion_size
        object info, debug, warn, error
        str summary
        public np.ndarray plus_line, minus_line, shifted_line
        public int d
        public int scan_window
        public int min_tags
        int max_tags
        int peaksize
        public list alternative_d
        public np.ndarray xcorr, ycorr

    def __init__ ( self, opt, treatment, int max_pairnum=500 ): #, double gz = 0, int umfold=30, int lmfold=10, int bw=200, int ts = 25, int bg=0, bool quiet=False):
        self.treatment = treatment
        #if opt:
        self.gz = opt.gsize
        self.umfold = opt.umfold
        self.lmfold = opt.lmfold
        self.tag_expansion_size = 10  # instead of opt.tsize, use 10 bp: we want the best 'lag' between left and right cutting sides, so each tag is expanded to 10 bp centered at its cutting point.
        self.d_min = opt.d_min        # discard any fragment size < d_min
        self.bw = opt.bw
        self.info = opt.info
        self.debug = opt.debug
        self.warn = opt.warn
        self.error = opt.error
        #else:
        #    self.gz = gz
        #    self.umfold = umfold
        #    self.lmfold = lmfold
        #    self.tag_expansion_size = ts
        #    self.bg = bg
        #    self.bw = bw
        #    self.info = lambda x: sys.stderr.write(x+"\n")
        #    self.debug = lambda x: sys.stderr.write(x+"\n")
        #    self.warn = lambda x: sys.stderr.write(x+"\n")
        #    self.error = lambda x: sys.stderr.write(x+"\n")
        #if quiet:
        #    self.info = lambda x: None
        #    self.debug = lambda x: None
        #    self.warn = lambda x: None
        #    self.error = lambda x: None

        self.max_pairnum = max_pairnum
        #self.summary = ""
        #self.plus_line = None
        #self.minus_line = None
        #self.shifted_line = None
        #self.d = None
        #self.scan_window = None
        #self.min_tags = None
        #self.peaksize = None
        self.build()

    cpdef build (self):
        """Build the model.

        Prepare self.d, self.scan_window, self.plus_line,
        self.minus_line and self.shifted_line for later use.
        """
        cdef:
            dict paired_peakpos
            long num_paired_peakpos, num_paired_peakpos_remained, num_paired_peakpos_picked
            bytes c             # chromosome

        self.peaksize = 2*self.bw
        self.min_tags = int(round(float(self.treatment.total) * self.lmfold * self.peaksize / self.gz / 2)) # minimum unique hits on a single strand
        self.max_tags = int(round(float(self.treatment.total) * self.umfold * self.peaksize / self.gz / 2)) # maximum unique hits on a single strand
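        # Worked example of the two formulas above (illustrative numbers, not
        # from the original source): with ~20 million treatment tags,
        # lmfold=10, umfold=30, bw=200 (so peaksize=400) and gz=2.7e9,
        #   min_tags = round(2e7 * 10 * 400 / 2.7e9 / 2) = round(14.8) = 15
        #   max_tags = round(2e7 * 30 * 400 / 2.7e9 / 2) = round(44.4) = 44
        # i.e. a candidate region must hold between 15 and 44 unique tags on
        # one strand to be used for model building.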
        #print self.min_tags, self.max_tags
        #print self.min_tags
        #print self.max_tags
        # use treatment data to build model
        self.info("#2 looking for paired plus/minus strand peaks...")
        paired_peakpos = self.__paired_peaks ()
        # select up to max_pairnum pairs of peaks to build the model
        num_paired_peakpos = 0
        num_paired_peakpos_remained = self.max_pairnum
        num_paired_peakpos_picked = 0
        # select only num_paired_peakpos_remained pairs.
        for c in list(paired_peakpos.keys()):
            num_paired_peakpos += len(paired_peakpos[c])
        # TL: Now I want to use everything
        num_paired_peakpos_picked = num_paired_peakpos

        self.info("#2 number of paired peaks: %d" % (num_paired_peakpos))
        if num_paired_peakpos < 100:
            self.error("Too few paired peaks (%d) to build the model! Broadening your MFOLD range parameter may fix this error. If the model still cannot be built, we suggest using --nomodel and --extsize 147, or another fixed number, instead." % (num_paired_peakpos))
            self.error("Process for pairing-model is terminated!")
            raise NotEnoughPairsException("Not enough pairs to build model")
        elif num_paired_peakpos < self.max_pairnum:
            self.warn("Fewer paired peaks (%d) than %d! The model may not be built well! Lowering your MFOLD parameter may remove this warning. Now I will use %d pairs to build the model!" % (num_paired_peakpos,self.max_pairnum,num_paired_peakpos_picked))
        self.debug("Use %d pairs to build the model." % (num_paired_peakpos_picked))
        self.__paired_peak_model(paired_peakpos)

    def __str__ (self):
        """For debug...

        """
        return """
Summary of Peak Model:
  Baseline: %d
  Upperline: %d
  Fragment size: %d
  Scan window size: %d
""" % (self.min_tags,self.max_tags,self.d,self.scan_window)

    cdef __paired_peak_model (self, paired_peakpos):
        """Use paired peak positions and treatment tag positions to build the model.

        Modify self.d, self.scan_window, and, for plotting, self.plus_line,
        self.minus_line and self.shifted_line.
        """
        cdef:
            int window_size, i
            list chroms
            object paired_peakpos_chrom
            np.ndarray[np.int32_t, ndim=1] tags_plus, tags_minus, plus_start, plus_end, minus_start, minus_end, plus_line, minus_line
            np.ndarray plus_data, minus_data, xcorr, ycorr, i_l_max

        window_size = 1+2*self.peaksize+self.tag_expansion_size
        self.plus_line = np.zeros(window_size, dtype="int32")  # for plus strand pileup
        self.minus_line = np.zeros(window_size, dtype="int32") # for minus strand pileup
        plus_start = np.zeros(window_size, dtype="int32")      # for fast pileup
        plus_end = np.zeros(window_size, dtype="int32")        # for fast pileup
        minus_start = np.zeros(window_size, dtype="int32")     # for fast pileup
        minus_end = np.zeros(window_size, dtype="int32")       # for fast pileup
        #self.plus_line = [0]*window_size
        #self.minus_line = [0]*window_size
        self.info("start model_add_line...")
        chroms = list(paired_peakpos.keys())

        for i in range(len(chroms)):
            paired_peakpos_chrom = paired_peakpos[chroms[i]]
            (tags_plus, tags_minus) = self.treatment.get_locations_by_chr(chroms[i])
            # every paired peak has a plus line and a minus line
            # add plus_line
            #self.plus_line = self.__model_add_line (paired_peakpos_chrom, tags_plus, self.plus_line) #, plus_strand=1)
            self.__model_add_line (paired_peakpos_chrom, tags_plus, plus_start, plus_end)    #, plus_strand=1)
            self.__model_add_line (paired_peakpos_chrom, tags_minus, minus_start, minus_end) #, plus_strand=0)
            # add minus_line
            #self.minus_line = self.__model_add_line (paired_peakpos_chrom, tags_minus, self.minus_line) #, plus_strand=0)

        self.__count ( plus_start, plus_end, self.plus_line )
        self.__count ( minus_start, minus_end, self.minus_line )

        self.info("start X-correlation...")
        # Now use cross-correlation to find the best d
        #plus_line = np.asarray(self.plus_line,dtype="int32")
        #minus_line = np.asarray(self.minus_line,dtype="int32")
        plus_line = self.plus_line
        minus_line = self.minus_line

        # normalize first
        minus_data = (minus_line - minus_line.mean())/(minus_line.std()*len(minus_line))
        plus_data = (plus_line - plus_line.mean())/(plus_line.std()*len(plus_line))
        #print "plus:",len(plus_data)
        #print "minus:",len(minus_data)

        # cross-correlation
        ycorr = np.correlate(minus_data,plus_data,mode="full")[window_size-self.peaksize:window_size+self.peaksize]
        xcorr = np.linspace(len(ycorr)//2*-1, len(ycorr)//2, num=len(ycorr))

        # smooth correlation values to get rid of local maxima caused by small fluctuations.
        ycorr = smooth(ycorr, window="flat") # window size is 11 by default.

        # all local maxima are candidate alternative d values.
        i_l_max = np.r_[False, ycorr[1:] > ycorr[:-1]] & np.r_[ycorr[:-1] > ycorr[1:], False]
        i_l_max = np.where(i_l_max)[0]
        i_l_max = i_l_max[ xcorr[i_l_max] > self.d_min ]
        i_l_max = i_l_max[ np.argsort(ycorr[i_l_max])[::-1] ]
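        # Toy illustration of the local-maximum mask above (editor's sketch,
        # not from the original source): for ycorr = [0, 2, 1, 3, 1],
        #   np.r_[False, ycorr[1:] > ycorr[:-1]]  ->  [F, T, F, T, F]
        #   np.r_[ycorr[:-1] > ycorr[1:], False]  ->  [F, T, F, T, F]
        # so the AND marks indices 1 and 3 as strict local maxima; these are
        # then filtered by d_min and sorted by correlation height, so that
        # i_l_max[0] points at the highest usable peak of ycorr.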
        # filter(lambda i: xcorr[i]>self.d_min, i_l_max )
        # i_l_max = sorted(i_l_max,
        #                  key=ycorr.__getitem__,
        #                  reverse=True)
        self.alternative_d = sorted([int(x) for x in xcorr[i_l_max]])
        assert len(self.alternative_d) > 0, "No proper d can be found! Tweak --mfold?"

        self.d = xcorr[i_l_max[0]]
        # i_l_max = filter(lambda: ycorr)
        # tmp_cor_alternative_d = ycorr[ i_l_max ]
        # tmp_alternative_d = xcorr[ i_l_max ]
        # cor_alternative_d = tmp_cor_alternative_d [ tmp_alternative_d > 0 ]
        # self.alternative_d = map( int, tmp_alternative_d[ tmp_alternative_d > 0 ] )

        # best cross-correlation point
        # d_cand = xcorr[ np.where( ycorr == sorted( cor_alternative_d [::-1] ) ) ]
        # print (cor_alternative_d)
        # print (d_cand)
        # d_cand = xcorr[ np.where( ycorr== max( cor_alternative_d ) )[0] ]

        #### make sure fragment size is not zero
        # self.d = [ x for x in d_cand if x > self.d_min ] [0]
        #self.d = xcorr[np.where(ycorr==max(ycorr))[0][0]] #+self.tag_expansion_size

        # get rid of the last local maximum if it's at the right end of the curve.

        self.ycorr = ycorr
        self.xcorr = xcorr

        #shift_size = self.d/2

        self.scan_window = max(self.d,self.tag_expansion_size)*2
        # a shifted model
        #self.shifted_line = [0]*window_size

        self.info("end of X-cor")

        return True

    cdef __model_add_line (self, object pos1, np.ndarray[np.int32_t, ndim=1] pos2, np.ndarray[np.int32_t, ndim=1] start, np.ndarray[np.int32_t, ndim=1] end): #, int plus_strand=1):
        """Project each pos in pos2 which is included in
        [pos1-self.peaksize,pos1+self.peaksize] onto the line.

        pos1: paired centers -- array.array
        pos2: tags of a certain strand -- a numpy.array object
        start, end: numpy arrays of +1/-1 pileup deltas; __count turns them into the actual pileup line
        """
        cdef:
            int i1, i2, i2_prev, i1_max, i2_max, last_p2, psize_adjusted1, psize_adjusted2, p1, p2, max_index, s, e

        i1 = 0                  # index for pos1
        i2 = 0                  # index for pos2
        i2_prev = 0             # index for pos2 in the previous
                                # [pos1-self.peaksize,pos1+self.peaksize]
                                # region
        i1_max = len(pos1)
        i2_max = pos2.shape[0]
        last_p2 = -1
        flag_find_overlap = False

        max_index = start.shape[0] - 1

        psize_adjusted1 = self.peaksize + self.tag_expansion_size // 2 # half window

        while i1<i1_max and i2<i2_max:
            p1 = pos1[i1]
            #if plus_strand:
            #    p2 = pos2[i2]
            #else:
            #    p2 = pos2[i2] - self.tag_expansion_size

            p2 = pos2[i2] #- self.tag_expansion_size/2

            if p1-psize_adjusted1 > p2: # move pos2
                i2 += 1
            elif p1+psize_adjusted1 < p2: # move pos1
                i1 += 1
                i2 = i2_prev    # search tags from the previous index
                flag_find_overlap = False
            else:               # overlap!
                if not flag_find_overlap:
                    flag_find_overlap = True
                    i2_prev = i2 # only the first index is recorded
                # project
                #for i in range(p2-p1+self.peaksize,p2-p1+self.peaksize+self.tag_expansion_size):
                s = max(int(p2-self.tag_expansion_size/2-p1+psize_adjusted1), 0)
                start[s] += 1
                e = min(int(p2+self.tag_expansion_size/2-p1+psize_adjusted1), max_index)
                end[e] -= 1
                #line[s:e] += 1
                #for i in range(s,e):
                #    #if i>=0 and i<length_l:
                #    line[i]+=1
                i2+=1
        return

    cdef __count ( self, np.ndarray[np.int32_t, ndim=1] start, np.ndarray[np.int32_t, ndim=1] end, np.ndarray[np.int32_t, ndim=1] line ):
        cdef:
            int i
            long pileup
        pileup = 0
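        # The loop below is a running (prefix) sum over the +1/-1 deltas that
        # __model_add_line wrote into `start` and `end`. A small illustration
        # (editor's example, not from the original source): two expanded tags
        # covering [1,3) and [2,4) give
        #   start = [0, 1, 1,  0,  0]
        #   end   = [0, 0, 0, -1, -1]
        # and the cumulative sum yields the pileup line [0, 1, 2, 1, 0].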
        for i in range(line.shape[0]):
            pileup += start[i] + end[i]
            line[i] = pileup
        return

    cdef __paired_peaks (self):
        """Call paired peaks from a fwtrackI object.

        Return paired peak center positions.
        """
        cdef:
            int i
            list chrs
            bytes chrom
            dict paired_peaks_pos
            np.ndarray[np.int32_t, ndim=1] plus_tags, minus_tags

        chrs = list(self.treatment.get_chr_names())
        chrs.sort()
        paired_peaks_pos = {}
        for i in range( len(chrs) ):
            chrom = chrs[ i ]
            self.debug("Chromosome: %s" % (chrom) )
            [ plus_tags, minus_tags ] = self.treatment.get_locations_by_chr( chrom )
            plus_peaksinfo = self.__naive_find_peaks ( plus_tags, 1 )
            self.debug("Number of unique tags on + strand: %d" % ( plus_tags.shape[0] ) )
            self.debug("Number of peaks on + strand: %d" % ( len(plus_peaksinfo) ) )
            minus_peaksinfo = self.__naive_find_peaks ( minus_tags, 0 )
            self.debug("Number of unique tags on - strand: %d" % ( minus_tags.shape[0] ) )
            self.debug("Number of peaks on - strand: %d" % ( len( minus_peaksinfo ) ) )
            if not plus_peaksinfo or not minus_peaksinfo:
                self.debug("Chrom %s is discarded!" % (chrom) )
                continue
            else:
                paired_peaks_pos[chrom] = self.__find_pair_center (plus_peaksinfo, minus_peaksinfo)
                self.debug("Number of paired peaks: %d" %(len(paired_peaks_pos[chrom])))
        return paired_peaks_pos

    cdef __find_pair_center (self, list pluspeaks, list minuspeaks):
        cdef:
            long ip = 0         # index for plus peaks
            long im = 0         # index for minus peaks
            long im_prev = 0    # index for minus peaks in previous plus peak
            array.array pair_centers
            long ip_max
            long im_max
            bool flag_find_overlap
            long pp, pn
            long mp, mn

        pair_centers = array.array(BYTE4,[])
        ip_max = len(pluspeaks)
        im_max = len(minuspeaks)
        flag_find_overlap = False
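        # How the sweep below pairs peaks (editor's sketch, not from the
        # original source): with peaksize=400, a plus-strand peak at 1000
        # holding 20 tags and a minus-strand peak at 1160 holding 15 tags
        # overlap, their tag counts are comparable (20/15 lies between 0.5
        # and 2), and 1000 < 1160, so the midpoint (1000+1160)//2 = 1080 is
        # recorded as a paired-peak center.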
        while ip<ip_max and im<im_max:
            (pp,pn) = pluspeaks[ip] # (peak position, number of tags in peak)
            (mp,mn) = minuspeaks[im]
            if pp-self.peaksize > mp: # move minus
                im += 1
            elif pp+self.peaksize < mp: # move plus
                ip += 1
                im = im_prev    # search minus peaks from previous index
                flag_find_overlap = False
            else:               # overlap!
                if not flag_find_overlap:
                    flag_find_overlap = True
                    im_prev = im # only the first index is recorded
                if float(pn)/mn < 2 and float(pn)/mn > 0.5: # numbers of tags in the plus and minus peak regions are comparable...
                    if pp < mp:
                        pair_centers.append((pp+mp)//2)
                        #self.debug ( "distance: %d, minus: %d, plus: %d" % (mp-pp,mp,pp))
                im += 1
        return pair_centers

    cdef __naive_find_peaks (self, np.ndarray[np.int32_t, ndim=1] taglist, int plus_strand=1 ):
        """Naively call peaks based on tag counting.

        If plus_strand == 0, call peaks on the minus strand.

        Return peak positions and the tag number in each peak region as a list of tuples [(pos,num)].
        """
        cdef:
            long i
            int pos
            list peak_info
            int32_t * taglist_ptr
            list current_tag_list

        taglist_ptr = <int32_t *> taglist.data

        peak_info = []          # store peak pos in every peak region and
                                # unique tag number in every peak region
        if taglist.shape[0] < 2:
            return peak_info
        pos = taglist[0]
        taglist_ptr += 1        # step past the first tag, which is already recorded

        current_tag_list = [ pos ]

        for i in range( 1, taglist.shape[0] ):
            pos = taglist_ptr[0]
            taglist_ptr += 1

            if ( pos - current_tag_list[0] + 1 ) > self.peaksize: # call a peak from current_tag_list when the region is long enough
                # a peak will be called if the tag number is between min_tags and max_tags.
                if len(current_tag_list) >= self.min_tags and len(current_tag_list) <= self.max_tags:
                    peak_info.append( ( self.__naive_peak_pos(current_tag_list,plus_strand), len(current_tag_list) ) )
                #current_tag_list = array(BYTE4, []) # reset current_tag_list
                current_tag_list = []

            current_tag_list.append( pos ) # add pos while 1. no
                                           # need to call peak;
                                           # 2. current_tag_list is []

        return peak_info

    cdef __naive_peak_pos (self, pos_list, int plus_strand ):
        """Naively calculate the position of the peak.

        plus_strand: 1, plus; 0, minus

        Return the highest peak summit position.
        """
        #if plus_strand:
        #    tpos = pos_list + self.tag_expansion_size/2
        #else:
        #    tpos = pos_list - self.tag_expansion_size/2
        cdef:
            int peak_length, start, pos, i, pp, top_p_num, s, e, pileup, j, p, pre_p
            np.ndarray[np.int32_t, ndim=1] horizon_line
            list ss, es, top_pos
            int l_ss, l_es, i_s, i_e

        peak_length = pos_list[-1]+1-pos_list[0]+self.tag_expansion_size
        #if plus_strand:
        #    start = pos_list[0]
        #else:
        #    start = pos_list[0] - self.tag_expansion_size

        start = pos_list[0] - self.tag_expansion_size//2 # leftmost position of the projection line
        ss = []
        es = []

        #horizon_line = np.zeros(peak_length, dtype="int32") # the line for tags to be projected
        horizon_line = np.zeros( peak_length, dtype="int32") # the line for tags to be projected
        #horizon_line = array('i',[0]*peak_length)
        #for pos in pos_list:
        for i in range(len(pos_list)):
            pos = pos_list[i]
            #if plus_strand:
            ss.append( max(pos-start-self.tag_expansion_size//2,0) )
            es.append( min(pos-start+self.tag_expansion_size//2,peak_length) )

        ss.sort()
        es.sort()

        pileup = 0

        l_ss = len( ss )
        l_es = len( es )

        i_s = 0
        i_e = 0

        pre_p = min( ss[ 0 ], es[ 0 ] )
        if pre_p != 0:
            for i in range( pre_p ):
                horizon_line[ i ] = 0

        while i_s < l_ss and i_e < l_es:
            if ss[ i_s ] < es[ i_e ]:
                p = ss[ i_s ]
                if p != pre_p:
                    for i in range( pre_p, p ):
                        horizon_line[ i ] = pileup
                    pre_p = p
                pileup += 1
                i_s += 1
            elif ss[ i_s ] > es[ i_e ]:
                p = es[ i_e ]
                if p != pre_p:
                    for i in range( pre_p, p):
                        horizon_line[ i ] = pileup
                    pre_p = p
                pileup -= 1
                i_e += 1
            else:
                i_s += 1
                i_e += 1
        if ( i_e < l_es ):      # process the remaining end positions
            for j in range( i_e, l_es ):
                p = es[ j ]
                if p != pre_p:
                    for i in range( pre_p, p ):
                        horizon_line[ i ] = pileup
                    pre_p = p
                pileup -= 1

        # # top indices
        # top_indices = np.where(horizon_line == horizon_line.max())[0]
        # #print top_indices+start
        # print top_indices[ int(top_indices.shape[0]/2) ] + start
        # return top_indices[ int(top_indices.shape[0]/2) ] + start

        top_pos = []            # to record the top positions; there may be more than one
        top_p_num = 0           # the maximum number of projected points
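        # Tie-breaking sketch for the loop below (editor's illustration, not
        # from the original source): if horizon_line = [1, 3, 3, 3, 1], then
        # positions 1, 2 and 3 all reach the maximum pileup of 3, so top_pos
        # becomes [1, 2, 3] and the middle one, top_pos[3//2] = 2, is returned
        # (offset by `start` to give a genome coordinate).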
        for pp in range(peak_length): # find the peak position as the highest point
            if horizon_line[pp] > top_p_num:
                top_p_num = horizon_line[pp]
                top_pos = [pp]
            elif horizon_line[pp] == top_p_num:
                top_pos.append(pp)

        #print top_pos[int(len(top_pos)/2)]+start
        return (top_pos[len(top_pos)//2]+start)

    cdef __naive_peak_pos2 (self, pos_list, int plus_strand ):
        """Naively calculate the position of the peak.

        plus_strand: 1, plus; 0, minus

        Return the highest peak summit position.
        """
        cdef int peak_length, start, pos, i, pp, top_p_num

        peak_length = pos_list[-1]+1-pos_list[0]+self.tag_expansion_size
        if plus_strand:
            start = pos_list[0]
        else:
            start = pos_list[0] - self.tag_expansion_size
        horizon_line = np.zeros(peak_length, dtype="int32") # the line for tags to be projected
        for i in range(len(pos_list)):
            pos = pos_list[i]
            if plus_strand:
                for pp in range(int(pos-start),int(pos-start+self.tag_expansion_size)): # projected point
                    horizon_line[pp] += 1
            else:
                for pp in range(int(pos-start-self.tag_expansion_size),int(pos-start)): # projected point
                    horizon_line[pp] += 1

        # top indices
        #print pos_list
        #print horizon_line
        top_indices = np.where(horizon_line == horizon_line.max())[0]
        #print top_indices+start
        return top_indices[ top_indices.shape[0]//2 ] + start

# smooth function from the SciPy cookbook: http://www.scipy.org/Cookbook/SignalSmooth
cpdef smooth(x, int window_len=11, str window='hanning'):
    """smooth the data using a window with the requested size.

    This method is based on the convolution of a scaled window with the signal.
    The signal is prepared by introducing reflected copies of the signal
    (with the window size) at both ends so that transient parts are minimized
    at the beginning and end of the output signal.

    input:
        x: the input signal
        window_len: the dimension of the smoothing window; should be an odd integer
        window: the type of window from 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'
            a flat window will produce a moving average smoothing.

    output:
        the smoothed signal

    example:

        t = np.linspace(-2, 2, 41)
        x = np.sin(t) + np.random.randn(len(t)) * 0.1
        y = smooth(x)

    see also:

        numpy.hanning, numpy.hamming, numpy.bartlett, numpy.blackman, numpy.convolve
        scipy.signal.lfilter

    TODO: the window parameter could be the window itself if given as an array instead of a string
    NOTE: unlike the cookbook version, the output here has the same length as the
    input (for odd window_len), since we return y[(window_len//2):-(window_len//2)]
    instead of y.
    """
    if x.ndim != 1:
        raise ValueError("smooth only accepts 1 dimension arrays.")

    if x.size < window_len:
        raise ValueError("Input vector needs to be bigger than window size.")

    if window_len<3:
        return x

    if not window in ['flat', 'hanning', 'hamming', 'bartlett', 'blackman']:
        raise ValueError("Window should be one of 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'")

    s=np.r_[x[window_len-1:0:-1],x,x[-1:-window_len:-1]]
    #print(len(s))
    if window == 'flat': # moving average
        w=np.ones(window_len,'d')
    else:
        w=getattr(np, window)(window_len)

    y=np.convolve(w/w.sum(),s,mode='valid')
    return y[(window_len//2):-(window_len//2)]
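
# A minimal usage sketch for smooth() (editor's example, not from the
# original source), smoothing a noisy sine with the default Hanning window:
#
#   import numpy as np
#   t = np.linspace(-2, 2, 41)
#   noisy = np.sin(t) + np.random.randn(len(t)) * 0.1
#   smoothed = smooth(noisy)            # same length as the input
#   assert smoothed.shape == noisy.shape
#
# PeakModel above calls smooth(ycorr, window="flat"), i.e. an 11-point moving
# average over the cross-correlation curve, before picking local maxima.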