/*
 * Copyright (c) 2012 Neratec Solutions AG
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
#include <linux/slab.h>
#include <linux/spinlock.h>

#include "ath.h"
#include "dfs_pattern_detector.h"
#include "dfs_pri_detector.h"
struct ath_dfs_pool_stats global_dfs_pool_stats = {};

#define DFS_POOL_STAT_INC(c) (global_dfs_pool_stats.c++)
#define DFS_POOL_STAT_DEC(c) (global_dfs_pool_stats.c--)

#define GET_PRI_TO_USE(MIN, MAX, RUNTIME) \
	(MIN + PRI_TOLERANCE == MAX - PRI_TOLERANCE ? \
	 MIN + PRI_TOLERANCE : RUNTIME)
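/*
 * Worked example for GET_PRI_TO_USE (illustrative values, assuming
 * PRI_TOLERANCE == 16 as defined in dfs_pattern_detector.h): with
 * MIN = 1000 and MAX = 1032, 1000 + 16 == 1032 - 16 holds, so the spec
 * effectively describes a single fixed PRI and the midpoint 1016 is
 * used. With MIN = 1000 and MAX = 2000 the bounds differ, so the
 * measured RUNTIME delta between pulses is used instead.
 */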
/**
 * struct pulse_elem - elements in pulse queue
 * @ts: time stamp in usecs
 */
struct pulse_elem {
	struct list_head head;
	u64 ts;
};
/**
 * pde_get_multiple() - get number of multiples considering a given tolerance
 * @return factor if abs(val - factor*fraction) <= tolerance, 0 otherwise
 */
static u32 pde_get_multiple(u32 val, u32 fraction, u32 tolerance)
{
	u32 remainder;
	u32 factor;
	u32 delta;

	if (fraction == 0)
		return 0;

	delta = (val < fraction) ? (fraction - val) : (val - fraction);

	if (delta <= tolerance)
		/* val and fraction are within tolerance */
		return 1;

	factor = val / fraction;
	remainder = val % fraction;
	if (remainder > tolerance) {
		/* no exact match */
		if ((fraction - remainder) <= tolerance)
			/* remainder is within tolerance */
			factor++;
		else
			factor = 0;
	}
	return factor;
}
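/*
 * Worked example for pde_get_multiple() (illustrative values):
 * val = 2990, fraction = 1000, tolerance = 16 gives delta = 1990 > 16,
 * factor = 2, remainder = 990 > 16 and fraction - remainder = 10 <= 16,
 * so the factor is rounded up and 3 is returned: 2990 is accepted as
 * roughly 3 * 1000. With val = 2950, fraction - remainder = 50 > 16,
 * so 0 is returned (no multiple within tolerance).
 */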
/**
 * DOC: Singleton Pulse and Sequence Pools
 *
 * Instances of pri_sequence and pulse_elem are kept in singleton pools to
 * reduce the number of dynamic allocations. They are shared between all
 * instances and grow up to the peak number of simultaneously used objects.
 *
 * Memory is freed after all references to the pools are released.
 */
static u32 singleton_pool_references;
static LIST_HEAD(pulse_pool);
static LIST_HEAD(pseq_pool);
static DEFINE_SPINLOCK(pool_lock);
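/*
 * Lifecycle illustration (hypothetical numbers): with three active
 * channel detectors, singleton_pool_references is 3, and released
 * pulse/sequence objects are parked on pulse_pool/pseq_pool for reuse.
 * Only when the last detector calls pool_deregister_ref() are the
 * pooled objects actually freed.
 */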
static void pool_register_ref(void)
{
	spin_lock_bh(&pool_lock);
	singleton_pool_references++;
	DFS_POOL_STAT_INC(pool_reference);
	spin_unlock_bh(&pool_lock);
}
static void pool_deregister_ref(void)
{
	spin_lock_bh(&pool_lock);
	singleton_pool_references--;
	DFS_POOL_STAT_DEC(pool_reference);
	if (singleton_pool_references == 0) {
		/* free singleton pools with no references left */
		struct pri_sequence *ps, *ps0;
		struct pulse_elem *p, *p0;

		list_for_each_entry_safe(p, p0, &pulse_pool, head) {
			list_del(&p->head);
			DFS_POOL_STAT_DEC(pulse_allocated);
			kfree(p);
		}
		list_for_each_entry_safe(ps, ps0, &pseq_pool, head) {
			list_del(&ps->head);
			DFS_POOL_STAT_DEC(pseq_allocated);
			kfree(ps);
		}
	}
	spin_unlock_bh(&pool_lock);
}
static void pool_put_pulse_elem(struct pulse_elem *pe)
{
	spin_lock_bh(&pool_lock);
	list_add(&pe->head, &pulse_pool);
	DFS_POOL_STAT_DEC(pulse_used);
	spin_unlock_bh(&pool_lock);
}
static void pool_put_pseq_elem(struct pri_sequence *pse)
{
	spin_lock_bh(&pool_lock);
	list_add(&pse->head, &pseq_pool);
	DFS_POOL_STAT_DEC(pseq_used);
	spin_unlock_bh(&pool_lock);
}
static struct pri_sequence *pool_get_pseq_elem(void)
{
	struct pri_sequence *pse = NULL;
	spin_lock_bh(&pool_lock);
	if (!list_empty(&pseq_pool)) {
		pse = list_first_entry(&pseq_pool, struct pri_sequence, head);
		list_del(&pse->head);
		DFS_POOL_STAT_INC(pseq_used);
	}
	spin_unlock_bh(&pool_lock);
	return pse;
}
static struct pulse_elem *pool_get_pulse_elem(void)
{
	struct pulse_elem *pe = NULL;
	spin_lock_bh(&pool_lock);
	if (!list_empty(&pulse_pool)) {
		pe = list_first_entry(&pulse_pool, struct pulse_elem, head);
		list_del(&pe->head);
		DFS_POOL_STAT_INC(pulse_used);
	}
	spin_unlock_bh(&pool_lock);
	return pe;
}
static struct pulse_elem *pulse_queue_get_tail(struct pri_detector *pde)
{
	struct list_head *l = &pde->pulses;
	if (list_empty(l))
		return NULL;
	return list_entry(l->prev, struct pulse_elem, head);
}
static bool pulse_queue_dequeue(struct pri_detector *pde)
{
	struct pulse_elem *p = pulse_queue_get_tail(pde);
	if (p != NULL) {
		list_del_init(&p->head);
		pde->count--;
		/* give it back to pool */
		pool_put_pulse_elem(p);
	}
	return (pde->count > 0);
}
/* remove pulses older than window */
static void pulse_queue_check_window(struct pri_detector *pde)
{
	u64 min_valid_ts;
	struct pulse_elem *p;

	/* there is no delta time with less than 2 pulses */
	if (pde->count < 2)
		return;

	if (pde->last_ts <= pde->window_size)
		return;

	min_valid_ts = pde->last_ts - pde->window_size;
	while ((p = pulse_queue_get_tail(pde)) != NULL) {
		if (p->ts >= min_valid_ts)
			return;
		pulse_queue_dequeue(pde);
	}
}
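/*
 * Window arithmetic example (illustrative spec values): with
 * pri_max = 5000, ppb = 10 and num_pri = 1, pri_detector_init() below
 * sets window_size = 5000 * 10 * 1 = 50000 usecs, so any pulse older
 * than last_ts - 50000 is dequeued here.
 */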
static bool pulse_queue_enqueue(struct pri_detector *pde, u64 ts)
{
	struct pulse_elem *p = pool_get_pulse_elem();
	if (p == NULL) {
		p = kmalloc(sizeof(*p), GFP_ATOMIC);
		if (p == NULL) {
			DFS_POOL_STAT_INC(pulse_alloc_error);
			return false;
		}
		DFS_POOL_STAT_INC(pulse_allocated);
		DFS_POOL_STAT_INC(pulse_used);
	}
	INIT_LIST_HEAD(&p->head);
	p->ts = ts;
	list_add(&p->head, &pde->pulses);
	pde->count++;
	pde->last_ts = ts;
	pulse_queue_check_window(pde);
	if (pde->count >= pde->max_count)
		pulse_queue_dequeue(pde);
	return true;
}
static bool pseq_handler_create_sequences(struct pri_detector *pde,
					  u64 ts, u32 min_count)
{
	struct pulse_elem *p;
	list_for_each_entry(p, &pde->pulses, head) {
		struct pri_sequence ps, *new_ps;
		struct pulse_elem *p2;
		u32 tmp_false_count;
		u64 min_valid_ts;
		u32 delta_ts = ts - p->ts;

		if (delta_ts < pde->rs->pri_min)
			/* ignore too small pri */
			continue;

		if (delta_ts > pde->rs->pri_max)
			/* stop on too large pri (sorted list) */
			break;

		/* build a new sequence with new potential pri */
		ps.count = 2;
		ps.count_falses = 0;
		ps.first_ts = p->ts;
		ps.last_ts = ts;
		ps.pri = GET_PRI_TO_USE(pde->rs->pri_min,
			pde->rs->pri_max, ts - p->ts);
		ps.dur = ps.pri * (pde->rs->ppb - 1)
				+ 2 * pde->rs->max_pri_tolerance;

		p2 = p;
		tmp_false_count = 0;
		min_valid_ts = ts - ps.dur;
		/* check which past pulses are candidates for new sequence */
		list_for_each_entry_continue(p2, &pde->pulses, head) {
			u32 factor;
			if (p2->ts < min_valid_ts)
				/* stop on crossing window border */
				break;
			/* check if pulse match (multi)PRI */
			factor = pde_get_multiple(ps.last_ts - p2->ts, ps.pri,
						  pde->rs->max_pri_tolerance);
			if (factor > 0) {
				ps.count++;
				ps.first_ts = p2->ts;
				/*
				 * on match, add the intermediate falses
				 * and reset counter
				 */
				ps.count_falses += tmp_false_count;
				tmp_false_count = 0;
			} else {
				/* this is a potential false one */
				tmp_false_count++;
			}
		}
		if (ps.count <= min_count)
			/* did not reach minimum count, drop sequence */
			continue;

		/* this is a valid one, add it */
		ps.deadline_ts = ps.first_ts + ps.dur;
		new_ps = pool_get_pseq_elem();
		if (new_ps == NULL) {
			new_ps = kmalloc(sizeof(*new_ps), GFP_ATOMIC);
			if (new_ps == NULL) {
				DFS_POOL_STAT_INC(pseq_alloc_error);
				return false;
			}
			DFS_POOL_STAT_INC(pseq_allocated);
			DFS_POOL_STAT_INC(pseq_used);
		}
		memcpy(new_ps, &ps, sizeof(ps));
		INIT_LIST_HEAD(&new_ps->head);
		list_add(&new_ps->head, &pde->sequences);
	}
	return true;
}
/* check new ts and add to all matching existing sequences */
static u32
pseq_handler_add_to_existing_seqs(struct pri_detector *pde, u64 ts)
{
	u32 max_count = 0;
	struct pri_sequence *ps, *ps2;
	list_for_each_entry_safe(ps, ps2, &pde->sequences, head) {
		u32 delta_ts;
		u32 factor;

		/* first ensure that sequence is within window */
		if (ts > ps->deadline_ts) {
			list_del_init(&ps->head);
			pool_put_pseq_elem(ps);
			continue;
		}

		delta_ts = ts - ps->last_ts;
		factor = pde_get_multiple(delta_ts, ps->pri,
					  pde->rs->max_pri_tolerance);
		if (factor > 0) {
			ps->last_ts = ts;
			ps->count++;

			if (max_count < ps->count)
				max_count = ps->count;
		} else {
			ps->count_falses++;
		}
	}
	return max_count;
}
static struct pri_sequence *
pseq_handler_check_detection(struct pri_detector *pde)
{
	struct pri_sequence *ps;

	if (list_empty(&pde->sequences))
		return NULL;

	list_for_each_entry(ps, &pde->sequences, head) {
		/*
		 * we assume to have enough matching confidence if we
		 * 1) have enough pulses
		 * 2) have more matching than false pulses
		 */
		if ((ps->count >= pde->rs->ppb_thresh) &&
		    (ps->count * pde->rs->num_pri >= ps->count_falses))
			return ps;
	}
	return NULL;
}
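/*
 * Illustrative detection check (values assumed, not taken from a real
 * radar pattern): with ppb_thresh = 15 and num_pri = 1, a sequence with
 * count = 16 matching pulses is reported as long as count_falses <= 16,
 * i.e. the false pulses do not outnumber the matching ones.
 */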
/* free pulse queue and sequences list and give objects back to pools */
static void pri_detector_reset(struct pri_detector *pde, u64 ts)
{
	struct pri_sequence *ps, *ps0;
	struct pulse_elem *p, *p0;
	list_for_each_entry_safe(ps, ps0, &pde->sequences, head) {
		list_del_init(&ps->head);
		pool_put_pseq_elem(ps);
	}
	list_for_each_entry_safe(p, p0, &pde->pulses, head) {
		list_del_init(&p->head);
		pool_put_pulse_elem(p);
	}
	pde->count = 0;
	pde->last_ts = ts;
}
static void pri_detector_exit(struct pri_detector *de)
{
	pri_detector_reset(de, 0);
	pool_deregister_ref();
	kfree(de);
}
static struct pri_sequence *pri_detector_add_pulse(struct pri_detector *de,
						   struct pulse_event *event)
{
	u32 max_updated_seq;
	struct pri_sequence *ps;
	u64 ts = event->ts;
	const struct radar_detector_specs *rs = de->rs;

	/* ignore pulses not within width range */
	if ((rs->width_min > event->width) || (rs->width_max < event->width))
		return NULL;

	if ((ts - de->last_ts) < rs->max_pri_tolerance)
		/* if delta to last pulse is too short, don't use this pulse */
		return NULL;
	/* radar detector spec needs chirp, but not detected */
	if (rs->chirp && rs->chirp != event->chirp)
		return NULL;

	de->last_ts = ts;

	max_updated_seq = pseq_handler_add_to_existing_seqs(de, ts);

	if (!pseq_handler_create_sequences(de, ts, max_updated_seq)) {
		pri_detector_reset(de, ts);
		return NULL;
	}

	ps = pseq_handler_check_detection(de);

	if (ps == NULL)
		pulse_queue_enqueue(de, ts);

	return ps;
}
struct pri_detector *pri_detector_init(const struct radar_detector_specs *rs)
{
	struct pri_detector *de;

	de = kzalloc(sizeof(*de), GFP_ATOMIC);
	if (de == NULL)
		return NULL;
	de->exit = pri_detector_exit;
	de->add_pulse = pri_detector_add_pulse;
	de->reset = pri_detector_reset;

	INIT_LIST_HEAD(&de->sequences);
	INIT_LIST_HEAD(&de->pulses);
	de->window_size = rs->pri_max * rs->ppb * rs->num_pri;
	de->max_count = rs->ppb * 2;
	de->rs = rs;

	pool_register_ref();
	return de;
}
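/*
 * Minimal caller sketch (illustration only, not part of this driver):
 * how a channel detector might drive the pri_detector API above. The
 * helper name and the way rs/event are obtained are hypothetical; the
 * block is compiled out to keep it purely documentary.
 */
#if 0
static void example_feed_pulse(const struct radar_detector_specs *rs,
			       struct pulse_event *event)
{
	struct pri_detector *de = pri_detector_init(rs);
	struct pri_sequence *ps;

	if (de == NULL)
		return;

	/* a non-NULL result means a matching PRI sequence was detected */
	ps = de->add_pulse(de, event);
	if (ps != NULL)
		pr_info("radar PRI detected: pri=%u count=%u\n",
			ps->pri, ps->count);

	/* resets state, drops the pool reference and frees the detector */
	de->exit(de);
}
#endif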