/*
 * Copyright (c) 2012 Neratec Solutions AG
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
#include <linux/slab.h>
#include <linux/spinlock.h>

#include "dfs_pattern_detector.h"
#include "dfs_pri_detector.h"

struct ath_dfs_pool_stats global_dfs_pool_stats = {};

#define DFS_POOL_STAT_INC(c) (global_dfs_pool_stats.c++)
#define DFS_POOL_STAT_DEC(c) (global_dfs_pool_stats.c--)
/**
 * struct pulse_elem - elements in pulse queue
 * @ts: time stamp in usecs
 */
struct pulse_elem {
        struct list_head head;
        u64 ts;
};
/**
 * pde_get_multiple() - get number of multiples considering a given tolerance
 * @return factor if abs(val - factor*fraction) <= tolerance, 0 otherwise
 */
static u32 pde_get_multiple(u32 val, u32 fraction, u32 tolerance)
{
        u32 remainder;
        u32 factor;
        u32 delta;

        if (fraction == 0)
                return 0;

        delta = (val < fraction) ? (fraction - val) : (val - fraction);

        if (delta <= tolerance)
                /* val and fraction are within tolerance */
                return 1;

        factor = val / fraction;
        remainder = val % fraction;
        if (remainder > tolerance) {
                /* no exact match */
                if ((fraction - remainder) <= tolerance)
                        /* remainder is within tolerance */
                        factor++;
                else
                        factor = 0;
        }
        return factor;
}
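/*
 * Illustrative calls (hypothetical values): pde_get_multiple(9003, 3000, 6)
 * returns 3, since 9003 / 3000 = 3 with remainder 3 <= tolerance;
 * pde_get_multiple(9400, 3000, 6) returns 0, since neither the remainder
 * (400) nor fraction - remainder (2600) is within the tolerance of 6.
 */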
/**
 * DOC: Singleton Pulse and Sequence Pools
 *
 * Instances of pri_sequence and pulse_elem are kept in singleton pools to
 * reduce the number of dynamic allocations. They are shared between all
 * instances and grow up to the peak number of simultaneously used objects.
 *
 * Memory is freed after all references to the pools are released.
 */
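/*
 * The pools below are shared between all pri_detector instances. Access is
 * serialized with pool_lock, and the pools stay alive as long as
 * singleton_pool_references is non-zero (see pool_register_ref() and
 * pool_deregister_ref() below).
 */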
static u32 singleton_pool_references;
static LIST_HEAD(pulse_pool);
static LIST_HEAD(pseq_pool);
static DEFINE_SPINLOCK(pool_lock);
static void pool_register_ref(void)
{
        spin_lock_bh(&pool_lock);
        singleton_pool_references++;
        DFS_POOL_STAT_INC(pool_reference);
        spin_unlock_bh(&pool_lock);
}
static void pool_deregister_ref(void)
{
        spin_lock_bh(&pool_lock);
        singleton_pool_references--;
        DFS_POOL_STAT_DEC(pool_reference);
        if (singleton_pool_references == 0) {
                /* free singleton pools with no references left */
                struct pri_sequence *ps, *ps0;
                struct pulse_elem *p, *p0;

                list_for_each_entry_safe(p, p0, &pulse_pool, head) {
                        list_del(&p->head);
                        DFS_POOL_STAT_DEC(pulse_allocated);
                        kfree(p);
                }
                list_for_each_entry_safe(ps, ps0, &pseq_pool, head) {
                        list_del(&ps->head);
                        DFS_POOL_STAT_DEC(pseq_allocated);
                        kfree(ps);
                }
        }
        spin_unlock_bh(&pool_lock);
}
static void pool_put_pulse_elem(struct pulse_elem *pe)
{
        spin_lock_bh(&pool_lock);
        list_add(&pe->head, &pulse_pool);
        DFS_POOL_STAT_DEC(pulse_used);
        spin_unlock_bh(&pool_lock);
}
static void pool_put_pseq_elem(struct pri_sequence *pse)
{
        spin_lock_bh(&pool_lock);
        list_add(&pse->head, &pseq_pool);
        DFS_POOL_STAT_DEC(pseq_used);
        spin_unlock_bh(&pool_lock);
}
static struct pri_sequence *pool_get_pseq_elem(void)
{
        struct pri_sequence *pse = NULL;
        spin_lock_bh(&pool_lock);
        if (!list_empty(&pseq_pool)) {
                pse = list_first_entry(&pseq_pool, struct pri_sequence, head);
                list_del(&pse->head);
                DFS_POOL_STAT_INC(pseq_used);
        }
        spin_unlock_bh(&pool_lock);
        return pse;
}
static struct pulse_elem *pool_get_pulse_elem(void)
{
        struct pulse_elem *pe = NULL;
        spin_lock_bh(&pool_lock);
        if (!list_empty(&pulse_pool)) {
                pe = list_first_entry(&pulse_pool, struct pulse_elem, head);
                list_del(&pe->head);
                DFS_POOL_STAT_INC(pulse_used);
        }
        spin_unlock_bh(&pool_lock);
        return pe;
}
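/*
 * The pool getters above return NULL when the pool is empty; callers below
 * (pulse_queue_enqueue() and pseq_handler_create_sequences()) then fall back
 * to kmalloc(GFP_ATOMIC) and bump the *_allocated/*_used statistics.
 */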
static struct pulse_elem *pulse_queue_get_tail(struct pri_detector *pde)
{
        struct list_head *l = &pde->pulses;
        if (list_empty(l))
                return NULL;
        return list_entry(l->prev, struct pulse_elem, head);
}
static bool pulse_queue_dequeue(struct pri_detector *pde)
{
        struct pulse_elem *p = pulse_queue_get_tail(pde);
        if (p != NULL) {
                list_del_init(&p->head);
                pde->count--;
                /* give it back to pool */
                pool_put_pulse_elem(p);
        }
        return (pde->count > 0);
}
/* remove pulses older than window */
static void pulse_queue_check_window(struct pri_detector *pde)
{
        u64 min_valid_ts;
        struct pulse_elem *p;

        /* there is no delta time with less than 2 pulses */
        if (pde->count < 2)
                return;

        if (pde->last_ts <= pde->window_size)
                return;

        min_valid_ts = pde->last_ts - pde->window_size;
        while ((p = pulse_queue_get_tail(pde)) != NULL) {
                if (p->ts >= min_valid_ts)
                        return;
                pulse_queue_dequeue(pde);
        }
}
static bool pulse_queue_enqueue(struct pri_detector *pde, u64 ts)
{
        struct pulse_elem *p = pool_get_pulse_elem();
        if (p == NULL) {
                p = kmalloc(sizeof(*p), GFP_ATOMIC);
                if (p == NULL) {
                        DFS_POOL_STAT_INC(pulse_alloc_error);
                        return false;
                }
                DFS_POOL_STAT_INC(pulse_allocated);
                DFS_POOL_STAT_INC(pulse_used);
        }
        INIT_LIST_HEAD(&p->head);
        p->ts = ts;
        list_add(&p->head, &pde->pulses);
        pde->count++;
        pde->last_ts = ts;
        pulse_queue_check_window(pde);
        if (pde->count >= pde->max_count)
                pulse_queue_dequeue(pde);
        return true;
}
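/*
 * Note on the queue layout: list_add() above inserts at the head of
 * pde->pulses, so the list holds pulses newest-first and
 * pulse_queue_get_tail() (l->prev) always returns the oldest pulse. The
 * queue is bounded both by the time window (window_size) and by max_count.
 */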
static bool pseq_handler_create_sequences(struct pri_detector *pde,
                                          u64 ts, u32 min_count)
{
        struct pulse_elem *p;
        list_for_each_entry(p, &pde->pulses, head) {
                struct pri_sequence ps, *new_ps;
                struct pulse_elem *p2;
                u32 tmp_false_count;
                u64 min_valid_ts;
                u32 delta_ts = ts - p->ts;

                if (delta_ts < pde->rs->pri_min)
                        /* ignore too small pri */
                        continue;

                if (delta_ts > pde->rs->pri_max)
                        /* stop on too large pri (sorted list) */
                        break;

                /* build a new sequence with new potential pri */
                ps.count = 2;
                ps.count_falses = 0;
                ps.first_ts = p->ts;
                ps.last_ts = ts;
                ps.pri = ts - p->ts;
                ps.dur = ps.pri * (pde->rs->ppb - 1)
                                + 2 * pde->rs->max_pri_tolerance;

                p2 = p;
                tmp_false_count = 0;
                min_valid_ts = ts - ps.dur;
                /* check which past pulses are candidates for new sequence */
                list_for_each_entry_continue(p2, &pde->pulses, head) {
                        u32 factor;
                        if (p2->ts < min_valid_ts)
                                /* stop on crossing window border */
                                break;
                        /* check if pulse match (multi)PRI */
                        factor = pde_get_multiple(ps.last_ts - p2->ts, ps.pri,
                                                  pde->rs->max_pri_tolerance);
                        if (factor > 0) {
                                ps.count++;
                                ps.first_ts = p2->ts;
                                /*
                                 * on match, add the intermediate falses
                                 * and reset counter
                                 */
                                ps.count_falses += tmp_false_count;
                                tmp_false_count = 0;
                        } else {
                                /* this is a potential false one */
                                tmp_false_count++;
                        }
                }
                if (ps.count < min_count)
                        /* did not reach minimum count, drop sequence */
                        continue;

                /* this is a valid one, add it */
                ps.deadline_ts = ps.first_ts + ps.dur;
                new_ps = pool_get_pseq_elem();
                if (new_ps == NULL) {
                        new_ps = kmalloc(sizeof(*new_ps), GFP_ATOMIC);
                        if (new_ps == NULL) {
                                DFS_POOL_STAT_INC(pseq_alloc_error);
                                return false;
                        }
                        DFS_POOL_STAT_INC(pseq_allocated);
                        DFS_POOL_STAT_INC(pseq_used);
                }
                memcpy(new_ps, &ps, sizeof(ps));
                INIT_LIST_HEAD(&new_ps->head);
                list_add(&new_ps->head, &pde->sequences);
        }
        return true;
}
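/*
 * Illustrative example (hypothetical numbers, not from any radar spec):
 * with ps.pri = 1500 us, rs->ppb = 10 and rs->max_pri_tolerance = 16 us,
 * a candidate sequence spans ps.dur = 1500 * 9 + 2 * 16 = 13532 us, and
 * only pulses younger than ts - 13532 us can join it.
 */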
/* check new ts and add to all matching existing sequences */
static u32
pseq_handler_add_to_existing_seqs(struct pri_detector *pde, u64 ts)
{
        u32 max_count = 0;
        struct pri_sequence *ps, *ps2;
        list_for_each_entry_safe(ps, ps2, &pde->sequences, head) {
                u32 delta_ts;
                u32 factor;

                /* first ensure that sequence is within window */
                if (ts > ps->deadline_ts) {
                        list_del_init(&ps->head);
                        pool_put_pseq_elem(ps);
                        continue;
                }

                delta_ts = ts - ps->last_ts;
                factor = pde_get_multiple(delta_ts, ps->pri,
                                          pde->rs->max_pri_tolerance);
                if (factor > 0) {
                        ps->last_ts = ts;
                        ps->count++;

                        if (max_count < ps->count)
                                max_count = ps->count;
                } else {
                        ps->count_falses++;
                }
        }
        return max_count;
}
static struct pri_sequence *
pseq_handler_check_detection(struct pri_detector *pde)
{
        struct pri_sequence *ps;

        if (list_empty(&pde->sequences))
                return NULL;

        list_for_each_entry(ps, &pde->sequences, head) {
                /*
                 * we assume to have enough matching confidence if we
                 * 1) have enough pulses
                 * 2) have more matching than false pulses
                 */
                if ((ps->count >= pde->rs->ppb_thresh) &&
                    (ps->count * pde->rs->num_pri >= ps->count_falses))
                        return ps;
        }
        return NULL;
}
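/*
 * Illustrative check (hypothetical numbers): with ppb_thresh = 15 and
 * num_pri = 1, a sequence with count = 15 matching pulses is reported as
 * long as it has accumulated at most 15 false pulses.
 */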
/* free pulse queue and sequences list and give objects back to pools */
static void pri_detector_reset(struct pri_detector *pde, u64 ts)
{
        struct pri_sequence *ps, *ps0;
        struct pulse_elem *p, *p0;
        list_for_each_entry_safe(ps, ps0, &pde->sequences, head) {
                list_del_init(&ps->head);
                pool_put_pseq_elem(ps);
        }
        list_for_each_entry_safe(p, p0, &pde->pulses, head) {
                list_del_init(&p->head);
                pool_put_pulse_elem(p);
        }
        pde->count = 0;
        pde->last_ts = ts;
}
static void pri_detector_exit(struct pri_detector *de)
{
        pri_detector_reset(de, 0);
        pool_deregister_ref();
        kfree(de);
}
static struct pri_sequence *pri_detector_add_pulse(struct pri_detector *de,
                                                   struct pulse_event *event)
{
        u32 max_updated_seq;
        struct pri_sequence *ps;
        u64 ts = event->ts;
        const struct radar_detector_specs *rs = de->rs;

        /* ignore pulses not within width range */
        if ((rs->width_min > event->width) || (rs->width_max < event->width))
                return NULL;

        if ((ts - de->last_ts) < rs->max_pri_tolerance)
                /* if delta to last pulse is too short, don't use this pulse */
                return NULL;
        de->last_ts = ts;

        max_updated_seq = pseq_handler_add_to_existing_seqs(de, ts);

        if (!pseq_handler_create_sequences(de, ts, max_updated_seq)) {
                pri_detector_reset(de, ts);
                return NULL;
        }

        ps = pseq_handler_check_detection(de);

        if (ps == NULL)
                pulse_queue_enqueue(de, ts);

        return ps;
}
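/*
 * Usage sketch (hypothetical caller, for illustration only): the owning
 * pattern detector would create one instance per radar type and feed it
 * pulse events through the callbacks set up in pri_detector_init(), e.g.
 *
 *      struct pri_detector *pde = pri_detector_init(rs);
 *      struct pri_sequence *ps = pde->add_pulse(pde, &event);
 *      if (ps != NULL)
 *              ;  // matching sequence found, treat as radar candidate
 *      pde->exit(pde);
 */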
struct pri_detector *pri_detector_init(const struct radar_detector_specs *rs)
{
        struct pri_detector *de;

        de = kzalloc(sizeof(*de), GFP_ATOMIC);
        if (de == NULL)
                return NULL;
        de->exit = pri_detector_exit;
        de->add_pulse = pri_detector_add_pulse;
        de->reset = pri_detector_reset;

        INIT_LIST_HEAD(&de->sequences);
        INIT_LIST_HEAD(&de->pulses);
        de->window_size = rs->pri_max * rs->ppb * rs->num_pri;
        de->max_count = rs->ppb * 2;