/*
 * Copyright (c) 2012 Neratec Solutions AG
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include "dfs_pattern_detector.h"
#include "dfs_pri_detector.h"
#include "dfs_debug.h"
26 * struct pulse_elem - elements in pulse queue
27 * @ts: time stamp in usecs
30 struct list_head head
;
/**
 * pde_get_multiple() - get number of multiples considering a given tolerance
 * @val: delta to test
 * @fraction: base interval (PRI candidate); 0 is rejected
 * @tolerance: maximum allowed deviation in usecs
 *
 * Return: factor if abs(val - factor*fraction) <= tolerance, 0 otherwise
 */
static u32 pde_get_multiple(u32 val, u32 fraction, u32 tolerance)
{
	u32 remainder;
	u32 factor;
	u32 delta;

	/* guard against division by zero below */
	if (fraction == 0)
		return 0;

	delta = (val < fraction) ? (fraction - val) : (val - fraction);

	if (delta <= tolerance)
		/* val and fraction are within tolerance */
		return 1;

	factor = val / fraction;
	remainder = val % fraction;
	if (remainder > tolerance) {
		/* no exact match from below; try rounding up */
		if ((fraction - remainder) <= tolerance)
			/* remainder is within tolerance */
			factor++;
		else
			factor = 0;
	}
	return factor;
}
67 * DOC: Singleton Pulse and Sequence Pools
69 * Instances of pri_sequence and pulse_elem are kept in singleton pools to
70 * reduce the number of dynamic allocations. They are shared between all
71 * instances and grow up to the peak number of simultaneously used objects.
73 * Memory is freed after all references to the pools are released.
75 static u32 singleton_pool_references
;
76 static LIST_HEAD(pulse_pool
);
77 static LIST_HEAD(pseq_pool
);
78 static DEFINE_SPINLOCK(pool_lock
);
80 static void pool_register_ref(void)
82 spin_lock_bh(&pool_lock
);
83 singleton_pool_references
++;
84 DFS_POOL_STAT_INC(pool_reference
);
85 spin_unlock_bh(&pool_lock
);
88 static void pool_deregister_ref(void)
90 spin_lock_bh(&pool_lock
);
91 singleton_pool_references
--;
92 DFS_POOL_STAT_DEC(pool_reference
);
93 if (singleton_pool_references
== 0) {
94 /* free singleton pools with no references left */
95 struct pri_sequence
*ps
, *ps0
;
96 struct pulse_elem
*p
, *p0
;
98 list_for_each_entry_safe(p
, p0
, &pulse_pool
, head
) {
100 DFS_POOL_STAT_DEC(pulse_allocated
);
103 list_for_each_entry_safe(ps
, ps0
, &pseq_pool
, head
) {
105 DFS_POOL_STAT_DEC(pseq_allocated
);
109 spin_unlock_bh(&pool_lock
);
112 static void pool_put_pulse_elem(struct pulse_elem
*pe
)
114 spin_lock_bh(&pool_lock
);
115 list_add(&pe
->head
, &pulse_pool
);
116 DFS_POOL_STAT_DEC(pulse_used
);
117 spin_unlock_bh(&pool_lock
);
120 static void pool_put_pseq_elem(struct pri_sequence
*pse
)
122 spin_lock_bh(&pool_lock
);
123 list_add(&pse
->head
, &pseq_pool
);
124 DFS_POOL_STAT_DEC(pseq_used
);
125 spin_unlock_bh(&pool_lock
);
128 static struct pri_sequence
*pool_get_pseq_elem(void)
130 struct pri_sequence
*pse
= NULL
;
131 spin_lock_bh(&pool_lock
);
132 if (!list_empty(&pseq_pool
)) {
133 pse
= list_first_entry(&pseq_pool
, struct pri_sequence
, head
);
134 list_del(&pse
->head
);
135 DFS_POOL_STAT_INC(pseq_used
);
137 spin_unlock_bh(&pool_lock
);
141 static struct pulse_elem
*pool_get_pulse_elem(void)
143 struct pulse_elem
*pe
= NULL
;
144 spin_lock_bh(&pool_lock
);
145 if (!list_empty(&pulse_pool
)) {
146 pe
= list_first_entry(&pulse_pool
, struct pulse_elem
, head
);
148 DFS_POOL_STAT_INC(pulse_used
);
150 spin_unlock_bh(&pool_lock
);
154 static struct pulse_elem
*pulse_queue_get_tail(struct pri_detector
*pde
)
156 struct list_head
*l
= &pde
->pulses
;
159 return list_entry(l
->prev
, struct pulse_elem
, head
);
162 static bool pulse_queue_dequeue(struct pri_detector
*pde
)
164 struct pulse_elem
*p
= pulse_queue_get_tail(pde
);
166 list_del_init(&p
->head
);
168 /* give it back to pool */
169 pool_put_pulse_elem(p
);
171 return (pde
->count
> 0);
174 /* remove pulses older than window */
175 static void pulse_queue_check_window(struct pri_detector
*pde
)
178 struct pulse_elem
*p
;
180 /* there is no delta time with less than 2 pulses */
184 if (pde
->last_ts
<= pde
->window_size
)
187 min_valid_ts
= pde
->last_ts
- pde
->window_size
;
188 while ((p
= pulse_queue_get_tail(pde
)) != NULL
) {
189 if (p
->ts
>= min_valid_ts
)
191 pulse_queue_dequeue(pde
);
195 static bool pulse_queue_enqueue(struct pri_detector
*pde
, u64 ts
)
197 struct pulse_elem
*p
= pool_get_pulse_elem();
199 p
= kmalloc(sizeof(*p
), GFP_ATOMIC
);
201 DFS_POOL_STAT_INC(pulse_alloc_error
);
204 DFS_POOL_STAT_INC(pulse_allocated
);
205 DFS_POOL_STAT_INC(pulse_used
);
207 INIT_LIST_HEAD(&p
->head
);
209 list_add(&p
->head
, &pde
->pulses
);
212 pulse_queue_check_window(pde
);
213 if (pde
->count
>= pde
->max_count
)
214 pulse_queue_dequeue(pde
);
218 static bool pseq_handler_create_sequences(struct pri_detector
*pde
,
219 u64 ts
, u32 min_count
)
221 struct pulse_elem
*p
;
222 list_for_each_entry(p
, &pde
->pulses
, head
) {
223 struct pri_sequence ps
, *new_ps
;
224 struct pulse_elem
*p2
;
227 u32 delta_ts
= ts
- p
->ts
;
229 if (delta_ts
< pde
->rs
->pri_min
)
230 /* ignore too small pri */
233 if (delta_ts
> pde
->rs
->pri_max
)
234 /* stop on too large pri (sorted list) */
237 /* build a new sequence with new potential pri */
243 ps
.dur
= ps
.pri
* (pde
->rs
->ppb
- 1)
244 + 2 * pde
->rs
->max_pri_tolerance
;
248 min_valid_ts
= ts
- ps
.dur
;
249 /* check which past pulses are candidates for new sequence */
250 list_for_each_entry_continue(p2
, &pde
->pulses
, head
) {
252 if (p2
->ts
< min_valid_ts
)
253 /* stop on crossing window border */
255 /* check if pulse match (multi)PRI */
256 factor
= pde_get_multiple(ps
.last_ts
- p2
->ts
, ps
.pri
,
257 pde
->rs
->max_pri_tolerance
);
260 ps
.first_ts
= p2
->ts
;
262 * on match, add the intermediate falses
265 ps
.count_falses
+= tmp_false_count
;
268 /* this is a potential false one */
272 if (ps
.count
< min_count
)
273 /* did not reach minimum count, drop sequence */
276 /* this is a valid one, add it */
277 ps
.deadline_ts
= ps
.first_ts
+ ps
.dur
;
278 new_ps
= pool_get_pseq_elem();
279 if (new_ps
== NULL
) {
280 new_ps
= kmalloc(sizeof(*new_ps
), GFP_ATOMIC
);
281 if (new_ps
== NULL
) {
282 DFS_POOL_STAT_INC(pseq_alloc_error
);
285 DFS_POOL_STAT_INC(pseq_allocated
);
286 DFS_POOL_STAT_INC(pseq_used
);
288 memcpy(new_ps
, &ps
, sizeof(ps
));
289 INIT_LIST_HEAD(&new_ps
->head
);
290 list_add(&new_ps
->head
, &pde
->sequences
);
295 /* check new ts and add to all matching existing sequences */
297 pseq_handler_add_to_existing_seqs(struct pri_detector
*pde
, u64 ts
)
300 struct pri_sequence
*ps
, *ps2
;
301 list_for_each_entry_safe(ps
, ps2
, &pde
->sequences
, head
) {
305 /* first ensure that sequence is within window */
306 if (ts
> ps
->deadline_ts
) {
307 list_del_init(&ps
->head
);
308 pool_put_pseq_elem(ps
);
312 delta_ts
= ts
- ps
->last_ts
;
313 factor
= pde_get_multiple(delta_ts
, ps
->pri
,
314 pde
->rs
->max_pri_tolerance
);
319 if (max_count
< ps
->count
)
320 max_count
= ps
->count
;
328 static struct pri_sequence
*
329 pseq_handler_check_detection(struct pri_detector
*pde
)
331 struct pri_sequence
*ps
;
333 if (list_empty(&pde
->sequences
))
336 list_for_each_entry(ps
, &pde
->sequences
, head
) {
338 * we assume to have enough matching confidence if we
339 * 1) have enough pulses
340 * 2) have more matching than false pulses
342 if ((ps
->count
>= pde
->rs
->ppb_thresh
) &&
343 (ps
->count
* pde
->rs
->num_pri
>= ps
->count_falses
))
350 /* free pulse queue and sequences list and give objects back to pools */
351 static void pri_detector_reset(struct pri_detector
*pde
, u64 ts
)
353 struct pri_sequence
*ps
, *ps0
;
354 struct pulse_elem
*p
, *p0
;
355 list_for_each_entry_safe(ps
, ps0
, &pde
->sequences
, head
) {
356 list_del_init(&ps
->head
);
357 pool_put_pseq_elem(ps
);
359 list_for_each_entry_safe(p
, p0
, &pde
->pulses
, head
) {
360 list_del_init(&p
->head
);
361 pool_put_pulse_elem(p
);
/* release a detector: drain its lists, drop pool ref, free it */
static void pri_detector_exit(struct pri_detector *de)
{
	pri_detector_reset(de, 0);
	pool_deregister_ref();
	kfree(de);
}
374 static struct pri_sequence
*pri_detector_add_pulse(struct pri_detector
*de
,
375 struct pulse_event
*event
)
378 struct pri_sequence
*ps
;
380 const struct radar_detector_specs
*rs
= de
->rs
;
382 /* ignore pulses not within width range */
383 if ((rs
->width_min
> event
->width
) || (rs
->width_max
< event
->width
))
386 if ((ts
- de
->last_ts
) < rs
->max_pri_tolerance
)
387 /* if delta to last pulse is too short, don't use this pulse */
391 max_updated_seq
= pseq_handler_add_to_existing_seqs(de
, ts
);
393 if (!pseq_handler_create_sequences(de
, ts
, max_updated_seq
)) {
394 pri_detector_reset(de
, ts
);
398 ps
= pseq_handler_check_detection(de
);
401 pulse_queue_enqueue(de
, ts
);
406 struct pri_detector
*pri_detector_init(const struct radar_detector_specs
*rs
)
408 struct pri_detector
*de
;
410 de
= kzalloc(sizeof(*de
), GFP_ATOMIC
);
413 de
->exit
= pri_detector_exit
;
414 de
->add_pulse
= pri_detector_add_pulse
;
415 de
->reset
= pri_detector_reset
;
417 INIT_LIST_HEAD(&de
->sequences
);
418 INIT_LIST_HEAD(&de
->pulses
);
419 de
->window_size
= rs
->pri_max
* rs
->ppb
* rs
->num_pri
;
420 de
->max_count
= rs
->ppb
* 2;