/*
 * drivers/net/wireless/ath/ath9k/dfs_pri_detector.c
 * (recovered from a git web view, blob 5ba4b6fe37c0aa630692c5291858fea0fcae6b35;
 *  the page title "ARM: mm: Recreate kernel mappings in early_paging_init()"
 *  was unrelated viewer residue)
 */
/*
 * Copyright (c) 2012 Neratec Solutions AG
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
#include <linux/slab.h>
#include <linux/spinlock.h>

#include "ath9k.h"
#include "dfs_pattern_detector.h"
#include "dfs_pri_detector.h"
#include "dfs_debug.h"
25 /**
26 * struct pulse_elem - elements in pulse queue
27 * @ts: time stamp in usecs
29 struct pulse_elem {
30 struct list_head head;
31 u64 ts;
34 /**
35 * pde_get_multiple() - get number of multiples considering a given tolerance
36 * @return factor if abs(val - factor*fraction) <= tolerance, 0 otherwise
38 static u32 pde_get_multiple(u32 val, u32 fraction, u32 tolerance)
40 u32 remainder;
41 u32 factor;
42 u32 delta;
44 if (fraction == 0)
45 return 0;
47 delta = (val < fraction) ? (fraction - val) : (val - fraction);
49 if (delta <= tolerance)
50 /* val and fraction are within tolerance */
51 return 1;
53 factor = val / fraction;
54 remainder = val % fraction;
55 if (remainder > tolerance) {
56 /* no exact match */
57 if ((fraction - remainder) <= tolerance)
58 /* remainder is within tolerance */
59 factor++;
60 else
61 factor = 0;
63 return factor;
66 /**
67 * DOC: Singleton Pulse and Sequence Pools
69 * Instances of pri_sequence and pulse_elem are kept in singleton pools to
70 * reduce the number of dynamic allocations. They are shared between all
71 * instances and grow up to the peak number of simultaneously used objects.
73 * Memory is freed after all references to the pools are released.
75 static u32 singleton_pool_references;
76 static LIST_HEAD(pulse_pool);
77 static LIST_HEAD(pseq_pool);
78 static DEFINE_SPINLOCK(pool_lock);
80 static void pool_register_ref(void)
82 spin_lock_bh(&pool_lock);
83 singleton_pool_references++;
84 DFS_POOL_STAT_INC(pool_reference);
85 spin_unlock_bh(&pool_lock);
88 static void pool_deregister_ref(void)
90 spin_lock_bh(&pool_lock);
91 singleton_pool_references--;
92 DFS_POOL_STAT_DEC(pool_reference);
93 if (singleton_pool_references == 0) {
94 /* free singleton pools with no references left */
95 struct pri_sequence *ps, *ps0;
96 struct pulse_elem *p, *p0;
98 list_for_each_entry_safe(p, p0, &pulse_pool, head) {
99 list_del(&p->head);
100 DFS_POOL_STAT_DEC(pulse_allocated);
101 kfree(p);
103 list_for_each_entry_safe(ps, ps0, &pseq_pool, head) {
104 list_del(&ps->head);
105 DFS_POOL_STAT_DEC(pseq_allocated);
106 kfree(ps);
109 spin_unlock_bh(&pool_lock);
112 static void pool_put_pulse_elem(struct pulse_elem *pe)
114 spin_lock_bh(&pool_lock);
115 list_add(&pe->head, &pulse_pool);
116 DFS_POOL_STAT_DEC(pulse_used);
117 spin_unlock_bh(&pool_lock);
120 static void pool_put_pseq_elem(struct pri_sequence *pse)
122 spin_lock_bh(&pool_lock);
123 list_add(&pse->head, &pseq_pool);
124 DFS_POOL_STAT_DEC(pseq_used);
125 spin_unlock_bh(&pool_lock);
128 static struct pri_sequence *pool_get_pseq_elem(void)
130 struct pri_sequence *pse = NULL;
131 spin_lock_bh(&pool_lock);
132 if (!list_empty(&pseq_pool)) {
133 pse = list_first_entry(&pseq_pool, struct pri_sequence, head);
134 list_del(&pse->head);
135 DFS_POOL_STAT_INC(pseq_used);
137 spin_unlock_bh(&pool_lock);
138 return pse;
141 static struct pulse_elem *pool_get_pulse_elem(void)
143 struct pulse_elem *pe = NULL;
144 spin_lock_bh(&pool_lock);
145 if (!list_empty(&pulse_pool)) {
146 pe = list_first_entry(&pulse_pool, struct pulse_elem, head);
147 list_del(&pe->head);
148 DFS_POOL_STAT_INC(pulse_used);
150 spin_unlock_bh(&pool_lock);
151 return pe;
154 static struct pulse_elem *pulse_queue_get_tail(struct pri_detector *pde)
156 struct list_head *l = &pde->pulses;
157 if (list_empty(l))
158 return NULL;
159 return list_entry(l->prev, struct pulse_elem, head);
162 static bool pulse_queue_dequeue(struct pri_detector *pde)
164 struct pulse_elem *p = pulse_queue_get_tail(pde);
165 if (p != NULL) {
166 list_del_init(&p->head);
167 pde->count--;
168 /* give it back to pool */
169 pool_put_pulse_elem(p);
171 return (pde->count > 0);
174 /* remove pulses older than window */
175 static void pulse_queue_check_window(struct pri_detector *pde)
177 u64 min_valid_ts;
178 struct pulse_elem *p;
180 /* there is no delta time with less than 2 pulses */
181 if (pde->count < 2)
182 return;
184 if (pde->last_ts <= pde->window_size)
185 return;
187 min_valid_ts = pde->last_ts - pde->window_size;
188 while ((p = pulse_queue_get_tail(pde)) != NULL) {
189 if (p->ts >= min_valid_ts)
190 return;
191 pulse_queue_dequeue(pde);
195 static bool pulse_queue_enqueue(struct pri_detector *pde, u64 ts)
197 struct pulse_elem *p = pool_get_pulse_elem();
198 if (p == NULL) {
199 p = kmalloc(sizeof(*p), GFP_ATOMIC);
200 if (p == NULL) {
201 DFS_POOL_STAT_INC(pulse_alloc_error);
202 return false;
204 DFS_POOL_STAT_INC(pulse_allocated);
205 DFS_POOL_STAT_INC(pulse_used);
207 INIT_LIST_HEAD(&p->head);
208 p->ts = ts;
209 list_add(&p->head, &pde->pulses);
210 pde->count++;
211 pde->last_ts = ts;
212 pulse_queue_check_window(pde);
213 if (pde->count >= pde->max_count)
214 pulse_queue_dequeue(pde);
215 return true;
218 static bool pseq_handler_create_sequences(struct pri_detector *pde,
219 u64 ts, u32 min_count)
221 struct pulse_elem *p;
222 list_for_each_entry(p, &pde->pulses, head) {
223 struct pri_sequence ps, *new_ps;
224 struct pulse_elem *p2;
225 u32 tmp_false_count;
226 u64 min_valid_ts;
227 u32 delta_ts = ts - p->ts;
229 if (delta_ts < pde->rs->pri_min)
230 /* ignore too small pri */
231 continue;
233 if (delta_ts > pde->rs->pri_max)
234 /* stop on too large pri (sorted list) */
235 break;
237 /* build a new sequence with new potential pri */
238 ps.count = 2;
239 ps.count_falses = 0;
240 ps.first_ts = p->ts;
241 ps.last_ts = ts;
242 ps.pri = ts - p->ts;
243 ps.dur = ps.pri * (pde->rs->ppb - 1)
244 + 2 * pde->rs->max_pri_tolerance;
246 p2 = p;
247 tmp_false_count = 0;
248 min_valid_ts = ts - ps.dur;
249 /* check which past pulses are candidates for new sequence */
250 list_for_each_entry_continue(p2, &pde->pulses, head) {
251 u32 factor;
252 if (p2->ts < min_valid_ts)
253 /* stop on crossing window border */
254 break;
255 /* check if pulse match (multi)PRI */
256 factor = pde_get_multiple(ps.last_ts - p2->ts, ps.pri,
257 pde->rs->max_pri_tolerance);
258 if (factor > 0) {
259 ps.count++;
260 ps.first_ts = p2->ts;
262 * on match, add the intermediate falses
263 * and reset counter
265 ps.count_falses += tmp_false_count;
266 tmp_false_count = 0;
267 } else {
268 /* this is a potential false one */
269 tmp_false_count++;
272 if (ps.count < min_count)
273 /* did not reach minimum count, drop sequence */
274 continue;
276 /* this is a valid one, add it */
277 ps.deadline_ts = ps.first_ts + ps.dur;
278 new_ps = pool_get_pseq_elem();
279 if (new_ps == NULL) {
280 new_ps = kmalloc(sizeof(*new_ps), GFP_ATOMIC);
281 if (new_ps == NULL) {
282 DFS_POOL_STAT_INC(pseq_alloc_error);
283 return false;
285 DFS_POOL_STAT_INC(pseq_allocated);
286 DFS_POOL_STAT_INC(pseq_used);
288 memcpy(new_ps, &ps, sizeof(ps));
289 INIT_LIST_HEAD(&new_ps->head);
290 list_add(&new_ps->head, &pde->sequences);
292 return true;
295 /* check new ts and add to all matching existing sequences */
296 static u32
297 pseq_handler_add_to_existing_seqs(struct pri_detector *pde, u64 ts)
299 u32 max_count = 0;
300 struct pri_sequence *ps, *ps2;
301 list_for_each_entry_safe(ps, ps2, &pde->sequences, head) {
302 u32 delta_ts;
303 u32 factor;
305 /* first ensure that sequence is within window */
306 if (ts > ps->deadline_ts) {
307 list_del_init(&ps->head);
308 pool_put_pseq_elem(ps);
309 continue;
312 delta_ts = ts - ps->last_ts;
313 factor = pde_get_multiple(delta_ts, ps->pri,
314 pde->rs->max_pri_tolerance);
315 if (factor > 0) {
316 ps->last_ts = ts;
317 ps->count++;
319 if (max_count < ps->count)
320 max_count = ps->count;
321 } else {
322 ps->count_falses++;
325 return max_count;
328 static struct pri_sequence *
329 pseq_handler_check_detection(struct pri_detector *pde)
331 struct pri_sequence *ps;
333 if (list_empty(&pde->sequences))
334 return NULL;
336 list_for_each_entry(ps, &pde->sequences, head) {
338 * we assume to have enough matching confidence if we
339 * 1) have enough pulses
340 * 2) have more matching than false pulses
342 if ((ps->count >= pde->rs->ppb_thresh) &&
343 (ps->count * pde->rs->num_pri >= ps->count_falses))
344 return ps;
346 return NULL;
350 /* free pulse queue and sequences list and give objects back to pools */
351 static void pri_detector_reset(struct pri_detector *pde, u64 ts)
353 struct pri_sequence *ps, *ps0;
354 struct pulse_elem *p, *p0;
355 list_for_each_entry_safe(ps, ps0, &pde->sequences, head) {
356 list_del_init(&ps->head);
357 pool_put_pseq_elem(ps);
359 list_for_each_entry_safe(p, p0, &pde->pulses, head) {
360 list_del_init(&p->head);
361 pool_put_pulse_elem(p);
363 pde->count = 0;
364 pde->last_ts = ts;
/* Tear down a detector: flush its state, drop the pool reference, free it. */
static void pri_detector_exit(struct pri_detector *de)
{
	pri_detector_reset(de, 0);
	pool_deregister_ref();
	kfree(de);
}
374 static struct pri_sequence *pri_detector_add_pulse(struct pri_detector *de,
375 struct pulse_event *event)
377 u32 max_updated_seq;
378 struct pri_sequence *ps;
379 u64 ts = event->ts;
380 const struct radar_detector_specs *rs = de->rs;
382 /* ignore pulses not within width range */
383 if ((rs->width_min > event->width) || (rs->width_max < event->width))
384 return NULL;
386 if ((ts - de->last_ts) < rs->max_pri_tolerance)
387 /* if delta to last pulse is too short, don't use this pulse */
388 return NULL;
389 de->last_ts = ts;
391 max_updated_seq = pseq_handler_add_to_existing_seqs(de, ts);
393 if (!pseq_handler_create_sequences(de, ts, max_updated_seq)) {
394 pri_detector_reset(de, ts);
395 return false;
398 ps = pseq_handler_check_detection(de);
400 if (ps == NULL)
401 pulse_queue_enqueue(de, ts);
403 return ps;
406 struct pri_detector *pri_detector_init(const struct radar_detector_specs *rs)
408 struct pri_detector *de;
410 de = kzalloc(sizeof(*de), GFP_ATOMIC);
411 if (de == NULL)
412 return NULL;
413 de->exit = pri_detector_exit;
414 de->add_pulse = pri_detector_add_pulse;
415 de->reset = pri_detector_reset;
417 INIT_LIST_HEAD(&de->sequences);
418 INIT_LIST_HEAD(&de->pulses);
419 de->window_size = rs->pri_max * rs->ppb * rs->num_pri;
420 de->max_count = rs->ppb * 2;
421 de->rs = rs;
423 pool_register_ref();
424 return de;