Linux 4.19.133
drivers/net/wireless/broadcom/brcm80211/brcmfmac/pno.c (blob ffa243e2e2d0fa87afcbbf5917f1538c68b19f50)
/*
 * Copyright (c) 2016 Broadcom
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
#include <linux/netdevice.h>
#include <linux/gcd.h>
#include <net/cfg80211.h>

#include "core.h"
#include "debug.h"
#include "fwil.h"
#include "fwil_types.h"
#include "cfg80211.h"
#include "pno.h"

#define BRCMF_PNO_VERSION               2
#define BRCMF_PNO_REPEAT                4
#define BRCMF_PNO_FREQ_EXPO_MAX         3
#define BRCMF_PNO_IMMEDIATE_SCAN_BIT    3
#define BRCMF_PNO_ENABLE_BD_SCAN_BIT    5
#define BRCMF_PNO_ENABLE_ADAPTSCAN_BIT  6
#define BRCMF_PNO_REPORT_SEPARATELY_BIT 11
#define BRCMF_PNO_SCAN_INCOMPLETE       0
#define BRCMF_PNO_WPA_AUTH_ANY          0xFFFFFFFF
#define BRCMF_PNO_HIDDEN_BIT            2
#define BRCMF_PNO_SCHED_SCAN_PERIOD     30

#define BRCMF_PNO_MAX_BUCKETS           16
#define GSCAN_BATCH_NO_THR_SET          101
#define GSCAN_RETRY_THRESHOLD           3

struct brcmf_pno_info {
        int n_reqs;
        struct cfg80211_sched_scan_request *reqs[BRCMF_PNO_MAX_BUCKETS];
        struct mutex req_lock;
};

#define ifp_to_pno(_ifp)        ((_ifp)->drvr->config->pno)

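/* brcmf_pno_store_request() - store a scheduled scan request so the firmware
 * can be (re)programmed later. The request array is protected by req_lock and
 * bounded by BRCMF_PNO_MAX_BUCKETS.
 */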
static int brcmf_pno_store_request(struct brcmf_pno_info *pi,
                                   struct cfg80211_sched_scan_request *req)
{
        if (WARN(pi->n_reqs == BRCMF_PNO_MAX_BUCKETS,
                 "pno request storage full\n"))
                return -ENOSPC;

        brcmf_dbg(SCAN, "reqid=%llu\n", req->reqid);
        mutex_lock(&pi->req_lock);
        pi->reqs[pi->n_reqs++] = req;
        mutex_unlock(&pi->req_lock);
        return 0;
}

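/* brcmf_pno_remove_request() - remove the stored request matching @reqid and
 * compact the remaining entries so pi->reqs[] stays contiguous.
 */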
static int brcmf_pno_remove_request(struct brcmf_pno_info *pi, u64 reqid)
{
        int i, err = 0;

        mutex_lock(&pi->req_lock);

        /* find request */
        for (i = 0; i < pi->n_reqs; i++) {
                if (pi->reqs[i]->reqid == reqid)
                        break;
        }
        /* request not found */
        if (WARN(i == pi->n_reqs, "reqid not found\n")) {
                err = -ENOENT;
                goto done;
        }

        brcmf_dbg(SCAN, "reqid=%llu\n", reqid);
        pi->n_reqs--;

        /* if last we are done */
        if (!pi->n_reqs || i == pi->n_reqs)
                goto done;

        /* fill the gap with remaining requests */
        while (i <= pi->n_reqs - 1) {
                pi->reqs[i] = pi->reqs[i + 1];
                i++;
        }

done:
        mutex_unlock(&pi->req_lock);
        return err;
}

static int brcmf_pno_channel_config(struct brcmf_if *ifp,
                                    struct brcmf_pno_config_le *cfg)
{
        cfg->reporttype = 0;
        cfg->flags = 0;

        return brcmf_fil_iovar_data_set(ifp, "pfn_cfg", cfg, sizeof(*cfg));
}

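/* brcmf_pno_config() - program the global PNO parameters via the "pfn_set"
 * iovar.
 *
 * @scan_freq: base scan period handed to the firmware.
 * @mscan: when non-zero, best-network batching is enabled; the value is
 *         clamped to whatever the firmware reports back through "pfnmem".
 * @bestn: number of best networks to keep per scan when batching.
 */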
static int brcmf_pno_config(struct brcmf_if *ifp, u32 scan_freq,
                            u32 mscan, u32 bestn)
{
        struct brcmf_pno_param_le pfn_param;
        u16 flags;
        u32 pfnmem;
        s32 err;

        memset(&pfn_param, 0, sizeof(pfn_param));
        pfn_param.version = cpu_to_le32(BRCMF_PNO_VERSION);

        /* set extra pno params */
        flags = BIT(BRCMF_PNO_IMMEDIATE_SCAN_BIT) |
                BIT(BRCMF_PNO_ENABLE_ADAPTSCAN_BIT);
        pfn_param.repeat = BRCMF_PNO_REPEAT;
        pfn_param.exp = BRCMF_PNO_FREQ_EXPO_MAX;

        /* set up pno scan frequency */
        pfn_param.scan_freq = cpu_to_le32(scan_freq);

        if (mscan) {
                pfnmem = bestn;

                /* set bestn in firmware */
                err = brcmf_fil_iovar_int_set(ifp, "pfnmem", pfnmem);
                if (err < 0) {
                        brcmf_err("failed to set pfnmem\n");
                        goto exit;
                }
                /* get max mscan which the firmware supports */
                err = brcmf_fil_iovar_int_get(ifp, "pfnmem", &pfnmem);
                if (err < 0) {
                        brcmf_err("failed to get pfnmem\n");
                        goto exit;
                }
                mscan = min_t(u32, mscan, pfnmem);
                pfn_param.mscan = mscan;
                pfn_param.bestn = bestn;
                flags |= BIT(BRCMF_PNO_ENABLE_BD_SCAN_BIT);
                brcmf_dbg(INFO, "mscan=%d, bestn=%d\n", mscan, bestn);
        }

        pfn_param.flags = cpu_to_le16(flags);
        err = brcmf_fil_iovar_data_set(ifp, "pfn_set", &pfn_param,
                                       sizeof(pfn_param));
        if (err)
                brcmf_err("pfn_set failed, err=%d\n", err);

exit:
        return err;
}

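/* brcmf_pno_set_random() - configure a randomized MAC address for PNO scans.
 *
 * Uses the first stored request that has NL80211_SCAN_FLAG_RANDOM_ADDR set:
 * bits covered by mac_addr_mask are taken from mac_addr and the remaining
 * bits are randomized, e.g. a mask of ff:ff:ff:00:00:00 keeps the OUI and
 * randomizes the lower three octets. The multicast bit is cleared and the
 * locally-administered bit is set before the address is handed to firmware.
 */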
static int brcmf_pno_set_random(struct brcmf_if *ifp, struct brcmf_pno_info *pi)
{
        struct brcmf_pno_macaddr_le pfn_mac;
        u8 *mac_addr = NULL;
        u8 *mac_mask = NULL;
        int err, i;

        for (i = 0; i < pi->n_reqs; i++)
                if (pi->reqs[i]->flags & NL80211_SCAN_FLAG_RANDOM_ADDR) {
                        mac_addr = pi->reqs[i]->mac_addr;
                        mac_mask = pi->reqs[i]->mac_addr_mask;
                        break;
                }

        /* no random mac requested */
        if (!mac_addr)
                return 0;

        pfn_mac.version = BRCMF_PFN_MACADDR_CFG_VER;
        pfn_mac.flags = BRCMF_PFN_MAC_OUI_ONLY | BRCMF_PFN_SET_MAC_UNASSOC;

        memcpy(pfn_mac.mac, mac_addr, ETH_ALEN);
        for (i = 0; i < ETH_ALEN; i++) {
                pfn_mac.mac[i] &= mac_mask[i];
                pfn_mac.mac[i] |= get_random_int() & ~(mac_mask[i]);
        }
        /* Clear multi bit */
        pfn_mac.mac[0] &= 0xFE;
        /* Set locally administered */
        pfn_mac.mac[0] |= 0x02;

        brcmf_dbg(SCAN, "enabling random mac: reqid=%llu mac=%pM\n",
                  pi->reqs[i]->reqid, pfn_mac.mac);
        err = brcmf_fil_iovar_data_set(ifp, "pfn_macaddr", &pfn_mac,
                                       sizeof(pfn_mac));
        if (err)
                brcmf_err("pfn_macaddr failed, err=%d\n", err);

        return err;
}

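/* brcmf_pno_add_ssid() - add an SSID to the firmware PFN list ("pfn_add").
 * When @active is true the BRCMF_PNO_HIDDEN_BIT flag is set, marking the
 * SSID for active probing rather than passive beacon matching.
 */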
static int brcmf_pno_add_ssid(struct brcmf_if *ifp, struct cfg80211_ssid *ssid,
                              bool active)
{
        struct brcmf_pno_net_param_le pfn;
        int err;

        pfn.auth = cpu_to_le32(WLAN_AUTH_OPEN);
        pfn.wpa_auth = cpu_to_le32(BRCMF_PNO_WPA_AUTH_ANY);
        pfn.wsec = cpu_to_le32(0);
        pfn.infra = cpu_to_le32(1);
        pfn.flags = 0;
        if (active)
                pfn.flags = cpu_to_le32(1 << BRCMF_PNO_HIDDEN_BIT);
        pfn.ssid.SSID_len = cpu_to_le32(ssid->ssid_len);
        memcpy(pfn.ssid.SSID, ssid->ssid, ssid->ssid_len);

        brcmf_dbg(SCAN, "adding ssid=%.32s (active=%d)\n", ssid->ssid, active);
        err = brcmf_fil_iovar_data_set(ifp, "pfn_add", &pfn, sizeof(pfn));
        if (err < 0)
                brcmf_err("adding failed: err=%d\n", err);
        return err;
}

static int brcmf_pno_add_bssid(struct brcmf_if *ifp, const u8 *bssid)
{
        struct brcmf_pno_bssid_le bssid_cfg;
        int err;

        memcpy(bssid_cfg.bssid, bssid, ETH_ALEN);
        bssid_cfg.flags = 0;

        brcmf_dbg(SCAN, "adding bssid=%pM\n", bssid);
        err = brcmf_fil_iovar_data_set(ifp, "pfn_add_bssid", &bssid_cfg,
                                       sizeof(bssid_cfg));
        if (err < 0)
                brcmf_err("adding failed: err=%d\n", err);
        return err;
}

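/* brcmf_is_ssid_active() - check whether @ssid also appears in the request's
 * ssids[] list, i.e. whether it should be scanned for actively.
 */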
static bool brcmf_is_ssid_active(struct cfg80211_ssid *ssid,
                                 struct cfg80211_sched_scan_request *req)
{
        int i;

        if (!ssid || !req->ssids || !req->n_ssids)
                return false;

        for (i = 0; i < req->n_ssids; i++) {
                if (ssid->ssid_len == req->ssids[i].ssid_len) {
                        if (!strncmp(ssid->ssid, req->ssids[i].ssid,
                                     ssid->ssid_len))
                                return true;
                }
        }
        return false;
}

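/* brcmf_pno_clean() - disable PNO in firmware ("pfn" = 0) and clear all
 * previously added networks ("pfnclear").
 */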
static int brcmf_pno_clean(struct brcmf_if *ifp)
{
        int ret;

        /* Disable pfn */
        ret = brcmf_fil_iovar_int_set(ifp, "pfn", 0);
        if (ret == 0) {
                /* clear pfn */
                ret = brcmf_fil_iovar_data_set(ifp, "pfnclear", NULL, 0);
        }
        if (ret < 0)
                brcmf_err("failed code %d\n", ret);

        return ret;
}

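/* brcmf_pno_get_bucket_channels() - append the channels of request @r to the
 * shared channel list in @pno_cfg and return the new channel count, or
 * -ENOSPC once BRCMF_NUMCHANNELS would be exceeded.
 */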
static int brcmf_pno_get_bucket_channels(struct cfg80211_sched_scan_request *r,
                                         struct brcmf_pno_config_le *pno_cfg)
{
        u32 n_chan = le32_to_cpu(pno_cfg->channel_num);
        u16 chan;
        int i, err = 0;

        for (i = 0; i < r->n_channels; i++) {
                if (n_chan >= BRCMF_NUMCHANNELS) {
                        err = -ENOSPC;
                        goto done;
                }
                chan = r->channels[i]->hw_value;
                brcmf_dbg(SCAN, "[%d] Chan : %u\n", n_chan, chan);
                pno_cfg->channel_list[n_chan++] = cpu_to_le16(chan);
        }
        /* return number of channels */
        err = n_chan;
done:
        pno_cfg->channel_num = cpu_to_le32(n_chan);
        return err;
}

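/* brcmf_pno_prep_fwconfig() - translate the stored scheduled-scan requests
 * into firmware gscan buckets.
 *
 * The base scan period is the gcd of all requested intervals and each bucket
 * scans at an integer multiple of it; e.g. requests with intervals 30 and 45
 * give a base period of 15 with bucket multiples 2 and 3. Returns the number
 * of buckets and hands ownership of the allocated bucket array to the caller
 * via @buckets.
 */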
static int brcmf_pno_prep_fwconfig(struct brcmf_pno_info *pi,
                                   struct brcmf_pno_config_le *pno_cfg,
                                   struct brcmf_gscan_bucket_config **buckets,
                                   u32 *scan_freq)
{
        struct cfg80211_sched_scan_request *sr;
        struct brcmf_gscan_bucket_config *fw_buckets;
        int i, err, chidx;

        brcmf_dbg(SCAN, "n_reqs=%d\n", pi->n_reqs);
        if (WARN_ON(!pi->n_reqs))
                return -ENODATA;

        /*
         * actual scan period is determined using gcd() for each
         * scheduled scan period.
         */
        *scan_freq = pi->reqs[0]->scan_plans[0].interval;
        for (i = 1; i < pi->n_reqs; i++) {
                sr = pi->reqs[i];
                *scan_freq = gcd(sr->scan_plans[0].interval, *scan_freq);
        }
        if (*scan_freq < BRCMF_PNO_SCHED_SCAN_MIN_PERIOD) {
                brcmf_dbg(SCAN, "scan period too small, using minimum\n");
                *scan_freq = BRCMF_PNO_SCHED_SCAN_MIN_PERIOD;
        }

        *buckets = NULL;
        fw_buckets = kcalloc(pi->n_reqs, sizeof(*fw_buckets), GFP_KERNEL);
        if (!fw_buckets)
                return -ENOMEM;

        memset(pno_cfg, 0, sizeof(*pno_cfg));
        for (i = 0; i < pi->n_reqs; i++) {
                sr = pi->reqs[i];
                chidx = brcmf_pno_get_bucket_channels(sr, pno_cfg);
                if (chidx < 0) {
                        err = chidx;
                        goto fail;
                }
                fw_buckets[i].bucket_end_index = chidx - 1;
                fw_buckets[i].bucket_freq_multiple =
                        sr->scan_plans[0].interval / *scan_freq;
                /* assure period is non-zero */
                if (!fw_buckets[i].bucket_freq_multiple)
                        fw_buckets[i].bucket_freq_multiple = 1;
                fw_buckets[i].flag = BRCMF_PNO_REPORT_NO_BATCH;
        }

        if (BRCMF_SCAN_ON()) {
                brcmf_err("base period=%u\n", *scan_freq);
                for (i = 0; i < pi->n_reqs; i++) {
                        brcmf_err("[%d] period %u max %u repeat %u flag %x idx %u\n",
                                  i, fw_buckets[i].bucket_freq_multiple,
                                  le16_to_cpu(fw_buckets[i].max_freq_multiple),
                                  fw_buckets[i].repeat, fw_buckets[i].flag,
                                  fw_buckets[i].bucket_end_index);
                }
        }
        *buckets = fw_buckets;
        return pi->n_reqs;

fail:
        kfree(fw_buckets);
        return err;
}

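/* brcmf_pno_config_networks() - push all match sets of all stored requests
 * to the firmware, adding an SSID entry per match set and a BSSID entry when
 * a valid BSSID is given.
 */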
static int brcmf_pno_config_networks(struct brcmf_if *ifp,
                                     struct brcmf_pno_info *pi)
{
        struct cfg80211_sched_scan_request *r;
        struct cfg80211_match_set *ms;
        bool active;
        int i, j, err = 0;

        for (i = 0; i < pi->n_reqs; i++) {
                r = pi->reqs[i];

                for (j = 0; j < r->n_match_sets; j++) {
                        ms = &r->match_sets[j];
                        if (ms->ssid.ssid_len) {
                                active = brcmf_is_ssid_active(&ms->ssid, r);
                                err = brcmf_pno_add_ssid(ifp, &ms->ssid,
                                                         active);
                        }
                        if (!err && is_valid_ether_addr(ms->bssid))
                                err = brcmf_pno_add_bssid(ifp, ms->bssid);

                        if (err < 0)
                                return err;
                }
        }
        return 0;
}

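/* brcmf_pno_config_sched_scans() - (re)program the firmware from scratch:
 * build the bucket configuration, clean any previous PNO state, configure
 * scan period, channels, gscan buckets, MAC randomization and networks, and
 * finally enable PNO ("pfn" = 1). Any failure cleans the firmware state up
 * again.
 */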
static int brcmf_pno_config_sched_scans(struct brcmf_if *ifp)
{
        struct brcmf_pno_info *pi;
        struct brcmf_gscan_config *gscan_cfg;
        struct brcmf_gscan_bucket_config *buckets;
        struct brcmf_pno_config_le pno_cfg;
        size_t gsz;
        u32 scan_freq;
        int err, n_buckets;

        pi = ifp_to_pno(ifp);
        n_buckets = brcmf_pno_prep_fwconfig(pi, &pno_cfg, &buckets,
                                            &scan_freq);
        if (n_buckets < 0)
                return n_buckets;

        gsz = sizeof(*gscan_cfg) + (n_buckets - 1) * sizeof(*buckets);
        gscan_cfg = kzalloc(gsz, GFP_KERNEL);
        if (!gscan_cfg) {
                err = -ENOMEM;
                goto free_buckets;
        }

        /* clean up everything */
        err = brcmf_pno_clean(ifp);
        if (err < 0) {
                brcmf_err("failed error=%d\n", err);
                goto free_gscan;
        }

        /* configure pno */
        err = brcmf_pno_config(ifp, scan_freq, 0, 0);
        if (err < 0)
                goto free_gscan;

        err = brcmf_pno_channel_config(ifp, &pno_cfg);
        if (err < 0)
                goto clean;

        gscan_cfg->version = cpu_to_le16(BRCMF_GSCAN_CFG_VERSION);
        gscan_cfg->retry_threshold = GSCAN_RETRY_THRESHOLD;
        gscan_cfg->buffer_threshold = GSCAN_BATCH_NO_THR_SET;
        gscan_cfg->flags = BRCMF_GSCAN_CFG_ALL_BUCKETS_IN_1ST_SCAN;

        gscan_cfg->count_of_channel_buckets = n_buckets;
        memcpy(&gscan_cfg->bucket[0], buckets,
               n_buckets * sizeof(*buckets));

        err = brcmf_fil_iovar_data_set(ifp, "pfn_gscan_cfg", gscan_cfg, gsz);

        if (err < 0)
                goto clean;

        /* configure random mac */
        err = brcmf_pno_set_random(ifp, pi);
        if (err < 0)
                goto clean;

        err = brcmf_pno_config_networks(ifp, pi);
        if (err < 0)
                goto clean;

        /* Enable the PNO */
        err = brcmf_fil_iovar_int_set(ifp, "pfn", 1);

clean:
        if (err < 0)
                brcmf_pno_clean(ifp);
free_gscan:
        kfree(gscan_cfg);
free_buckets:
        kfree(buckets);
        return err;
}

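/* brcmf_pno_start_sched_scan() - add @req to the stored requests and
 * reprogram the firmware; on failure the request is dropped again and the
 * previous configuration is restored for any remaining requests.
 */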
int brcmf_pno_start_sched_scan(struct brcmf_if *ifp,
                               struct cfg80211_sched_scan_request *req)
{
        struct brcmf_pno_info *pi;
        int ret;

        brcmf_dbg(TRACE, "reqid=%llu\n", req->reqid);

        pi = ifp_to_pno(ifp);
        ret = brcmf_pno_store_request(pi, req);
        if (ret < 0)
                return ret;

        ret = brcmf_pno_config_sched_scans(ifp);
        if (ret < 0) {
                brcmf_pno_remove_request(pi, req->reqid);
                if (pi->n_reqs)
                        (void)brcmf_pno_config_sched_scans(ifp);
                return ret;
        }
        return 0;
}

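/* brcmf_pno_stop_sched_scan() - remove the request identified by @reqid,
 * disable PNO in firmware and, if other requests remain, reprogram the
 * firmware with those.
 */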
int brcmf_pno_stop_sched_scan(struct brcmf_if *ifp, u64 reqid)
{
        struct brcmf_pno_info *pi;
        int err;

        brcmf_dbg(TRACE, "reqid=%llu\n", reqid);

        pi = ifp_to_pno(ifp);
        err = brcmf_pno_remove_request(pi, reqid);
        if (err)
                return err;

        brcmf_pno_clean(ifp);

        if (pi->n_reqs)
                (void)brcmf_pno_config_sched_scans(ifp);

        return 0;
}

int brcmf_pno_attach(struct brcmf_cfg80211_info *cfg)
{
        struct brcmf_pno_info *pi;

        brcmf_dbg(TRACE, "enter\n");
        pi = kzalloc(sizeof(*pi), GFP_KERNEL);
        if (!pi)
                return -ENOMEM;

        cfg->pno = pi;
        mutex_init(&pi->req_lock);
        return 0;
}

void brcmf_pno_detach(struct brcmf_cfg80211_info *cfg)
{
        struct brcmf_pno_info *pi;

        brcmf_dbg(TRACE, "enter\n");
        pi = cfg->pno;
        cfg->pno = NULL;

        WARN_ON(pi->n_reqs);
        mutex_destroy(&pi->req_lock);
        kfree(pi);
}

void brcmf_pno_wiphy_params(struct wiphy *wiphy, bool gscan)
{
        /* scheduled scan settings */
        wiphy->max_sched_scan_reqs = gscan ? BRCMF_PNO_MAX_BUCKETS : 1;
        wiphy->max_sched_scan_ssids = BRCMF_PNO_MAX_PFN_COUNT;
        wiphy->max_match_sets = BRCMF_PNO_MAX_PFN_COUNT;
        wiphy->max_sched_scan_ie_len = BRCMF_SCAN_IE_LEN_MAX;
        wiphy->max_sched_scan_plan_interval = BRCMF_PNO_SCHED_SCAN_MAX_PERIOD;
}

u64 brcmf_pno_find_reqid_by_bucket(struct brcmf_pno_info *pi, u32 bucket)
{
        u64 reqid = 0;

        mutex_lock(&pi->req_lock);

        if (bucket < pi->n_reqs)
                reqid = pi->reqs[bucket]->reqid;

        mutex_unlock(&pi->req_lock);
        return reqid;
}

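/* brcmf_pno_get_bucket_map() - build a bitmap of the requests (buckets) whose
 * match sets cover the network reported in @ni, matching on SSID or BSSID.
 */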
u32 brcmf_pno_get_bucket_map(struct brcmf_pno_info *pi,
                             struct brcmf_pno_net_info_le *ni)
{
        struct cfg80211_sched_scan_request *req;
        struct cfg80211_match_set *ms;
        u32 bucket_map = 0;
        int i, j;

        mutex_lock(&pi->req_lock);
        for (i = 0; i < pi->n_reqs; i++) {
                req = pi->reqs[i];

                if (!req->n_match_sets)
                        continue;
                for (j = 0; j < req->n_match_sets; j++) {
                        ms = &req->match_sets[j];
                        if (ms->ssid.ssid_len == ni->SSID_len &&
                            !memcmp(ms->ssid.ssid, ni->SSID, ni->SSID_len)) {
                                bucket_map |= BIT(i);
                                break;
                        }
                        if (is_valid_ether_addr(ms->bssid) &&
                            !memcmp(ms->bssid, ni->bssid, ETH_ALEN)) {
                                bucket_map |= BIT(i);
                                break;
                        }
                }
        }
        mutex_unlock(&pi->req_lock);
        return bucket_map;
}