/* drivers/net/wireless/mediatek/mt76/mac80211.c */

/*
 * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
#include <linux/of.h>
#include "mt76.h"

#define CHAN2G(_idx, _freq) {		\
	.band = NL80211_BAND_2GHZ,	\
	.center_freq = (_freq),		\
	.hw_value = (_idx),		\
	.max_power = 30,		\
}

#define CHAN5G(_idx, _freq) {		\
	.band = NL80211_BAND_5GHZ,	\
	.center_freq = (_freq),		\
	.hw_value = (_idx),		\
	.max_power = 30,		\
}

static const struct ieee80211_channel mt76_channels_2ghz[] = {
	CHAN2G(1, 2412),
	CHAN2G(2, 2417),
	CHAN2G(3, 2422),
	CHAN2G(4, 2427),
	CHAN2G(5, 2432),
	CHAN2G(6, 2437),
	CHAN2G(7, 2442),
	CHAN2G(8, 2447),
	CHAN2G(9, 2452),
	CHAN2G(10, 2457),
	CHAN2G(11, 2462),
	CHAN2G(12, 2467),
	CHAN2G(13, 2472),
	CHAN2G(14, 2484),
};

static const struct ieee80211_channel mt76_channels_5ghz[] = {
	CHAN5G(36, 5180),
	CHAN5G(40, 5200),
	CHAN5G(44, 5220),
	CHAN5G(48, 5240),

	CHAN5G(52, 5260),
	CHAN5G(56, 5280),
	CHAN5G(60, 5300),
	CHAN5G(64, 5320),

	CHAN5G(100, 5500),
	CHAN5G(104, 5520),
	CHAN5G(108, 5540),
	CHAN5G(112, 5560),
	CHAN5G(116, 5580),
	CHAN5G(120, 5600),
	CHAN5G(124, 5620),
	CHAN5G(128, 5640),
	CHAN5G(132, 5660),
	CHAN5G(136, 5680),
	CHAN5G(140, 5700),

	CHAN5G(149, 5745),
	CHAN5G(153, 5765),
	CHAN5G(157, 5785),
	CHAN5G(161, 5805),
	CHAN5G(165, 5825),
};

static const struct ieee80211_tpt_blink mt76_tpt_blink[] = {
	{ .throughput = 0 * 1024, .blink_time = 334 },
	{ .throughput = 1 * 1024, .blink_time = 260 },
	{ .throughput = 5 * 1024, .blink_time = 220 },
	{ .throughput = 10 * 1024, .blink_time = 190 },
	{ .throughput = 20 * 1024, .blink_time = 170 },
	{ .throughput = 50 * 1024, .blink_time = 150 },
	{ .throughput = 70 * 1024, .blink_time = 130 },
	{ .throughput = 100 * 1024, .blink_time = 110 },
	{ .throughput = 200 * 1024, .blink_time = 80 },
	{ .throughput = 300 * 1024, .blink_time = 50 },
};

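/*
 * Register an LED class device for the radio, using the throughput blink
 * table above as the default mac80211 TPT trigger. The LED pin and polarity
 * can be overridden via an optional "led" device tree child node.
 */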
static int mt76_led_init(struct mt76_dev *dev)
{
	struct device_node *np = dev->dev->of_node;
	struct ieee80211_hw *hw = dev->hw;
	int led_pin;

	if (!dev->led_cdev.brightness_set && !dev->led_cdev.blink_set)
		return 0;

	snprintf(dev->led_name, sizeof(dev->led_name),
		 "mt76-%s", wiphy_name(hw->wiphy));

	dev->led_cdev.name = dev->led_name;
	dev->led_cdev.default_trigger =
		ieee80211_create_tpt_led_trigger(hw,
					IEEE80211_TPT_LEDTRIG_FL_RADIO,
					mt76_tpt_blink,
					ARRAY_SIZE(mt76_tpt_blink));

	np = of_get_child_by_name(np, "led");
	if (np) {
		if (!of_property_read_u32(np, "led-sources", &led_pin))
			dev->led_pin = led_pin;
		dev->led_al = of_property_read_bool(np, "led-active-low");
	}

	return devm_led_classdev_register(dev->dev, &dev->led_cdev);
}

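/*
 * Derive the number of spatial streams from the antenna mask and advertise
 * matching HT (and optionally VHT) TX STBC support and RX MCS maps.
 */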
static void mt76_init_stream_cap(struct mt76_dev *dev,
				 struct ieee80211_supported_band *sband,
				 bool vht)
{
	struct ieee80211_sta_ht_cap *ht_cap = &sband->ht_cap;
	int i, nstream = __sw_hweight8(dev->antenna_mask);
	struct ieee80211_sta_vht_cap *vht_cap;
	u16 mcs_map = 0;

	if (nstream > 1)
		ht_cap->cap |= IEEE80211_HT_CAP_TX_STBC;
	else
		ht_cap->cap &= ~IEEE80211_HT_CAP_TX_STBC;

	for (i = 0; i < IEEE80211_HT_MCS_MASK_LEN; i++)
		ht_cap->mcs.rx_mask[i] = i < nstream ? 0xff : 0;

	if (!vht)
		return;

	vht_cap = &sband->vht_cap;
	if (nstream > 1)
		vht_cap->cap |= IEEE80211_VHT_CAP_TXSTBC;
	else
		vht_cap->cap &= ~IEEE80211_VHT_CAP_TXSTBC;

	for (i = 0; i < 8; i++) {
		if (i < nstream)
			mcs_map |= (IEEE80211_VHT_MCS_SUPPORT_0_9 << (i * 2));
		else
			mcs_map |= (IEEE80211_VHT_MCS_NOT_SUPPORTED << (i * 2));
	}

	vht_cap->vht_mcs.rx_mcs_map = cpu_to_le16(mcs_map);
	vht_cap->vht_mcs.tx_mcs_map = cpu_to_le16(mcs_map);
}

void mt76_set_stream_caps(struct mt76_dev *dev, bool vht)
{
	if (dev->cap.has_2ghz)
		mt76_init_stream_cap(dev, &dev->sband_2g.sband, false);
	if (dev->cap.has_5ghz)
		mt76_init_stream_cap(dev, &dev->sband_5g.sband, vht);
}
EXPORT_SYMBOL_GPL(mt76_set_stream_caps);

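/*
 * Common setup for one supported band: duplicate the channel template,
 * allocate per-channel state and fill in the default HT/VHT capabilities.
 */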
static int
mt76_init_sband(struct mt76_dev *dev, struct mt76_sband *msband,
		const struct ieee80211_channel *chan, int n_chan,
		struct ieee80211_rate *rates, int n_rates, bool vht)
{
	struct ieee80211_supported_band *sband = &msband->sband;
	struct ieee80211_sta_ht_cap *ht_cap;
	struct ieee80211_sta_vht_cap *vht_cap;
	void *chanlist;
	int size;

	size = n_chan * sizeof(*chan);
	chanlist = devm_kmemdup(dev->dev, chan, size, GFP_KERNEL);
	if (!chanlist)
		return -ENOMEM;

	msband->chan = devm_kcalloc(dev->dev, n_chan, sizeof(*msband->chan),
				    GFP_KERNEL);
	if (!msband->chan)
		return -ENOMEM;

	sband->channels = chanlist;
	sband->n_channels = n_chan;
	sband->bitrates = rates;
	sband->n_bitrates = n_rates;
	dev->chandef.chan = &sband->channels[0];

	ht_cap = &sband->ht_cap;
	ht_cap->ht_supported = true;
	ht_cap->cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
		       IEEE80211_HT_CAP_GRN_FLD |
		       IEEE80211_HT_CAP_SGI_20 |
		       IEEE80211_HT_CAP_SGI_40 |
		       (1 << IEEE80211_HT_CAP_RX_STBC_SHIFT);

	ht_cap->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
	ht_cap->ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
	ht_cap->ampdu_density = IEEE80211_HT_MPDU_DENSITY_4;

	mt76_init_stream_cap(dev, sband, vht);

	if (!vht)
		return 0;

	vht_cap = &sband->vht_cap;
	vht_cap->vht_supported = true;
	vht_cap->cap |= IEEE80211_VHT_CAP_RXLDPC |
			IEEE80211_VHT_CAP_RXSTBC_1 |
			IEEE80211_VHT_CAP_SHORT_GI_80 |
			(3 << IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT);

	return 0;
}

static int
mt76_init_sband_2g(struct mt76_dev *dev, struct ieee80211_rate *rates,
		   int n_rates)
{
	dev->hw->wiphy->bands[NL80211_BAND_2GHZ] = &dev->sband_2g.sband;

	return mt76_init_sband(dev, &dev->sband_2g,
			       mt76_channels_2ghz,
			       ARRAY_SIZE(mt76_channels_2ghz),
			       rates, n_rates, false);
}

static int
mt76_init_sband_5g(struct mt76_dev *dev, struct ieee80211_rate *rates,
		   int n_rates, bool vht)
{
	dev->hw->wiphy->bands[NL80211_BAND_5GHZ] = &dev->sband_5g.sband;

	return mt76_init_sband(dev, &dev->sband_5g,
			       mt76_channels_5ghz,
			       ARRAY_SIZE(mt76_channels_5ghz),
			       rates, n_rates, vht);
}

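/*
 * Drop a band from the wiphy if all of its channels ended up disabled,
 * e.g. by regulatory rules or device tree frequency limits.
 */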
static void
mt76_check_sband(struct mt76_dev *dev, int band)
{
	struct ieee80211_supported_band *sband = dev->hw->wiphy->bands[band];
	bool found = false;
	int i;

	if (!sband)
		return;

	for (i = 0; i < sband->n_channels; i++) {
		if (sband->channels[i].flags & IEEE80211_CHAN_DISABLED)
			continue;

		found = true;
		break;
	}

	if (found)
		return;

	sband->n_channels = 0;
	dev->hw->wiphy->bands[band] = NULL;
}

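/* Allocate the mac80211 hw together with the driver private area. */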
struct mt76_dev *
mt76_alloc_device(unsigned int size, const struct ieee80211_ops *ops)
{
	struct ieee80211_hw *hw;
	struct mt76_dev *dev;

	hw = ieee80211_alloc_hw(size, ops);
	if (!hw)
		return NULL;

	dev = hw->priv;
	dev->hw = hw;
	spin_lock_init(&dev->rx_lock);
	spin_lock_init(&dev->lock);
	spin_lock_init(&dev->cc_lock);
	init_waitqueue_head(&dev->tx_wait);

	return dev;
}
EXPORT_SYMBOL_GPL(mt76_alloc_device);

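/*
 * Fill in the common wiphy/hw capabilities, set up the supported bands and
 * LED support, then register the device with mac80211.
 */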
int mt76_register_device(struct mt76_dev *dev, bool vht,
			 struct ieee80211_rate *rates, int n_rates)
{
	struct ieee80211_hw *hw = dev->hw;
	struct wiphy *wiphy = hw->wiphy;
	int ret;

	dev_set_drvdata(dev->dev, dev);

	INIT_LIST_HEAD(&dev->txwi_cache);

	SET_IEEE80211_DEV(hw, dev->dev);
	SET_IEEE80211_PERM_ADDR(hw, dev->macaddr);

	wiphy->interface_modes =
		BIT(NL80211_IFTYPE_STATION) |
		BIT(NL80211_IFTYPE_AP) |
#ifdef CONFIG_MAC80211_MESH
		BIT(NL80211_IFTYPE_MESH_POINT) |
#endif
		BIT(NL80211_IFTYPE_ADHOC);

	wiphy->features |= NL80211_FEATURE_ACTIVE_MONITOR;

	wiphy->available_antennas_tx = dev->antenna_mask;
	wiphy->available_antennas_rx = dev->antenna_mask;

	hw->txq_data_size = sizeof(struct mt76_txq);
	hw->max_tx_fragments = 16;

	ieee80211_hw_set(hw, SIGNAL_DBM);
	ieee80211_hw_set(hw, PS_NULLFUNC_STACK);
	ieee80211_hw_set(hw, HOST_BROADCAST_PS_BUFFERING);
	ieee80211_hw_set(hw, AMPDU_AGGREGATION);
	ieee80211_hw_set(hw, SUPPORTS_RC_TABLE);
	ieee80211_hw_set(hw, SUPPORT_FAST_XMIT);
	ieee80211_hw_set(hw, SUPPORTS_CLONED_SKBS);
	ieee80211_hw_set(hw, SUPPORTS_AMSDU_IN_AMPDU);
	ieee80211_hw_set(hw, TX_AMSDU);
	ieee80211_hw_set(hw, TX_FRAG_LIST);
	ieee80211_hw_set(hw, MFP_CAPABLE);
	ieee80211_hw_set(hw, AP_LINK_PS);

	wiphy->flags |= WIPHY_FLAG_IBSS_RSN;

	if (dev->cap.has_2ghz) {
		ret = mt76_init_sband_2g(dev, rates, n_rates);
		if (ret)
			return ret;
	}

	if (dev->cap.has_5ghz) {
		ret = mt76_init_sband_5g(dev, rates + 4, n_rates - 4, vht);
		if (ret)
			return ret;
	}

	wiphy_read_of_freq_limits(dev->hw->wiphy);
	mt76_check_sband(dev, NL80211_BAND_2GHZ);
	mt76_check_sband(dev, NL80211_BAND_5GHZ);

	ret = mt76_led_init(dev);
	if (ret)
		return ret;

	return ieee80211_register_hw(hw);
}
EXPORT_SYMBOL_GPL(mt76_register_device);

void mt76_unregister_device(struct mt76_dev *dev)
{
	struct ieee80211_hw *hw = dev->hw;

	ieee80211_unregister_hw(hw);
	mt76_tx_free(dev);
}
EXPORT_SYMBOL_GPL(mt76_unregister_device);

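/*
 * Queue a received frame for deferred processing; it is delivered to
 * mac80211 later from mt76_rx_poll_complete().
 */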
void mt76_rx(struct mt76_dev *dev, enum mt76_rxq_id q, struct sk_buff *skb)
{
	if (!test_bit(MT76_STATE_RUNNING, &dev->state)) {
		dev_kfree_skb(skb);
		return;
	}

	__skb_queue_tail(&dev->rx_skb[q], skb);
}
EXPORT_SYMBOL_GPL(mt76_rx);

static bool mt76_has_tx_pending(struct mt76_dev *dev)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(dev->q_tx); i++) {
		if (dev->q_tx[i].queued)
			return true;
	}

	return false;
}

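/*
 * Switch the operating channel: wait briefly for pending TX to drain,
 * update survey data and reset the channel state when going off-channel.
 */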
void mt76_set_channel(struct mt76_dev *dev)
{
	struct ieee80211_hw *hw = dev->hw;
	struct cfg80211_chan_def *chandef = &hw->conf.chandef;
	struct mt76_channel_state *state;
	bool offchannel = hw->conf.flags & IEEE80211_CONF_OFFCHANNEL;
	int timeout = HZ / 5;

	if (offchannel)
		set_bit(MT76_OFFCHANNEL, &dev->state);
	else
		clear_bit(MT76_OFFCHANNEL, &dev->state);

	wait_event_timeout(dev->tx_wait, !mt76_has_tx_pending(dev), timeout);

	if (dev->drv->update_survey)
		dev->drv->update_survey(dev);

	dev->chandef = *chandef;

	if (!offchannel)
		dev->main_chan = chandef->chan;

	if (chandef->chan != dev->main_chan) {
		state = mt76_channel_state(dev, chandef->chan);
		memset(state, 0, sizeof(*state));
	}
}
EXPORT_SYMBOL_GPL(mt76_set_channel);

int mt76_get_survey(struct ieee80211_hw *hw, int idx,
		    struct survey_info *survey)
{
	struct mt76_dev *dev = hw->priv;
	struct mt76_sband *sband;
	struct ieee80211_channel *chan;
	struct mt76_channel_state *state;
	int ret = 0;

	if (idx == 0 && dev->drv->update_survey)
		dev->drv->update_survey(dev);

	sband = &dev->sband_2g;
	if (idx >= sband->sband.n_channels) {
		idx -= sband->sband.n_channels;
		sband = &dev->sband_5g;
	}

	if (idx >= sband->sband.n_channels)
		return -ENOENT;

	chan = &sband->sband.channels[idx];
	state = mt76_channel_state(dev, chan);

	memset(survey, 0, sizeof(*survey));
	survey->channel = chan;
	survey->filled = SURVEY_INFO_TIME | SURVEY_INFO_TIME_BUSY;
	if (chan == dev->main_chan)
		survey->filled |= SURVEY_INFO_IN_USE;

	spin_lock_bh(&dev->cc_lock);
	survey->time = div_u64(state->cc_active, 1000);
	survey->time_busy = div_u64(state->cc_busy, 1000);
	spin_unlock_bh(&dev->cc_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(mt76_get_survey);

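/*
 * Cache the per-TID CCMP packet numbers of a new key so that received
 * frames can be checked for replays in software.
 */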
void mt76_wcid_key_setup(struct mt76_dev *dev, struct mt76_wcid *wcid,
			 struct ieee80211_key_conf *key)
{
	struct ieee80211_key_seq seq;
	int i;

	wcid->rx_check_pn = false;

	if (!key)
		return;

	if (key->cipher == WLAN_CIPHER_SUITE_CCMP)
		wcid->rx_check_pn = true;

	for (i = 0; i < IEEE80211_NUM_TIDS; i++) {
		ieee80211_get_key_rx_seq(key, i, &seq);
		memcpy(wcid->rx_key_pn[i], seq.ccmp.pn, sizeof(seq.ccmp.pn));
	}
}
EXPORT_SYMBOL(mt76_wcid_key_setup);

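/*
 * Convert the driver specific RX status stored in skb->cb into the
 * mac80211 ieee80211_rx_status and look up the destination station.
 */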
static struct ieee80211_sta *mt76_rx_convert(struct sk_buff *skb)
{
	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
	struct mt76_rx_status mstat;

	mstat = *((struct mt76_rx_status *) skb->cb);
	memset(status, 0, sizeof(*status));

	status->flag = mstat.flag;
	status->freq = mstat.freq;
	status->enc_flags = mstat.enc_flags;
	status->encoding = mstat.encoding;
	status->bw = mstat.bw;
	status->rate_idx = mstat.rate_idx;
	status->nss = mstat.nss;
	status->band = mstat.band;
	status->signal = mstat.signal;
	status->chains = mstat.chains;

	BUILD_BUG_ON(sizeof(mstat) > sizeof(skb->cb));
	BUILD_BUG_ON(sizeof(status->chain_signal) != sizeof(mstat.chain_signal));
	memcpy(status->chain_signal, mstat.chain_signal, sizeof(mstat.chain_signal));

	return wcid_to_sta(mstat.wcid);
}

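/* Software CCMP PN (replay) check for decrypted frames. */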
static int
mt76_check_ccmp_pn(struct sk_buff *skb)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *) skb->cb;
	struct mt76_wcid *wcid = status->wcid;
	struct ieee80211_hdr *hdr;
	int ret;

	if (!(status->flag & RX_FLAG_DECRYPTED))
		return 0;

	if (!wcid || !wcid->rx_check_pn)
		return 0;

	if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
		/*
		 * Validate the first fragment both here and in mac80211.
		 * All further fragments will be validated by mac80211 only.
		 */
		hdr = (struct ieee80211_hdr *) skb->data;
		if (ieee80211_is_frag(hdr) &&
		    !ieee80211_is_first_frag(hdr->frame_control))
			return 0;
	}

	BUILD_BUG_ON(sizeof(status->iv) != sizeof(wcid->rx_key_pn[0]));
	ret = memcmp(status->iv, wcid->rx_key_pn[status->tid],
		     sizeof(status->iv));
	if (ret <= 0)
		return -EINVAL; /* replay */

	memcpy(wcid->rx_key_pn[status->tid], status->iv, sizeof(status->iv));

	if (status->flag & RX_FLAG_IV_STRIPPED)
		status->flag |= RX_FLAG_PN_VALIDATED;

	return 0;
}

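/*
 * Track the powersave state of the transmitting station based on the PM bit
 * of received frames and report transitions to the driver and mac80211.
 */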
static void
mt76_check_ps(struct mt76_dev *dev, struct sk_buff *skb)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *) skb->cb;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
	struct ieee80211_sta *sta;
	struct mt76_wcid *wcid = status->wcid;
	bool ps;

	if (!wcid || !wcid->sta)
		return;

	sta = container_of((void *) wcid, struct ieee80211_sta, drv_priv);

	if (!test_bit(MT_WCID_FLAG_CHECK_PS, &wcid->flags))
		return;

	if (ieee80211_is_pspoll(hdr->frame_control)) {
		ieee80211_sta_pspoll(sta);
		return;
	}

	if (ieee80211_has_morefrags(hdr->frame_control) ||
	    !(ieee80211_is_mgmt(hdr->frame_control) ||
	      ieee80211_is_data(hdr->frame_control)))
		return;

	ps = ieee80211_has_pm(hdr->frame_control);

	if (ps && (ieee80211_is_data_qos(hdr->frame_control) ||
		   ieee80211_is_qos_nullfunc(hdr->frame_control)))
		ieee80211_sta_uapsd_trigger(sta, status->tid);

	if (!!test_bit(MT_WCID_FLAG_PS, &wcid->flags) == ps)
		return;

	if (ps)
		set_bit(MT_WCID_FLAG_PS, &wcid->flags);
	else
		clear_bit(MT_WCID_FLAG_PS, &wcid->flags);

	dev->drv->sta_ps(dev, sta, ps);
	ieee80211_sta_ps_transition(sta, ps);
}

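/*
 * Deliver a batch of received frames to mac80211 (via NAPI when a valid RX
 * queue is given), dropping frames that fail the CCMP PN check.
 */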
void mt76_rx_complete(struct mt76_dev *dev, struct sk_buff_head *frames,
		      int queue)
{
	struct napi_struct *napi = NULL;
	struct ieee80211_sta *sta;
	struct sk_buff *skb;

	if (queue >= 0)
		napi = &dev->napi[queue];

	spin_lock(&dev->rx_lock);
	while ((skb = __skb_dequeue(frames)) != NULL) {
		if (mt76_check_ccmp_pn(skb)) {
			dev_kfree_skb(skb);
			continue;
		}

		sta = mt76_rx_convert(skb);
		ieee80211_rx_napi(dev->hw, sta, skb, napi);
	}
	spin_unlock(&dev->rx_lock);
}

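/*
 * Process all frames queued by mt76_rx() for one RX queue: run the PS check,
 * pass them through A-MPDU reordering and hand them to mt76_rx_complete().
 */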
void mt76_rx_poll_complete(struct mt76_dev *dev, enum mt76_rxq_id q)
{
	struct sk_buff_head frames;
	struct sk_buff *skb;

	__skb_queue_head_init(&frames);

	while ((skb = __skb_dequeue(&dev->rx_skb[q])) != NULL) {
		mt76_check_ps(dev, skb);
		mt76_rx_aggr_reorder(skb, &frames);
	}

	mt76_rx_complete(dev, &frames, q);
}