/*
 * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
#include <linux/of.h>
#include "mt76.h"
#define CHAN2G(_idx, _freq) { \
	.band = NL80211_BAND_2GHZ, \
	.center_freq = (_freq), \
	.hw_value = (_idx), \
	.max_power = 30, \
}

#define CHAN5G(_idx, _freq) { \
	.band = NL80211_BAND_5GHZ, \
	.center_freq = (_freq), \
	.hw_value = (_idx), \
	.max_power = 30, \
}
static const struct ieee80211_channel mt76_channels_2ghz[] = {
	/* CHAN2G(hw_value, center_freq) entries for the 2.4 GHz channels */
};

static const struct ieee80211_channel mt76_channels_5ghz[] = {
	/* CHAN5G(hw_value, center_freq) entries for the supported 5 GHz channels */
};
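
/*
 * Throughput (in Kbit/s) to LED blink interval (in ms) mapping fed to the
 * mac80211 throughput LED trigger below: higher throughput, faster blinking.
 */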
static const struct ieee80211_tpt_blink mt76_tpt_blink[] = {
	{ .throughput =   0 * 1024, .blink_time = 334 },
	{ .throughput =   1 * 1024, .blink_time = 260 },
	{ .throughput =   5 * 1024, .blink_time = 220 },
	{ .throughput =  10 * 1024, .blink_time = 190 },
	{ .throughput =  20 * 1024, .blink_time = 170 },
	{ .throughput =  50 * 1024, .blink_time = 150 },
	{ .throughput =  70 * 1024, .blink_time = 130 },
	{ .throughput = 100 * 1024, .blink_time = 110 },
	{ .throughput = 200 * 1024, .blink_time =  80 },
	{ .throughput = 300 * 1024, .blink_time =  50 },
};
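
/*
 * Register the PHY LED with the LED class framework. This only happens if
 * the chip driver filled in brightness_set/blink_set handlers; the LED is
 * named after the wiphy, hooked up to the throughput trigger above, and the
 * optional "led" OF child node may provide "led-sources" (LED pin) and
 * "led-active-low" overrides.
 */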
static int mt76_led_init(struct mt76_dev *dev)
{
	struct device_node *np = dev->dev->of_node;
	struct ieee80211_hw *hw = dev->hw;
	int led_pin;

	if (!dev->led_cdev.brightness_set && !dev->led_cdev.blink_set)
		return 0;

	snprintf(dev->led_name, sizeof(dev->led_name),
		 "mt76-%s", wiphy_name(hw->wiphy));

	dev->led_cdev.name = dev->led_name;
	dev->led_cdev.default_trigger =
		ieee80211_create_tpt_led_trigger(hw,
						 IEEE80211_TPT_LEDTRIG_FL_RADIO,
						 mt76_tpt_blink,
						 ARRAY_SIZE(mt76_tpt_blink));

	np = of_get_child_by_name(np, "led");
	if (np) {
		if (!of_property_read_u32(np, "led-sources", &led_pin))
			dev->led_pin = led_pin;
		dev->led_al = of_property_read_bool(np, "led-active-low");
	}

	return devm_led_classdev_register(dev->dev, &dev->led_cdev);
}
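
/*
 * Derive HT/VHT spatial stream capabilities from the antenna mask: the
 * number of set bits in dev->antenna_mask is the number of streams. As a
 * worked example (not from the original source), antenna_mask = 0x3 gives
 * nstream = 2, so ht_cap->mcs.rx_mask becomes { 0xff, 0xff, 0, 0, ... } and
 * the VHT MCS map ends up as 0xfffa (MCS 0-9 on the first two streams,
 * "not supported" on the rest).
 */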
static void mt76_init_stream_cap(struct mt76_dev *dev,
				 struct ieee80211_supported_band *sband,
				 bool vht)
{
	struct ieee80211_sta_ht_cap *ht_cap = &sband->ht_cap;
	int i, nstream = __sw_hweight8(dev->antenna_mask);
	struct ieee80211_sta_vht_cap *vht_cap;
	u16 mcs_map = 0;

	if (nstream > 1)
		ht_cap->cap |= IEEE80211_HT_CAP_TX_STBC;
	else
		ht_cap->cap &= ~IEEE80211_HT_CAP_TX_STBC;

	for (i = 0; i < IEEE80211_HT_MCS_MASK_LEN; i++)
		ht_cap->mcs.rx_mask[i] = i < nstream ? 0xff : 0;

	if (!vht)
		return;

	vht_cap = &sband->vht_cap;
	if (nstream > 1)
		vht_cap->cap |= IEEE80211_VHT_CAP_TXSTBC;
	else
		vht_cap->cap &= ~IEEE80211_VHT_CAP_TXSTBC;

	for (i = 0; i < 8; i++) {
		if (i < nstream)
			mcs_map |= (IEEE80211_VHT_MCS_SUPPORT_0_9 << (i * 2));
		else
			mcs_map |= (IEEE80211_VHT_MCS_NOT_SUPPORTED << (i * 2));
	}
	vht_cap->vht_mcs.rx_mcs_map = cpu_to_le16(mcs_map);
	vht_cap->vht_mcs.tx_mcs_map = cpu_to_le16(mcs_map);
}
void mt76_set_stream_caps(struct mt76_dev *dev, bool vht)
{
	if (dev->cap.has_2ghz)
		mt76_init_stream_cap(dev, &dev->sband_2g.sband, false);
	if (dev->cap.has_5ghz)
		mt76_init_stream_cap(dev, &dev->sband_5g.sband, vht);
}
EXPORT_SYMBOL_GPL(mt76_set_stream_caps);
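
/*
 * Common sband setup: duplicate the constant channel template into
 * device-managed memory, allocate per-channel state, and advertise the
 * default HT (and optionally VHT) capabilities before applying the
 * stream-dependent bits via mt76_init_stream_cap().
 */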
static int
mt76_init_sband(struct mt76_dev *dev, struct mt76_sband *msband,
		const struct ieee80211_channel *chan, int n_chan,
		struct ieee80211_rate *rates, int n_rates, bool vht)
{
	struct ieee80211_supported_band *sband = &msband->sband;
	struct ieee80211_sta_ht_cap *ht_cap;
	struct ieee80211_sta_vht_cap *vht_cap;
	void *chanlist;
	int size;

	size = n_chan * sizeof(*chan);
	chanlist = devm_kmemdup(dev->dev, chan, size, GFP_KERNEL);
	if (!chanlist)
		return -ENOMEM;

	msband->chan = devm_kcalloc(dev->dev, n_chan, sizeof(*msband->chan),
				    GFP_KERNEL);
	if (!msband->chan)
		return -ENOMEM;

	sband->channels = chanlist;
	sband->n_channels = n_chan;
	sband->bitrates = rates;
	sband->n_bitrates = n_rates;
	dev->chandef.chan = &sband->channels[0];

	ht_cap = &sband->ht_cap;
	ht_cap->ht_supported = true;
	ht_cap->cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
		       IEEE80211_HT_CAP_GRN_FLD |
		       IEEE80211_HT_CAP_SGI_20 |
		       IEEE80211_HT_CAP_SGI_40 |
		       (1 << IEEE80211_HT_CAP_RX_STBC_SHIFT);

	ht_cap->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
	ht_cap->ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
	ht_cap->ampdu_density = IEEE80211_HT_MPDU_DENSITY_4;

	mt76_init_stream_cap(dev, sband, vht);

	if (!vht)
		return 0;

	vht_cap = &sband->vht_cap;
	vht_cap->vht_supported = true;
	vht_cap->cap |= IEEE80211_VHT_CAP_RXLDPC |
			IEEE80211_VHT_CAP_RXSTBC_1 |
			IEEE80211_VHT_CAP_SHORT_GI_80 |
			(3 << IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT);

	return 0;
}
static int
mt76_init_sband_2g(struct mt76_dev *dev, struct ieee80211_rate *rates,
		   int n_rates)
{
	dev->hw->wiphy->bands[NL80211_BAND_2GHZ] = &dev->sband_2g.sband;

	return mt76_init_sband(dev, &dev->sband_2g,
			       mt76_channels_2ghz,
			       ARRAY_SIZE(mt76_channels_2ghz),
			       rates, n_rates, false);
}
static int
mt76_init_sband_5g(struct mt76_dev *dev, struct ieee80211_rate *rates,
		   int n_rates, bool vht)
{
	dev->hw->wiphy->bands[NL80211_BAND_5GHZ] = &dev->sband_5g.sband;

	return mt76_init_sband(dev, &dev->sband_5g,
			       mt76_channels_5ghz,
			       ARRAY_SIZE(mt76_channels_5ghz),
			       rates, n_rates, vht);
}
static void
mt76_check_sband(struct mt76_dev *dev, int band)
{
	struct ieee80211_supported_band *sband = dev->hw->wiphy->bands[band];
	bool found = false;
	int i;

	if (!sband)
		return;

	for (i = 0; i < sband->n_channels; i++) {
		if (sband->channels[i].flags & IEEE80211_CHAN_DISABLED)
			continue;

		found = true;
		break;
	}

	/* Drop the band entirely if every channel is disabled */
	if (found)
		return;

	sband->n_channels = 0;
	dev->hw->wiphy->bands[band] = NULL;
}
struct mt76_dev *
mt76_alloc_device(unsigned int size, const struct ieee80211_ops *ops)
{
	struct ieee80211_hw *hw;
	struct mt76_dev *dev;

	hw = ieee80211_alloc_hw(size, ops);
	if (!hw)
		return NULL;

	dev = hw->priv;
	dev->hw = hw;
	spin_lock_init(&dev->rx_lock);
	spin_lock_init(&dev->lock);
	spin_lock_init(&dev->cc_lock);
	init_waitqueue_head(&dev->tx_wait);

	return dev;
}
EXPORT_SYMBOL_GPL(mt76_alloc_device);
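
/*
 * Typical call flow for a chip driver built on this core (illustration
 * only, not taken from this file; mt76_foo_ops, mt76_foo_rates and
 * struct mt76_foo_dev are hypothetical placeholders):
 *
 *	struct mt76_dev *mdev;
 *	int err;
 *
 *	mdev = mt76_alloc_device(sizeof(struct mt76_foo_dev), &mt76_foo_ops);
 *	if (!mdev)
 *		return -ENOMEM;
 *
 *	... chip-specific init, fill mdev->cap and mdev->antenna_mask ...
 *
 *	err = mt76_register_device(mdev, true, mt76_foo_rates,
 *				   ARRAY_SIZE(mt76_foo_rates));
 *	if (err)
 *		return err;
 *
 * Note that the 5 GHz band is registered with rates + 4 / n_rates - 4, so
 * the rate table is expected to start with the four CCK rates.
 */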
int mt76_register_device(struct mt76_dev *dev, bool vht,
			 struct ieee80211_rate *rates, int n_rates)
{
	struct ieee80211_hw *hw = dev->hw;
	struct wiphy *wiphy = hw->wiphy;
	int ret;

	dev_set_drvdata(dev->dev, dev);

	INIT_LIST_HEAD(&dev->txwi_cache);

	SET_IEEE80211_DEV(hw, dev->dev);
	SET_IEEE80211_PERM_ADDR(hw, dev->macaddr);

	wiphy->interface_modes =
		BIT(NL80211_IFTYPE_STATION) |
		BIT(NL80211_IFTYPE_AP) |
#ifdef CONFIG_MAC80211_MESH
		BIT(NL80211_IFTYPE_MESH_POINT) |
#endif
		BIT(NL80211_IFTYPE_ADHOC);

	wiphy->features |= NL80211_FEATURE_ACTIVE_MONITOR;

	wiphy->available_antennas_tx = dev->antenna_mask;
	wiphy->available_antennas_rx = dev->antenna_mask;

	hw->txq_data_size = sizeof(struct mt76_txq);
	hw->max_tx_fragments = 16;

	ieee80211_hw_set(hw, SIGNAL_DBM);
	ieee80211_hw_set(hw, PS_NULLFUNC_STACK);
	ieee80211_hw_set(hw, HOST_BROADCAST_PS_BUFFERING);
	ieee80211_hw_set(hw, AMPDU_AGGREGATION);
	ieee80211_hw_set(hw, SUPPORTS_RC_TABLE);
	ieee80211_hw_set(hw, SUPPORT_FAST_XMIT);
	ieee80211_hw_set(hw, SUPPORTS_CLONED_SKBS);
	ieee80211_hw_set(hw, SUPPORTS_AMSDU_IN_AMPDU);
	ieee80211_hw_set(hw, TX_AMSDU);
	ieee80211_hw_set(hw, TX_FRAG_LIST);
	ieee80211_hw_set(hw, MFP_CAPABLE);
	ieee80211_hw_set(hw, AP_LINK_PS);

	wiphy->flags |= WIPHY_FLAG_IBSS_RSN;

	if (dev->cap.has_2ghz) {
		ret = mt76_init_sband_2g(dev, rates, n_rates);
		if (ret)
			return ret;
	}

	if (dev->cap.has_5ghz) {
		ret = mt76_init_sband_5g(dev, rates + 4, n_rates - 4, vht);
		if (ret)
			return ret;
	}

	wiphy_read_of_freq_limits(dev->hw->wiphy);
	mt76_check_sband(dev, NL80211_BAND_2GHZ);
	mt76_check_sband(dev, NL80211_BAND_5GHZ);

	ret = mt76_led_init(dev);
	if (ret)
		return ret;

	return ieee80211_register_hw(hw);
}
EXPORT_SYMBOL_GPL(mt76_register_device);
void mt76_unregister_device(struct mt76_dev *dev)
{
	struct ieee80211_hw *hw = dev->hw;

	ieee80211_unregister_hw(hw);
}
EXPORT_SYMBOL_GPL(mt76_unregister_device);
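
/*
 * RX entry point for chip drivers: frames are only queued on the per-queue
 * rx_skb list here; mt76_rx_poll_complete() later runs the PS and RX
 * aggregation reorder handling and hands the frames to mac80211 via
 * mt76_rx_complete().
 */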
void mt76_rx(struct mt76_dev *dev, enum mt76_rxq_id q, struct sk_buff *skb)
{
	if (!test_bit(MT76_STATE_RUNNING, &dev->state)) {
		dev_kfree_skb(skb);
		return;
	}

	__skb_queue_tail(&dev->rx_skb[q], skb);
}
EXPORT_SYMBOL_GPL(mt76_rx);
static bool mt76_has_tx_pending(struct mt76_dev *dev)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(dev->q_tx); i++) {
		if (dev->q_tx[i].queued)
			return true;
	}

	return false;
}
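
/*
 * Channel switch helper: waits (up to HZ / 5) for pending TX to drain,
 * lets the driver update its survey counters, and resets the accumulated
 * channel state when tuning to a channel other than the main operating
 * channel.
 */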
void mt76_set_channel(struct mt76_dev *dev)
{
	struct ieee80211_hw *hw = dev->hw;
	struct cfg80211_chan_def *chandef = &hw->conf.chandef;
	struct mt76_channel_state *state;
	bool offchannel = hw->conf.flags & IEEE80211_CONF_OFFCHANNEL;
	int timeout = HZ / 5;

	if (offchannel)
		set_bit(MT76_OFFCHANNEL, &dev->state);
	else
		clear_bit(MT76_OFFCHANNEL, &dev->state);

	wait_event_timeout(dev->tx_wait, !mt76_has_tx_pending(dev), timeout);

	if (dev->drv->update_survey)
		dev->drv->update_survey(dev);

	dev->chandef = *chandef;

	if (!offchannel)
		dev->main_chan = chandef->chan;

	if (chandef->chan != dev->main_chan) {
		state = mt76_channel_state(dev, chandef->chan);
		memset(state, 0, sizeof(*state));
	}
}
EXPORT_SYMBOL_GPL(mt76_set_channel);
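
/*
 * mac80211 survey callback: channels are indexed 2.4 GHz first, then 5 GHz.
 * The cc_active/cc_busy counters are divided by 1000 here, which assumes
 * the driver accumulates them in microseconds while SURVEY_INFO_TIME* is
 * reported in milliseconds.
 */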
int mt76_get_survey(struct ieee80211_hw *hw, int idx,
		    struct survey_info *survey)
{
	struct mt76_dev *dev = hw->priv;
	struct mt76_sband *sband;
	struct ieee80211_channel *chan;
	struct mt76_channel_state *state;

	if (idx == 0 && dev->drv->update_survey)
		dev->drv->update_survey(dev);

	sband = &dev->sband_2g;
	if (idx >= sband->sband.n_channels) {
		idx -= sband->sband.n_channels;
		sband = &dev->sband_5g;
	}

	if (idx >= sband->sband.n_channels)
		return -ENOENT;

	chan = &sband->sband.channels[idx];
	state = mt76_channel_state(dev, chan);

	memset(survey, 0, sizeof(*survey));
	survey->channel = chan;
	survey->filled = SURVEY_INFO_TIME | SURVEY_INFO_TIME_BUSY;
	if (chan == dev->main_chan)
		survey->filled |= SURVEY_INFO_IN_USE;

	spin_lock_bh(&dev->cc_lock);
	survey->time = div_u64(state->cc_active, 1000);
	survey->time_busy = div_u64(state->cc_busy, 1000);
	spin_unlock_bh(&dev->cc_lock);

	return 0;
}
EXPORT_SYMBOL_GPL(mt76_get_survey);
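
/*
 * Capture the starting RX packet numbers for a newly installed key and
 * enable PN replay checking for CCMP, so mt76_check_ccmp_pn() can reject
 * replayed frames on hardware-decrypted RX.
 */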
void mt76_wcid_key_setup(struct mt76_dev *dev, struct mt76_wcid *wcid,
			 struct ieee80211_key_conf *key)
{
	struct ieee80211_key_seq seq;
	int i;

	wcid->rx_check_pn = false;

	if (!key)
		return;

	if (key->cipher == WLAN_CIPHER_SUITE_CCMP)
		wcid->rx_check_pn = true;

	for (i = 0; i < IEEE80211_NUM_TIDS; i++) {
		ieee80211_get_key_rx_seq(key, i, &seq);
		memcpy(wcid->rx_key_pn[i], seq.ccmp.pn, sizeof(seq.ccmp.pn));
	}
}
EXPORT_SYMBOL(mt76_wcid_key_setup);
static struct ieee80211_sta *mt76_rx_convert(struct sk_buff *skb)
{
	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
	struct mt76_rx_status mstat;

	mstat = *((struct mt76_rx_status *) skb->cb);
	memset(status, 0, sizeof(*status));

	status->flag = mstat.flag;
	status->freq = mstat.freq;
	status->enc_flags = mstat.enc_flags;
	status->encoding = mstat.encoding;
	status->bw = mstat.bw;
	status->rate_idx = mstat.rate_idx;
	status->nss = mstat.nss;
	status->band = mstat.band;
	status->signal = mstat.signal;
	status->chains = mstat.chains;

	BUILD_BUG_ON(sizeof(mstat) > sizeof(skb->cb));
	BUILD_BUG_ON(sizeof(status->chain_signal) !=
		     sizeof(mstat.chain_signal));
	memcpy(status->chain_signal, mstat.chain_signal,
	       sizeof(mstat.chain_signal));

	return wcid_to_sta(mstat.wcid);
}
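
/*
 * Software CCMP PN replay check for hardware-decrypted frames: compare the
 * received PN against the last one recorded for the TID and only accept a
 * value that is larger than the stored one.
 */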
static int
mt76_check_ccmp_pn(struct sk_buff *skb)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *) skb->cb;
	struct mt76_wcid *wcid = status->wcid;
	struct ieee80211_hdr *hdr;
	int ret;

	if (!(status->flag & RX_FLAG_DECRYPTED))
		return 0;

	if (!wcid || !wcid->rx_check_pn)
		return 0;

	if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
		/*
		 * Validate the first fragment both here and in mac80211.
		 * All further fragments will be validated by mac80211 only.
		 */
		hdr = (struct ieee80211_hdr *) skb->data;
		if (ieee80211_is_frag(hdr) &&
		    !ieee80211_is_first_frag(hdr->frame_control))
			return 0;
	}

	BUILD_BUG_ON(sizeof(status->iv) != sizeof(wcid->rx_key_pn[0]));
	ret = memcmp(status->iv, wcid->rx_key_pn[status->tid],
		     sizeof(status->iv));
	if (ret <= 0)
		return -EINVAL; /* replay */

	memcpy(wcid->rx_key_pn[status->tid], status->iv, sizeof(status->iv));

	if (status->flag & RX_FLAG_IV_STRIPPED)
		status->flag |= RX_FLAG_PN_VALIDATED;

	return 0;
}
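
/*
 * Mirror the peer's power save state from the PM bit of received frames
 * into the wcid flags and notify both the chip driver (sta_ps) and
 * mac80211, including PS-poll and U-APSD trigger handling.
 */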
static void
mt76_check_ps(struct mt76_dev *dev, struct sk_buff *skb)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *) skb->cb;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
	struct ieee80211_sta *sta;
	struct mt76_wcid *wcid = status->wcid;
	bool ps;

	if (!wcid || !wcid->sta)
		return;

	sta = container_of((void *) wcid, struct ieee80211_sta, drv_priv);

	if (!test_bit(MT_WCID_FLAG_CHECK_PS, &wcid->flags))
		return;

	if (ieee80211_is_pspoll(hdr->frame_control)) {
		ieee80211_sta_pspoll(sta);
		return;
	}

	if (ieee80211_has_morefrags(hdr->frame_control) ||
	    !(ieee80211_is_mgmt(hdr->frame_control) ||
	      ieee80211_is_data(hdr->frame_control)))
		return;

	ps = ieee80211_has_pm(hdr->frame_control);

	if (ps && (ieee80211_is_data_qos(hdr->frame_control) ||
		   ieee80211_is_qos_nullfunc(hdr->frame_control)))
		ieee80211_sta_uapsd_trigger(sta, status->tid);

	if (!!test_bit(MT_WCID_FLAG_PS, &wcid->flags) == ps)
		return;

	if (ps)
		set_bit(MT_WCID_FLAG_PS, &wcid->flags);
	else
		clear_bit(MT_WCID_FLAG_PS, &wcid->flags);

	dev->drv->sta_ps(dev, sta, ps);
	ieee80211_sta_ps_transition(sta, ps);
}
void mt76_rx_complete(struct mt76_dev *dev, struct sk_buff_head *frames,
		      int queue)
{
	struct napi_struct *napi = NULL;
	struct ieee80211_sta *sta;
	struct sk_buff *skb;

	if (queue >= 0)
		napi = &dev->napi[queue];

	spin_lock(&dev->rx_lock);
	while ((skb = __skb_dequeue(frames)) != NULL) {
		if (mt76_check_ccmp_pn(skb)) {
			dev_kfree_skb(skb);
			continue;
		}

		sta = mt76_rx_convert(skb);
		ieee80211_rx_napi(dev->hw, sta, skb, napi);
	}
	spin_unlock(&dev->rx_lock);
}
void mt76_rx_poll_complete(struct mt76_dev *dev, enum mt76_rxq_id q)
{
	struct sk_buff_head frames;
	struct sk_buff *skb;

	__skb_queue_head_init(&frames);

	while ((skb = __skb_dequeue(&dev->rx_skb[q])) != NULL) {
		mt76_check_ps(dev, skb);
		mt76_rx_aggr_reorder(skb, &frames);
	}

	mt76_rx_complete(dev, &frames, q);
}