/*
 * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
#ifndef __MT76_H
#define __MT76_H

#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/skbuff.h>
#include <linux/leds.h>
#include <linux/usb.h>
#include <linux/average.h>
#include <net/mac80211.h>

#define MT_TX_RING_SIZE		256
#define MT_MCU_RING_SIZE	32
#define MT_RX_BUF_SIZE		2048

struct mt76_dev;

struct mt76_reg_pair {
	u32 reg;
	u32 value;
};

enum mt76_bus_type {
	MT76_BUS_MMIO,
	MT76_BUS_USB,
};

struct mt76_bus_ops {
	u32 (*rr)(struct mt76_dev *dev, u32 offset);
	void (*wr)(struct mt76_dev *dev, u32 offset, u32 val);
	u32 (*rmw)(struct mt76_dev *dev, u32 offset, u32 mask, u32 val);
	void (*copy)(struct mt76_dev *dev, u32 offset, const void *data,
		     int len);
	int (*wr_rp)(struct mt76_dev *dev, u32 base,
		     const struct mt76_reg_pair *rp, int len);
	int (*rd_rp)(struct mt76_dev *dev, u32 base,
		     struct mt76_reg_pair *rp, int len);

	enum mt76_bus_type type;
};

#define mt76_is_usb(dev) ((dev)->mt76.bus->type == MT76_BUS_USB)
#define mt76_is_mmio(dev) ((dev)->mt76.bus->type == MT76_BUS_MMIO)

enum mt76_txq_id {
	MT_TXQ_VO = IEEE80211_AC_VO,
	MT_TXQ_VI = IEEE80211_AC_VI,
	MT_TXQ_BE = IEEE80211_AC_BE,
	MT_TXQ_BK = IEEE80211_AC_BK,
	__MT_TXQ_MAX
};

enum mt76_rxq_id {
	MT_RXQ_MAIN,
	MT_RXQ_MCU,
	__MT_RXQ_MAX
};

struct mt76_queue_buf {
	dma_addr_t addr;
	int len;
};

struct mt76u_buf {
	struct mt76_dev *dev;
	struct urb *urb;
	size_t len;
	bool done;
};

struct mt76_queue_entry {
	union {
		void *buf;
		struct sk_buff *skb;
	};
	union {
		struct mt76_txwi_cache *txwi;
		struct mt76u_buf ubuf;
	};
};

struct mt76_queue_regs {
	u32 desc_base;
	u32 ring_size;
	u32 cpu_idx;
	u32 dma_idx;
} __packed __aligned(4);

struct mt76_queue {
	struct mt76_queue_regs __iomem *regs;

	struct mt76_queue_entry *entry;
	struct mt76_desc *desc;

	struct list_head swq;

	struct sk_buff *rx_head;
	struct page_frag_cache rx_page;
	spinlock_t rx_page_lock;
};

struct mt76_mcu_ops {
	int (*mcu_send_msg)(struct mt76_dev *dev, int cmd, const void *data,
			    int len, bool wait_resp);
	int (*mcu_wr_rp)(struct mt76_dev *dev, u32 base,
			 const struct mt76_reg_pair *rp, int len);
	int (*mcu_rd_rp)(struct mt76_dev *dev, u32 base,
			 struct mt76_reg_pair *rp, int len);
};

struct mt76_queue_ops {
	int (*init)(struct mt76_dev *dev);

	int (*alloc)(struct mt76_dev *dev, struct mt76_queue *q);

	int (*add_buf)(struct mt76_dev *dev, struct mt76_queue *q,
		       struct mt76_queue_buf *buf, int nbufs, u32 info,
		       struct sk_buff *skb, void *txwi);

	int (*tx_queue_skb)(struct mt76_dev *dev, struct mt76_queue *q,
			    struct sk_buff *skb, struct mt76_wcid *wcid,
			    struct ieee80211_sta *sta);

	int (*tx_queue_skb_raw)(struct mt76_dev *dev, enum mt76_txq_id qid,
				struct sk_buff *skb, u32 tx_info);

	void *(*dequeue)(struct mt76_dev *dev, struct mt76_queue *q, bool flush,
			 int *len, u32 *info, bool *more);

	void (*rx_reset)(struct mt76_dev *dev, enum mt76_rxq_id qid);

	void (*tx_cleanup)(struct mt76_dev *dev, enum mt76_txq_id qid,
			   bool flush);

	void (*kick)(struct mt76_dev *dev, struct mt76_queue *q);
};

enum mt76_wcid_flags {
	MT_WCID_FLAG_CHECK_PS,
	MT_WCID_FLAG_PS,
};

#define MT76_N_WCIDS 128
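
/*
 * DECLARE_EWMA() from <linux/average.h> generates struct ewma_signal plus the
 * ewma_signal_init/_add/_read helpers: 10 bits of fractional precision, with
 * each new sample weighted 1/8 against the running average.
 */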
DECLARE_EWMA(signal, 10, 8);

struct mt76_wcid {
	struct mt76_rx_tid __rcu *aggr[IEEE80211_NUM_TIDS];

	struct work_struct aggr_work;

	unsigned long flags;

	struct ewma_signal rssi;

	u8 idx;
	u8 sta:1;

	u8 rx_key_pn[IEEE80211_NUM_TIDS][6];

	u8 packet_id;
};

struct mt76_txq {
	struct list_head list;
	struct mt76_queue *hwq;
	struct mt76_wcid *wcid;

	struct sk_buff_head retry_q;
};

struct mt76_txwi_cache {
	struct list_head list;
	dma_addr_t dma_addr;
};

struct mt76_rx_tid {
	struct rcu_head rcu_head;

	struct mt76_dev *dev;

	struct delayed_work reorder_work;

	u8 started:1, stopped:1, timer_pending:1;

	struct sk_buff *reorder_buf[];
};

#define MT_TX_CB_DMA_DONE	BIT(0)
#define MT_TX_CB_TXS_DONE	BIT(1)
#define MT_TX_CB_TXS_FAILED	BIT(2)

#define MT_PACKET_ID_MASK	GENMASK(7, 0)
#define MT_PACKET_ID_NO_ACK	0
#define MT_PACKET_ID_NO_SKB	1
#define MT_PACKET_ID_FIRST	2
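
/*
 * Ids 0 and 1 are reserved: MT_PACKET_ID_NO_ACK marks frames sent without an
 * ack requirement and MT_PACKET_ID_NO_SKB marks status reports that are not
 * matched back to an skb; ids from MT_PACKET_ID_FIRST up to
 * MT_PACKET_ID_MASK are handed out to status-tracked frames.
 */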

#define MT_TX_STATUS_SKB_TIMEOUT	HZ

struct mt76_tx_cb {
	unsigned long jiffies;
	u8 wcid;
	u8 pktid;
	u8 flags;
};

enum {
	MT76_STATE_INITIALIZED,
	MT76_STATE_RUNNING,
	MT76_STATE_MCU_RUNNING,
};

struct mt76_driver_ops {
	u16 txwi_size;

	void (*update_survey)(struct mt76_dev *dev);

	int (*tx_prepare_skb)(struct mt76_dev *dev, void *txwi_ptr,
			      struct sk_buff *skb, struct mt76_queue *q,
			      struct mt76_wcid *wcid,
			      struct ieee80211_sta *sta, u32 *tx_info);

	void (*tx_complete_skb)(struct mt76_dev *dev, struct mt76_queue *q,
				struct mt76_queue_entry *e, bool flush);

	bool (*tx_status_data)(struct mt76_dev *dev, u8 *update);

	void (*rx_skb)(struct mt76_dev *dev, enum mt76_rxq_id q,
		       struct sk_buff *skb);

	void (*rx_poll_complete)(struct mt76_dev *dev, enum mt76_rxq_id q);

	void (*sta_ps)(struct mt76_dev *dev, struct ieee80211_sta *sta,
		       bool ps);

	int (*sta_add)(struct mt76_dev *dev, struct ieee80211_vif *vif,
		       struct ieee80211_sta *sta);

	void (*sta_assoc)(struct mt76_dev *dev, struct ieee80211_vif *vif,
			  struct ieee80211_sta *sta);

	void (*sta_remove)(struct mt76_dev *dev, struct ieee80211_vif *vif,
			   struct ieee80211_sta *sta);
};

struct mt76_channel_state {
	u64 cc_active;
	u64 cc_busy;
};

struct mt76_sband {
	struct ieee80211_supported_band sband;
	struct mt76_channel_state *chan;
};

struct mt76_hw_cap {
	bool has_2ghz;
	bool has_5ghz;
};

struct mt76_rate_power {
	union {
		struct {
			s8 cck[4];
			s8 ofdm[8];
			s8 stbc[10];
			s8 ht[16];
			s8 vht[10];
		};
		s8 all[48];
	};
};

#define MT_VEND_TYPE_EEPROM	BIT(31)
#define MT_VEND_TYPE_CFG	BIT(30)
#define MT_VEND_TYPE_MASK	(MT_VEND_TYPE_EEPROM | MT_VEND_TYPE_CFG)

#define MT_VEND_ADDR(type, n)	(MT_VEND_TYPE_##type | (n))
enum mt_vendor_req {
	MT_VEND_DEV_MODE	= 0x1,
	MT_VEND_WRITE		= 0x2,
	MT_VEND_MULTI_WRITE	= 0x6,
	MT_VEND_MULTI_READ	= 0x7,
	MT_VEND_READ_EEPROM	= 0x9,
	MT_VEND_WRITE_FCE	= 0x42,
	MT_VEND_WRITE_CFG	= 0x46,
	MT_VEND_READ_CFG	= 0x47,
};

enum mt76u_in_ep {
	MT_EP_IN_PKT_RX,
	MT_EP_IN_CMD_RESP,
	__MT_EP_IN_MAX,
};

enum mt76u_out_ep {
	MT_EP_OUT_INBAND_CMD,
	MT_EP_OUT_AC_BK,
	MT_EP_OUT_AC_BE,
	MT_EP_OUT_AC_VI,
	MT_EP_OUT_AC_VO,
	__MT_EP_OUT_MAX,
};

#define MT_SG_MAX_SIZE		8
#define MT_NUM_TX_ENTRIES	256
#define MT_NUM_RX_ENTRIES	128
#define MCU_RESP_URB_SIZE	1024
struct mt76_usb {
	struct mutex usb_ctrl_mtx;

	struct tasklet_struct rx_tasklet;
	struct tasklet_struct tx_tasklet;
	struct delayed_work stat_work;

	u8 out_ep[__MT_EP_OUT_MAX];
	u8 in_ep[__MT_EP_IN_MAX];

	struct mt76u_mcu {
		/* multiple reads */
		struct mt76_reg_pair *rp;
		int rp_len;
	} mcu;
};

struct mt76_mmio {
	struct mt76e_mcu {
		wait_queue_head_t wait;
		struct sk_buff_head res_q;
	} mcu;
	void __iomem *regs;
	spinlock_t irq_lock;
	u32 irqmask;
};

struct mt76_dev {
	struct ieee80211_hw *hw;
	struct cfg80211_chan_def chandef;
	struct ieee80211_channel *main_chan;

	const struct mt76_bus_ops *bus;
	const struct mt76_driver_ops *drv;
	const struct mt76_mcu_ops *mcu_ops;
	struct device *dev;

	struct net_device napi_dev;
	struct napi_struct napi[__MT_RXQ_MAX];
	struct sk_buff_head rx_skb[__MT_RXQ_MAX];

	struct list_head txwi_cache;
	struct mt76_queue q_tx[__MT_TXQ_MAX];
	struct mt76_queue q_rx[__MT_RXQ_MAX];
	const struct mt76_queue_ops *queue_ops;

	wait_queue_head_t tx_wait;
	struct sk_buff_head status_list;

	unsigned long wcid_mask[MT76_N_WCIDS / BITS_PER_LONG];

	struct mt76_wcid global_wcid;
	struct mt76_wcid __rcu *wcid[MT76_N_WCIDS];

	u8 macaddr[ETH_ALEN];
	u32 rev;
	unsigned long state;

	struct mt76_sband sband_2g;
	struct mt76_sband sband_5g;
	struct debugfs_blob_wrapper eeprom;
	struct debugfs_blob_wrapper otp;
	struct mt76_hw_cap cap;

	struct mt76_rate_power rate_power;

	struct led_classdev led_cdev;

	union {
		struct mt76_mmio mmio;
		struct mt76_usb usb;
	};
};

struct mt76_rx_status {
	struct mt76_wcid *wcid;

	unsigned long reorder_time;

	u8 chains;
	s8 chain_signal[IEEE80211_MAX_CHAINS];
};

#define __mt76_rr(dev, ...)	(dev)->bus->rr((dev), __VA_ARGS__)
#define __mt76_wr(dev, ...)	(dev)->bus->wr((dev), __VA_ARGS__)
#define __mt76_rmw(dev, ...)	(dev)->bus->rmw((dev), __VA_ARGS__)
#define __mt76_wr_copy(dev, ...)	(dev)->bus->copy((dev), __VA_ARGS__)

#define __mt76_set(dev, offset, val)	__mt76_rmw(dev, offset, 0, val)
#define __mt76_clear(dev, offset, val)	__mt76_rmw(dev, offset, val, 0)

#define mt76_rr(dev, ...)	(dev)->mt76.bus->rr(&((dev)->mt76), __VA_ARGS__)
#define mt76_wr(dev, ...)	(dev)->mt76.bus->wr(&((dev)->mt76), __VA_ARGS__)
#define mt76_rmw(dev, ...)	(dev)->mt76.bus->rmw(&((dev)->mt76), __VA_ARGS__)
#define mt76_wr_copy(dev, ...)	(dev)->mt76.bus->copy(&((dev)->mt76), __VA_ARGS__)
#define mt76_wr_rp(dev, ...)	(dev)->mt76.bus->wr_rp(&((dev)->mt76), __VA_ARGS__)
#define mt76_rd_rp(dev, ...)	(dev)->mt76.bus->rd_rp(&((dev)->mt76), __VA_ARGS__)

#define mt76_mcu_send_msg(dev, ...)	(dev)->mt76.mcu_ops->mcu_send_msg(&((dev)->mt76), __VA_ARGS__)

#define mt76_set(dev, offset, val)	mt76_rmw(dev, offset, 0, val)
#define mt76_clear(dev, offset, val)	mt76_rmw(dev, offset, val, 0)
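
/*
 * Usage sketch; MT_EXAMPLE_REG is a made-up register name, not part of the
 * driver:
 *
 *	u32 v = mt76_rr(dev, MT_EXAMPLE_REG);       32-bit read
 *	mt76_wr(dev, MT_EXAMPLE_REG, v | BIT(0));   32-bit write
 *	mt76_rmw(dev, MT_EXAMPLE_REG, mask, val);   (reg & ~mask) | val
 *	mt76_set(dev, MT_EXAMPLE_REG, BIT(0));      rmw that only sets bits
 *	mt76_clear(dev, MT_EXAMPLE_REG, BIT(0));    rmw that only clears bits
 */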

#define mt76_get_field(_dev, _reg, _field)		\
	FIELD_GET(_field, mt76_rr(_dev, _reg))

#define mt76_rmw_field(_dev, _reg, _field, _val)	\
	mt76_rmw(_dev, _reg, _field, FIELD_PREP(_field, _val))

#define __mt76_rmw_field(_dev, _reg, _field, _val)	\
	__mt76_rmw(_dev, _reg, _field, FIELD_PREP(_field, _val))
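
/*
 * Example with a hypothetical field definition:
 *
 *	#define MT_EXAMPLE_FIELD GENMASK(7, 4)
 *
 * mt76_get_field(dev, MT_EXAMPLE_REG, MT_EXAMPLE_FIELD) reads the register
 * and returns bits 7:4 shifted down to bit 0; mt76_rmw_field(dev,
 * MT_EXAMPLE_REG, MT_EXAMPLE_FIELD, 0x3) writes 0x3 into those bits and
 * leaves the rest of the register untouched.
 */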

#define mt76_hw(dev) (dev)->mt76.hw

bool __mt76_poll(struct mt76_dev *dev, u32 offset, u32 mask, u32 val,
		 int timeout);

#define mt76_poll(dev, ...) __mt76_poll(&((dev)->mt76), __VA_ARGS__)

bool __mt76_poll_msec(struct mt76_dev *dev, u32 offset, u32 mask, u32 val,
		      int timeout);

#define mt76_poll_msec(dev, ...) __mt76_poll_msec(&((dev)->mt76), __VA_ARGS__)

void mt76_mmio_init(struct mt76_dev *dev, void __iomem *regs);

static inline u16 mt76_chip(struct mt76_dev *dev)
{
	return dev->rev >> 16;
}

static inline u16 mt76_rev(struct mt76_dev *dev)
{
	return dev->rev & 0xffff;
}
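
/*
 * dev->rev packs the chip id into the upper 16 bits and the hardware
 * revision into the lower 16, so e.g. rev 0x76030010 yields
 * mt76_chip() == 0x7603 and mt76_rev() == 0x0010.
 */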

#define mt76xx_chip(dev) mt76_chip(&((dev)->mt76))
#define mt76xx_rev(dev) mt76_rev(&((dev)->mt76))

#define mt76_init_queues(dev)		(dev)->mt76.queue_ops->init(&((dev)->mt76))
#define mt76_queue_alloc(dev, ...)	(dev)->mt76.queue_ops->alloc(&((dev)->mt76), __VA_ARGS__)
#define mt76_tx_queue_skb_raw(dev, ...)	(dev)->mt76.queue_ops->tx_queue_skb_raw(&((dev)->mt76), __VA_ARGS__)
#define mt76_queue_rx_reset(dev, ...)	(dev)->mt76.queue_ops->rx_reset(&((dev)->mt76), __VA_ARGS__)
#define mt76_queue_tx_cleanup(dev, ...)	(dev)->mt76.queue_ops->tx_cleanup(&((dev)->mt76), __VA_ARGS__)
#define mt76_queue_kick(dev, ...)	(dev)->mt76.queue_ops->kick(&((dev)->mt76), __VA_ARGS__)
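
/*
 * Survey state is kept in a flat array that parallels sband->channels[], so
 * a channel's index, and with it the matching mt76_channel_state, can be
 * recovered by pointer difference.
 */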
static inline struct mt76_channel_state *
mt76_channel_state(struct mt76_dev *dev, struct ieee80211_channel *c)
{
	struct mt76_sband *msband;
	int idx;

	if (c->band == NL80211_BAND_2GHZ)
		msband = &dev->sband_2g;
	else
		msband = &dev->sband_5g;

	idx = c - &msband->sband.channels[0];
	return &msband->chan[idx];
}

struct mt76_dev *mt76_alloc_device(struct device *pdev, unsigned int size,
				   const struct ieee80211_ops *ops,
				   const struct mt76_driver_ops *drv_ops);
int mt76_register_device(struct mt76_dev *dev, bool vht,
			 struct ieee80211_rate *rates, int n_rates);
void mt76_unregister_device(struct mt76_dev *dev);

struct dentry *mt76_register_debugfs(struct mt76_dev *dev);
void mt76_seq_puts_array(struct seq_file *file, const char *str,
			 s8 *val, int len);

int mt76_eeprom_init(struct mt76_dev *dev, int len);
void mt76_eeprom_override(struct mt76_dev *dev);

/* increment with wrap-around */
static inline int mt76_incr(int val, int size)
{
	return (val + 1) & (size - 1);
}

/* decrement with wrap-around */
static inline int mt76_decr(int val, int size)
{
	return (val - 1) & (size - 1);
}
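
/*
 * Both helpers assume size is a power of two, so the mask (size - 1) makes
 * the result wrap: mt76_incr(255, 256) == 0 and mt76_decr(0, 256) == 255.
 */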

u8 mt76_ac_to_hwq(u8 ac);

static inline struct ieee80211_txq *
mtxq_to_txq(struct mt76_txq *mtxq)
{
	void *ptr = mtxq;

	return container_of(ptr, struct ieee80211_txq, drv_priv);
}

static inline struct ieee80211_sta *
wcid_to_sta(struct mt76_wcid *wcid)
{
	void *ptr = wcid;

	if (!wcid || !wcid->sta)
		return NULL;

	return container_of(ptr, struct ieee80211_sta, drv_priv);
}
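
/*
 * Driver-private tx callback state is stashed in mac80211's
 * status_driver_data area of the skb control buffer; the BUILD_BUG_ON below
 * guarantees struct mt76_tx_cb never outgrows that area.
 */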
static inline struct mt76_tx_cb *mt76_tx_skb_cb(struct sk_buff *skb)
{
	BUILD_BUG_ON(sizeof(struct mt76_tx_cb) >
		     sizeof(IEEE80211_SKB_CB(skb)->status.status_driver_data));
	return ((void *)IEEE80211_SKB_CB(skb)->status.status_driver_data);
}

int mt76_dma_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
			  struct sk_buff *skb, struct mt76_wcid *wcid,
			  struct ieee80211_sta *sta);

void mt76_rx(struct mt76_dev *dev, enum mt76_rxq_id q, struct sk_buff *skb);
void mt76_tx(struct mt76_dev *dev, struct ieee80211_sta *sta,
	     struct mt76_wcid *wcid, struct sk_buff *skb);
void mt76_txq_init(struct mt76_dev *dev, struct ieee80211_txq *txq);
void mt76_txq_remove(struct mt76_dev *dev, struct ieee80211_txq *txq);
void mt76_wake_tx_queue(struct ieee80211_hw *hw, struct ieee80211_txq *txq);
void mt76_stop_tx_queues(struct mt76_dev *dev, struct ieee80211_sta *sta,
			 bool send_bar);
void mt76_txq_schedule(struct mt76_dev *dev, struct mt76_queue *hwq);
void mt76_txq_schedule_all(struct mt76_dev *dev);
void mt76_release_buffered_frames(struct ieee80211_hw *hw,
				  struct ieee80211_sta *sta,
				  u16 tids, int nframes,
				  enum ieee80211_frame_release_type reason,
				  bool more_data);
void mt76_set_channel(struct mt76_dev *dev);
int mt76_get_survey(struct ieee80211_hw *hw, int idx,
		    struct survey_info *survey);
void mt76_set_stream_caps(struct mt76_dev *dev, bool vht);

int mt76_rx_aggr_start(struct mt76_dev *dev, struct mt76_wcid *wcid, u8 tid,
		       u16 ssn, u8 size);
void mt76_rx_aggr_stop(struct mt76_dev *dev, struct mt76_wcid *wcid, u8 tid);

void mt76_wcid_key_setup(struct mt76_dev *dev, struct mt76_wcid *wcid,
			 struct ieee80211_key_conf *key);

void mt76_tx_status_lock(struct mt76_dev *dev, struct sk_buff_head *list)
			 __acquires(&dev->status_list.lock);
void mt76_tx_status_unlock(struct mt76_dev *dev, struct sk_buff_head *list)
			   __releases(&dev->status_list.lock);

int mt76_tx_status_skb_add(struct mt76_dev *dev, struct mt76_wcid *wcid,
			   struct sk_buff *skb);
struct sk_buff *mt76_tx_status_skb_get(struct mt76_dev *dev,
				       struct mt76_wcid *wcid, int pktid,
				       struct sk_buff_head *list);
void mt76_tx_status_skb_done(struct mt76_dev *dev, struct sk_buff *skb,
			     struct sk_buff_head *list);
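
/*
 * Typical tx-status lookup, as a sketch; pktid is assumed to come from the
 * hardware status report:
 *
 *	struct sk_buff_head list;
 *	struct sk_buff *skb;
 *
 *	mt76_tx_status_lock(dev, &list);
 *	skb = mt76_tx_status_skb_get(dev, wcid, pktid, &list);
 *	if (skb)
 *		mt76_tx_status_skb_done(dev, skb, &list);
 *	mt76_tx_status_unlock(dev, &list);
 */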
void mt76_tx_complete_skb(struct mt76_dev *dev, struct sk_buff *skb);
void mt76_tx_status_check(struct mt76_dev *dev, struct mt76_wcid *wcid,
			  bool flush);

int mt76_sta_state(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
		   struct ieee80211_sta *sta,
		   enum ieee80211_sta_state old_state,
		   enum ieee80211_sta_state new_state);

struct ieee80211_sta *mt76_rx_convert(struct sk_buff *skb);

int mt76_get_min_avg_rssi(struct mt76_dev *dev);

int mt76_get_txpower(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
		     int *dbm);

void mt76_csa_check(struct mt76_dev *dev);
void mt76_csa_finish(struct mt76_dev *dev);

void mt76_tx_free(struct mt76_dev *dev);
struct mt76_txwi_cache *mt76_get_txwi(struct mt76_dev *dev);
void mt76_put_txwi(struct mt76_dev *dev, struct mt76_txwi_cache *t);
void mt76_rx_complete(struct mt76_dev *dev, struct sk_buff_head *frames,
		      struct napi_struct *napi);
void mt76_rx_poll_complete(struct mt76_dev *dev, enum mt76_rxq_id q,
			   struct napi_struct *napi);
void mt76_rx_aggr_reorder(struct sk_buff *skb, struct sk_buff_head *frames);
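
/*
 * -ECONNRESET, -ESHUTDOWN and -ENOENT are the statuses urbs complete with
 * when they are unlinked, killed or the device disappears, so they are not
 * treated as transfer errors.
 */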
static inline bool mt76u_urb_error(struct urb *urb)
{
	return urb->status &&
	       urb->status != -ECONNRESET &&
	       urb->status != -ESHUTDOWN &&
	       urb->status != -ENOENT;
}

/* Map hardware queues to usb endpoints */
static inline u8 q2ep(u8 qid)
{
	/* TODO: take management packets to queue 5 */
	return qid + 1;
}
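
/*
 * Synchronous bulk transfer on the MCU command endpoints: a non-NULL
 * actual_len selects the command-response IN pipe, otherwise the in-band
 * command OUT pipe is used.
 */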
static inline int
mt76u_bulk_msg(struct mt76_dev *dev, void *data, int len, int *actual_len,
	       int timeout)
{
	struct usb_interface *intf = to_usb_interface(dev->dev);
	struct usb_device *udev = interface_to_usbdev(intf);
	struct mt76_usb *usb = &dev->usb;
	unsigned int pipe;

	if (actual_len)
		pipe = usb_rcvbulkpipe(udev, usb->in_ep[MT_EP_IN_CMD_RESP]);
	else
		pipe = usb_sndbulkpipe(udev, usb->out_ep[MT_EP_OUT_INBAND_CMD]);

	return usb_bulk_msg(udev, pipe, data, len, actual_len, timeout);
}

int mt76u_vendor_request(struct mt76_dev *dev, u8 req,
			 u8 req_type, u16 val, u16 offset,
			 void *buf, size_t len);
void mt76u_single_wr(struct mt76_dev *dev, const u8 req,
		     const u16 offset, const u32 val);
int mt76u_init(struct mt76_dev *dev, struct usb_interface *intf);
int mt76u_submit_rx_buffers(struct mt76_dev *dev);
int mt76u_alloc_queues(struct mt76_dev *dev);
void mt76u_stop_queues(struct mt76_dev *dev);
void mt76u_stop_stat_wk(struct mt76_dev *dev);
void mt76u_queues_deinit(struct mt76_dev *dev);

struct sk_buff *
mt76_mcu_msg_alloc(const void *data, int head_len,
		   int data_len, int tail_len);
void mt76_mcu_rx_event(struct mt76_dev *dev, struct sk_buff *skb);
struct sk_buff *mt76_mcu_get_response(struct mt76_dev *dev,
				      unsigned long expires);

#endif