/* SPDX-License-Identifier: ISC */
/*
 * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
 */

#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/skbuff.h>
#include <linux/leds.h>
#include <linux/usb.h>
#include <linux/average.h>
#include <net/mac80211.h>
#define MT_TX_RING_SIZE		256
#define MT_MCU_RING_SIZE	32
#define MT_RX_BUF_SIZE		2048
#define MT_SKB_HEAD_LEN		128
struct mt76_reg_pair {
	u32 reg;
	u32 value;
};

struct mt76_bus_ops {
	u32 (*rr)(struct mt76_dev *dev, u32 offset);
	void (*wr)(struct mt76_dev *dev, u32 offset, u32 val);
	u32 (*rmw)(struct mt76_dev *dev, u32 offset, u32 mask, u32 val);
	void (*write_copy)(struct mt76_dev *dev, u32 offset, const void *data,
			   int len);
	void (*read_copy)(struct mt76_dev *dev, u32 offset, void *data,
			  int len);
	int (*wr_rp)(struct mt76_dev *dev, u32 base,
		     const struct mt76_reg_pair *rp, int len);
	int (*rd_rp)(struct mt76_dev *dev, u32 base,
		     struct mt76_reg_pair *rp, int len);
	enum mt76_bus_type type;
};
#define mt76_is_usb(dev) ((dev)->bus->type == MT76_BUS_USB)
#define mt76_is_mmio(dev) ((dev)->bus->type == MT76_BUS_MMIO)
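/*
 * Illustrative only (not part of this header): a bus-agnostic register
 * read through the ops table. The helper name is made up; real callers
 * use the __mt76_rr()/mt76_rr() macros defined below.
 *
 *	static u32 example_read_reg(struct mt76_dev *dev, u32 offset)
 *	{
 *		// dispatches to the USB or MMIO implementation of ->rr()
 *		return dev->bus->rr(dev, offset);
 *	}
 */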
enum mt76_txq_id {
	MT_TXQ_VO = IEEE80211_AC_VO,
	MT_TXQ_VI = IEEE80211_AC_VI,
	MT_TXQ_BE = IEEE80211_AC_BE,
	MT_TXQ_BK = IEEE80211_AC_BK,
struct mt76_queue_buf {
	dma_addr_t addr;
	int len;
};

struct mt76_tx_info {
	struct mt76_queue_buf buf[32];
struct mt76_queue_entry {
	struct mt76_txwi_cache *txwi;
struct mt76_queue_regs {
	u32 desc_base;
	u32 ring_size;
	u32 cpu_idx;
	u32 dma_idx;
} __packed __aligned(4);
struct mt76_queue {
	struct mt76_queue_regs __iomem *regs;

	struct mt76_queue_entry *entry;
	struct mt76_desc *desc;

	struct sk_buff *rx_head;
	struct page_frag_cache rx_page;
struct mt76_sw_queue {
	struct mt76_queue *q;

	struct list_head swq;
struct mt76_mcu_ops {
	int (*mcu_send_msg)(struct mt76_dev *dev, int cmd, const void *data,
			    int len, bool wait_resp);
	int (*mcu_skb_send_msg)(struct mt76_dev *dev, struct sk_buff *skb,
				int cmd, bool wait_resp);
	int (*mcu_wr_rp)(struct mt76_dev *dev, u32 base,
			 const struct mt76_reg_pair *rp, int len);
	int (*mcu_rd_rp)(struct mt76_dev *dev, u32 base,
			 struct mt76_reg_pair *rp, int len);
	int (*mcu_restart)(struct mt76_dev *dev);
};
struct mt76_queue_ops {
	int (*init)(struct mt76_dev *dev);

	int (*alloc)(struct mt76_dev *dev, struct mt76_queue *q,
		     int idx, int n_desc, int bufsize,
		     u32 ring_base);

	int (*tx_queue_skb)(struct mt76_dev *dev, enum mt76_txq_id qid,
			    struct sk_buff *skb, struct mt76_wcid *wcid,
			    struct ieee80211_sta *sta);

	int (*tx_queue_skb_raw)(struct mt76_dev *dev, enum mt76_txq_id qid,
				struct sk_buff *skb, u32 tx_info);

	void *(*dequeue)(struct mt76_dev *dev, struct mt76_queue *q, bool flush,
			 int *len, u32 *info, bool *more);

	void (*rx_reset)(struct mt76_dev *dev, enum mt76_rxq_id qid);

	void (*tx_cleanup)(struct mt76_dev *dev, enum mt76_txq_id qid,
			   bool flush);

	void (*kick)(struct mt76_dev *dev, struct mt76_queue *q);
};
enum mt76_wcid_flags {
	MT_WCID_FLAG_CHECK_PS,
};

#define MT76_N_WCIDS 128
/* stored in ieee80211_tx_info::hw_queue */
#define MT_TX_HW_QUEUE_EXT_PHY		BIT(3)

DECLARE_EWMA(signal, 10, 8);
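/*
 * DECLARE_EWMA(signal, 10, 8) (from <linux/average.h>) generates
 * struct ewma_signal plus ewma_signal_init/_add/_read helpers: values
 * are kept with 10 fractional bits, and each new sample is weighted
 * 1/8 against the running average. Hypothetical use:
 *
 *	struct ewma_signal avg;
 *
 *	ewma_signal_init(&avg);
 *	ewma_signal_add(&avg, 72);		// feed a sample
 *	smoothed = ewma_signal_read(&avg);	// read the average
 */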
#define MT_WCID_TX_INFO_RATE		GENMASK(15, 0)
#define MT_WCID_TX_INFO_NSS		GENMASK(17, 16)
#define MT_WCID_TX_INFO_TXPWR_ADJ	GENMASK(25, 18)
#define MT_WCID_TX_INFO_SET		BIT(31)
struct mt76_wcid {
	struct mt76_rx_tid __rcu *aggr[IEEE80211_NUM_TIDS];

	struct ewma_signal rssi;

	u8 rx_key_pn[IEEE80211_NUM_TIDS][6];
struct mt76_txq {
	struct mt76_sw_queue *swq;
	struct mt76_wcid *wcid;

	struct sk_buff_head retry_q;
struct mt76_txwi_cache {
	struct list_head list;
struct mt76_rx_tid {
	struct rcu_head rcu_head;

	struct mt76_dev *dev;

	struct delayed_work reorder_work;

	u8 started:1, stopped:1, timer_pending:1;

	struct sk_buff *reorder_buf[];
};
#define MT_TX_CB_DMA_DONE		BIT(0)
#define MT_TX_CB_TXS_DONE		BIT(1)
#define MT_TX_CB_TXS_FAILED		BIT(2)

#define MT_PACKET_ID_MASK		GENMASK(6, 0)
#define MT_PACKET_ID_NO_ACK		0
#define MT_PACKET_ID_NO_SKB		1
#define MT_PACKET_ID_FIRST		2
#define MT_PACKET_ID_HAS_RATE		BIT(7)

#define MT_TX_STATUS_SKB_TIMEOUT	HZ
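/*
 * Packet IDs are 7-bit values stored with each queued frame: 0 and 1
 * are reserved for frames whose status cannot be matched back to an
 * skb, so trackable IDs start at MT_PACKET_ID_FIRST.
 * MT_PACKET_ID_HAS_RATE marks IDs that carry rate information instead
 * (see mt76_is_skb_pktid() below).
 */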
struct mt76_tx_cb {
	unsigned long jiffies;
	u8 wcid;
	u8 pktid;
	u8 flags;
};
	MT76_STATE_INITIALIZED,
	MT76_STATE_MCU_RUNNING,
#define MT_DRV_TXWI_NO_FREE		BIT(0)
#define MT_DRV_TX_ALIGNED4_SKBS		BIT(1)
#define MT_DRV_SW_RX_AIRTIME		BIT(2)
#define MT_DRV_RX_DMA_HDR		BIT(3)
struct mt76_driver_ops {
	void (*update_survey)(struct mt76_dev *dev);

	int (*tx_prepare_skb)(struct mt76_dev *dev, void *txwi_ptr,
			      enum mt76_txq_id qid, struct mt76_wcid *wcid,
			      struct ieee80211_sta *sta,
			      struct mt76_tx_info *tx_info);

	void (*tx_complete_skb)(struct mt76_dev *dev, enum mt76_txq_id qid,
				struct mt76_queue_entry *e);

	bool (*tx_status_data)(struct mt76_dev *dev, u8 *update);

	void (*rx_skb)(struct mt76_dev *dev, enum mt76_rxq_id q,
		       struct sk_buff *skb);

	void (*rx_poll_complete)(struct mt76_dev *dev, enum mt76_rxq_id q);

	void (*sta_ps)(struct mt76_dev *dev, struct ieee80211_sta *sta,
		       bool ps);

	int (*sta_add)(struct mt76_dev *dev, struct ieee80211_vif *vif,
		       struct ieee80211_sta *sta);

	void (*sta_assoc)(struct mt76_dev *dev, struct ieee80211_vif *vif,
			  struct ieee80211_sta *sta);

	void (*sta_remove)(struct mt76_dev *dev, struct ieee80211_vif *vif,
			   struct ieee80211_sta *sta);
};
struct mt76_channel_state {

struct mt76_sband {
	struct ieee80211_supported_band sband;
	struct mt76_channel_state *chan;
};

struct mt76_rate_power {
#define MT_VEND_TYPE_EEPROM	BIT(31)
#define MT_VEND_TYPE_CFG	BIT(30)
#define MT_VEND_TYPE_MASK	(MT_VEND_TYPE_EEPROM | MT_VEND_TYPE_CFG)

#define MT_VEND_ADDR(type, n)	(MT_VEND_TYPE_##type | (n))
	MT_VEND_DEV_MODE	= 0x1,
	MT_VEND_POWER_ON	= 0x4,
	MT_VEND_MULTI_WRITE	= 0x6,
	MT_VEND_MULTI_READ	= 0x7,
	MT_VEND_READ_EEPROM	= 0x9,
	MT_VEND_WRITE_FCE	= 0x42,
	MT_VEND_WRITE_CFG	= 0x46,
	MT_VEND_READ_CFG	= 0x47,
	MT_VEND_READ_EXT	= 0x63,
	MT_VEND_WRITE_EXT	= 0x66,
	MT_EP_OUT_INBAND_CMD,
	struct sk_buff_head res_q;
	wait_queue_head_t wait;
#define MT_TX_SG_MAX_SIZE	8
#define MT_RX_SG_MAX_SIZE	4
#define MT_NUM_TX_ENTRIES	256
#define MT_NUM_RX_ENTRIES	128
#define MCU_RESP_URB_SIZE	1024
struct mt76_usb {
	struct mutex usb_ctrl_mtx;

	struct tasklet_struct rx_tasklet;
	struct workqueue_struct *wq;
	struct work_struct stat_work;

	u8 out_ep[__MT_EP_OUT_MAX];
	u8 in_ep[__MT_EP_IN_MAX];

	struct mt76_reg_pair *rp;
struct mt76_rx_status {
	struct mt76_wcid *wcid;

	unsigned long reorder_time;

	s8 chain_signal[IEEE80211_MAX_CHAINS];
struct mt76_phy {
	struct ieee80211_hw *hw;
	struct mt76_dev *dev;

	struct cfg80211_chan_def chandef;
	struct ieee80211_channel *main_chan;

	struct mt76_channel_state *chan_state;

	struct mt76_sband sband_2g;
	struct mt76_sband sband_5g;
struct mt76_dev {
	struct mt76_phy phy; /* must be first */
	struct mt76_phy *phy2;

	struct ieee80211_hw *hw;

	struct mt76_rx_status rx_ampdu_status;

	const struct mt76_bus_ops *bus;
	const struct mt76_driver_ops *drv;
	const struct mt76_mcu_ops *mcu_ops;

	struct net_device napi_dev;
	struct napi_struct napi[__MT_RXQ_MAX];
	struct sk_buff_head rx_skb[__MT_RXQ_MAX];

	struct list_head txwi_cache;
	struct mt76_sw_queue q_tx[2 * __MT_TXQ_MAX];
	struct mt76_queue q_rx[__MT_RXQ_MAX];
	const struct mt76_queue_ops *queue_ops;

	struct tasklet_struct tx_tasklet;
	struct napi_struct tx_napi;
	struct delayed_work mac_work;

	wait_queue_head_t tx_wait;
	struct sk_buff_head status_list;

	unsigned long wcid_mask[MT76_N_WCIDS / BITS_PER_LONG];
	unsigned long wcid_phy_mask[MT76_N_WCIDS / BITS_PER_LONG];

	struct mt76_wcid global_wcid;
	struct mt76_wcid __rcu *wcid[MT76_N_WCIDS];

	u8 macaddr[ETH_ALEN];

	struct tasklet_struct pre_tbtt_tasklet;

	struct debugfs_blob_wrapper eeprom;
	struct debugfs_blob_wrapper otp;
	struct mt76_hw_cap cap;

	struct mt76_rate_power rate_power;

	enum nl80211_dfs_regions region;

	struct led_classdev led_cdev;

	struct mt76_mmio mmio;
#define __mt76_rr(dev, ...)	(dev)->bus->rr((dev), __VA_ARGS__)
#define __mt76_wr(dev, ...)	(dev)->bus->wr((dev), __VA_ARGS__)
#define __mt76_rmw(dev, ...)	(dev)->bus->rmw((dev), __VA_ARGS__)
#define __mt76_wr_copy(dev, ...)	(dev)->bus->write_copy((dev), __VA_ARGS__)
#define __mt76_rr_copy(dev, ...)	(dev)->bus->read_copy((dev), __VA_ARGS__)

#define __mt76_set(dev, offset, val)	__mt76_rmw(dev, offset, 0, val)
#define __mt76_clear(dev, offset, val)	__mt76_rmw(dev, offset, val, 0)
#define mt76_rr(dev, ...)	(dev)->mt76.bus->rr(&((dev)->mt76), __VA_ARGS__)
#define mt76_wr(dev, ...)	(dev)->mt76.bus->wr(&((dev)->mt76), __VA_ARGS__)
#define mt76_rmw(dev, ...)	(dev)->mt76.bus->rmw(&((dev)->mt76), __VA_ARGS__)
#define mt76_wr_copy(dev, ...)	(dev)->mt76.bus->write_copy(&((dev)->mt76), __VA_ARGS__)
#define mt76_rr_copy(dev, ...)	(dev)->mt76.bus->read_copy(&((dev)->mt76), __VA_ARGS__)
#define mt76_wr_rp(dev, ...)	(dev)->mt76.bus->wr_rp(&((dev)->mt76), __VA_ARGS__)
#define mt76_rd_rp(dev, ...)	(dev)->mt76.bus->rd_rp(&((dev)->mt76), __VA_ARGS__)

#define mt76_mcu_send_msg(dev, ...)	(dev)->mt76.mcu_ops->mcu_send_msg(&((dev)->mt76), __VA_ARGS__)
#define __mt76_mcu_send_msg(dev, ...)	(dev)->mcu_ops->mcu_send_msg((dev), __VA_ARGS__)
#define __mt76_mcu_skb_send_msg(dev, ...)	(dev)->mcu_ops->mcu_skb_send_msg((dev), __VA_ARGS__)
#define mt76_mcu_restart(dev, ...)	(dev)->mt76.mcu_ops->mcu_restart(&((dev)->mt76))
#define __mt76_mcu_restart(dev, ...)	(dev)->mcu_ops->mcu_restart((dev))

#define mt76_set(dev, offset, val)	mt76_rmw(dev, offset, 0, val)
#define mt76_clear(dev, offset, val)	mt76_rmw(dev, offset, val, 0)
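/*
 * rmw(dev, offset, mask, val) clears the bits in @mask and ORs in @val,
 * which is why the set/clear helpers above are thin wrappers around it.
 * A hedged example; the register name is made up:
 *
 *	mt76_set(dev, MT_EXAMPLE_REG, BIT(0));		// set bit 0
 *	mt76_clear(dev, MT_EXAMPLE_REG, BIT(1));	// clear bit 1
 *	mt76_rmw(dev, MT_EXAMPLE_REG, GENMASK(7, 4),
 *		 FIELD_PREP(GENMASK(7, 4), 0x3));	// update a field
 */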
#define mt76_get_field(_dev, _reg, _field)		\
	FIELD_GET(_field, mt76_rr(_dev, _reg))
#define mt76_rmw_field(_dev, _reg, _field, _val)	\
	mt76_rmw(_dev, _reg, _field, FIELD_PREP(_field, _val))
#define __mt76_rmw_field(_dev, _reg, _field, _val)	\
	__mt76_rmw(_dev, _reg, _field, FIELD_PREP(_field, _val))

#define mt76_hw(dev) (dev)->mphy.hw
static inline struct ieee80211_hw *
mt76_wcid_hw(struct mt76_dev *dev, u8 wcid)
{
	if (wcid < MT76_N_WCIDS &&
	    mt76_wcid_mask_test(dev->wcid_phy_mask, wcid))
		return dev->phy2->hw;

	return dev->phy.hw;
}
bool __mt76_poll(struct mt76_dev *dev, u32 offset, u32 mask, u32 val,
		 int timeout);

#define mt76_poll(dev, ...) __mt76_poll(&((dev)->mt76), __VA_ARGS__)

bool __mt76_poll_msec(struct mt76_dev *dev, u32 offset, u32 mask, u32 val,
		      int timeout);

#define mt76_poll_msec(dev, ...) __mt76_poll_msec(&((dev)->mt76), __VA_ARGS__)
void mt76_mmio_init(struct mt76_dev *dev, void __iomem *regs);
void mt76_pci_disable_aspm(struct pci_dev *pdev);
static inline u16 mt76_chip(struct mt76_dev *dev)
{
	return dev->rev >> 16;
}

static inline u16 mt76_rev(struct mt76_dev *dev)
{
	return dev->rev & 0xffff;
}
#define mt76xx_chip(dev) mt76_chip(&((dev)->mt76))
#define mt76xx_rev(dev) mt76_rev(&((dev)->mt76))

#define mt76_init_queues(dev) (dev)->mt76.queue_ops->init(&((dev)->mt76))
#define mt76_queue_alloc(dev, ...) (dev)->mt76.queue_ops->alloc(&((dev)->mt76), __VA_ARGS__)
#define mt76_tx_queue_skb_raw(dev, ...) (dev)->mt76.queue_ops->tx_queue_skb_raw(&((dev)->mt76), __VA_ARGS__)
#define mt76_tx_queue_skb(dev, ...) (dev)->mt76.queue_ops->tx_queue_skb(&((dev)->mt76), __VA_ARGS__)
#define mt76_queue_rx_reset(dev, ...) (dev)->mt76.queue_ops->rx_reset(&((dev)->mt76), __VA_ARGS__)
#define mt76_queue_tx_cleanup(dev, ...) (dev)->mt76.queue_ops->tx_cleanup(&((dev)->mt76), __VA_ARGS__)
#define mt76_queue_kick(dev, ...) (dev)->mt76.queue_ops->kick(&((dev)->mt76), __VA_ARGS__)
struct mt76_dev *mt76_alloc_device(struct device *pdev, unsigned int size,
				   const struct ieee80211_ops *ops,
				   const struct mt76_driver_ops *drv_ops);
int mt76_register_device(struct mt76_dev *dev, bool vht,
			 struct ieee80211_rate *rates, int n_rates);
void mt76_unregister_device(struct mt76_dev *dev);
void mt76_free_device(struct mt76_dev *dev);
void mt76_unregister_phy(struct mt76_phy *phy);

struct mt76_phy *mt76_alloc_phy(struct mt76_dev *dev, unsigned int size,
				const struct ieee80211_ops *ops);
int mt76_register_phy(struct mt76_phy *phy);

struct dentry *mt76_register_debugfs(struct mt76_dev *dev);
int mt76_queues_read(struct seq_file *s, void *data);
void mt76_seq_puts_array(struct seq_file *file, const char *str,
			 s8 *val, int len);

int mt76_eeprom_init(struct mt76_dev *dev, int len);
void mt76_eeprom_override(struct mt76_dev *dev);
static inline struct mt76_phy *
mt76_dev_phy(struct mt76_dev *dev, bool phy_ext)
{
	if (phy_ext && dev->phy2)
		return dev->phy2;

	return &dev->phy;
}

static inline struct ieee80211_hw *
mt76_phy_hw(struct mt76_dev *dev, bool phy_ext)
{
	return mt76_dev_phy(dev, phy_ext)->hw;
}
static inline u8 *
mt76_get_txwi_ptr(struct mt76_dev *dev, struct mt76_txwi_cache *t)
{
	return (u8 *)t - dev->drv->txwi_size;
}
/* increment with wrap-around; size must be a power of two */
static inline int mt76_incr(int val, int size)
{
	return (val + 1) & (size - 1);
}

/* decrement with wrap-around */
static inline int mt76_decr(int val, int size)
{
	return (val - 1) & (size - 1);
}
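/*
 * Both helpers rely on @size being a power of two, so the AND against
 * (size - 1) performs the wrap cheaply. With size == 256:
 *
 *	mt76_incr(255, 256) == 0	// (255 + 1) & 0xff
 *	mt76_decr(0, 256) == 255	// (0 - 1) & 0xff
 */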
u8 mt76_ac_to_hwq(u8 ac);
static inline struct ieee80211_txq *
mtxq_to_txq(struct mt76_txq *mtxq)
{
	void *ptr = mtxq;

	return container_of(ptr, struct ieee80211_txq, drv_priv);
}
static inline struct ieee80211_sta *
wcid_to_sta(struct mt76_wcid *wcid)
{
	void *ptr = wcid;

	if (!wcid || !wcid->sta)
		return NULL;

	return container_of(ptr, struct ieee80211_sta, drv_priv);
}
static inline struct mt76_tx_cb *mt76_tx_skb_cb(struct sk_buff *skb)
{
	BUILD_BUG_ON(sizeof(struct mt76_tx_cb) >
		     sizeof(IEEE80211_SKB_CB(skb)->status.status_driver_data));
	return ((void *)IEEE80211_SKB_CB(skb)->status.status_driver_data);
}
static inline void mt76_insert_hdr_pad(struct sk_buff *skb)
{
	int len = ieee80211_get_hdrlen_from_skb(skb);

	if (len % 4 == 0)
		return;

	skb_push(skb, 2);
	memmove(skb->data, skb->data + 2, len);

	skb->data[len] = 0;
	skb->data[len + 1] = 0;
}
static inline bool mt76_is_skb_pktid(u8 pktid)
{
	if (pktid & MT_PACKET_ID_HAS_RATE)
		return false;

	return pktid >= MT_PACKET_ID_FIRST;
}
static inline u8 mt76_tx_power_nss_delta(u8 nss)
{
	static const u8 nss_delta[4] = { 0, 6, 9, 12 };

	return nss_delta[nss - 1];
}
void mt76_rx(struct mt76_dev *dev, enum mt76_rxq_id q, struct sk_buff *skb);
void mt76_tx(struct mt76_phy *phy, struct ieee80211_sta *sta,
	     struct mt76_wcid *wcid, struct sk_buff *skb);
void mt76_txq_init(struct mt76_dev *dev, struct ieee80211_txq *txq);
void mt76_txq_remove(struct mt76_dev *dev, struct ieee80211_txq *txq);
void mt76_wake_tx_queue(struct ieee80211_hw *hw, struct ieee80211_txq *txq);
void mt76_stop_tx_queues(struct mt76_dev *dev, struct ieee80211_sta *sta,
			 bool send_bar);
void mt76_txq_schedule(struct mt76_phy *phy, enum mt76_txq_id qid);
void mt76_txq_schedule_all(struct mt76_phy *phy);
void mt76_tx_tasklet(unsigned long data);
void mt76_release_buffered_frames(struct ieee80211_hw *hw,
				  struct ieee80211_sta *sta,
				  u16 tids, int nframes,
				  enum ieee80211_frame_release_type reason,
				  bool more_data);
bool mt76_has_tx_pending(struct mt76_phy *phy);
void mt76_set_channel(struct mt76_phy *phy);
void mt76_update_survey(struct mt76_dev *dev);
int mt76_get_survey(struct ieee80211_hw *hw, int idx,
		    struct survey_info *survey);
void mt76_set_stream_caps(struct mt76_dev *dev, bool vht);
int mt76_rx_aggr_start(struct mt76_dev *dev, struct mt76_wcid *wcid, u8 tid,
		       u16 ssn, u16 size);
void mt76_rx_aggr_stop(struct mt76_dev *dev, struct mt76_wcid *wcid, u8 tid);
void mt76_wcid_key_setup(struct mt76_dev *dev, struct mt76_wcid *wcid,
			 struct ieee80211_key_conf *key);

void mt76_tx_status_lock(struct mt76_dev *dev, struct sk_buff_head *list)
			 __acquires(&dev->status_list.lock);
void mt76_tx_status_unlock(struct mt76_dev *dev, struct sk_buff_head *list)
			   __releases(&dev->status_list.lock);
int mt76_tx_status_skb_add(struct mt76_dev *dev, struct mt76_wcid *wcid,
			   struct sk_buff *skb);
struct sk_buff *mt76_tx_status_skb_get(struct mt76_dev *dev,
				       struct mt76_wcid *wcid, int pktid,
				       struct sk_buff_head *list);
void mt76_tx_status_skb_done(struct mt76_dev *dev, struct sk_buff *skb,
			     struct sk_buff_head *list);
void mt76_tx_complete_skb(struct mt76_dev *dev, struct sk_buff *skb);
void mt76_tx_status_check(struct mt76_dev *dev, struct mt76_wcid *wcid,
			  bool flush);
int mt76_sta_state(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
		   struct ieee80211_sta *sta,
		   enum ieee80211_sta_state old_state,
		   enum ieee80211_sta_state new_state);
void __mt76_sta_remove(struct mt76_dev *dev, struct ieee80211_vif *vif,
		       struct ieee80211_sta *sta);
void mt76_sta_pre_rcu_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
			     struct ieee80211_sta *sta);
int mt76_get_min_avg_rssi(struct mt76_dev *dev, bool ext_phy);

int mt76_get_txpower(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
		     int *dbm);

void mt76_csa_check(struct mt76_dev *dev);
void mt76_csa_finish(struct mt76_dev *dev);
int mt76_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant);
int mt76_set_tim(struct ieee80211_hw *hw, struct ieee80211_sta *sta, bool set);
void mt76_insert_ccmp_hdr(struct sk_buff *skb, u8 key_id);
int mt76_get_rate(struct mt76_dev *dev,
		  struct ieee80211_supported_band *sband,
		  int idx, bool cck);
void mt76_sw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
		  const u8 *mac);
void mt76_sw_scan_complete(struct ieee80211_hw *hw,
			   struct ieee80211_vif *vif);
static inline struct ieee80211_hw *
mt76_tx_status_get_hw(struct mt76_dev *dev, struct sk_buff *skb)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hw *hw = dev->phy.hw;

	if ((info->hw_queue & MT_TX_HW_QUEUE_EXT_PHY) && dev->phy2)
		hw = dev->phy2->hw;

	info->hw_queue &= ~MT_TX_HW_QUEUE_EXT_PHY;

	return hw;
}
void mt76_tx_free(struct mt76_dev *dev);
struct mt76_txwi_cache *mt76_get_txwi(struct mt76_dev *dev);
void mt76_put_txwi(struct mt76_dev *dev, struct mt76_txwi_cache *t);
void mt76_rx_complete(struct mt76_dev *dev, struct sk_buff_head *frames,
		      struct napi_struct *napi);
void mt76_rx_poll_complete(struct mt76_dev *dev, enum mt76_rxq_id q,
			   struct napi_struct *napi);
void mt76_rx_aggr_reorder(struct sk_buff *skb, struct sk_buff_head *frames);
static inline bool mt76u_urb_error(struct urb *urb)
{
	return urb->status &&
	       urb->status != -ECONNRESET &&
	       urb->status != -ESHUTDOWN &&
	       urb->status != -ENOENT;
}
/* Map hardware queues to usb endpoints */
static inline u8 q2ep(u8 qid)
{
	/* TODO: take management packets to queue 5 */
	return qid + 1;
}
static inline int
mt76u_bulk_msg(struct mt76_dev *dev, void *data, int len, int *actual_len,
	       int timeout, int ep)
{
	struct usb_interface *uintf = to_usb_interface(dev->dev);
	struct usb_device *udev = interface_to_usbdev(uintf);
	struct mt76_usb *usb = &dev->usb;
	unsigned int pipe;

	if (actual_len)
		pipe = usb_rcvbulkpipe(udev, usb->in_ep[ep]);
	else
		pipe = usb_sndbulkpipe(udev, usb->out_ep[ep]);

	return usb_bulk_msg(udev, pipe, data, len, actual_len, timeout);
}
int mt76u_skb_dma_info(struct sk_buff *skb, u32 info);
int mt76u_vendor_request(struct mt76_dev *dev, u8 req,
			 u8 req_type, u16 val, u16 offset,
			 void *buf, size_t len);
void mt76u_single_wr(struct mt76_dev *dev, const u8 req,
		     const u16 offset, const u32 val);
void mt76u_deinit(struct mt76_dev *dev);
int mt76u_init(struct mt76_dev *dev, struct usb_interface *intf,
	       bool ext);
int mt76u_alloc_mcu_queue(struct mt76_dev *dev);
int mt76u_alloc_queues(struct mt76_dev *dev);
void mt76u_stop_tx(struct mt76_dev *dev);
void mt76u_stop_rx(struct mt76_dev *dev);
int mt76u_resume_rx(struct mt76_dev *dev);
void mt76u_queues_deinit(struct mt76_dev *dev);
struct sk_buff *
mt76_mcu_msg_alloc(const void *data, int head_len,
		   int data_len, int tail_len);
void mt76_mcu_rx_event(struct mt76_dev *dev, struct sk_buff *skb);
struct sk_buff *mt76_mcu_get_response(struct mt76_dev *dev,
				      unsigned long expires);
void mt76_set_irq_mask(struct mt76_dev *dev, u32 addr, u32 clear, u32 set);