/* SPDX-License-Identifier: ISC */
/*
 * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
 */

#ifndef __MT76_H
#define __MT76_H

#include <linux/kernel.h>
#include <linux/dma-mapping.h>
#include <linux/spinlock.h>
#include <linux/skbuff.h>
#include <linux/leds.h>
#include <linux/usb.h>
#include <linux/average.h>
#include <linux/soc/mediatek/mtk_wed.h>
#include <net/mac80211.h>
#include <net/page_pool/helpers.h>
#include "util.h"
#include "testmode.h"
#define MT_MCU_RING_SIZE	32
#define MT_RX_BUF_SIZE		2048
#define MT_SKB_HEAD_LEN		256

#define MT_MAX_NON_AQL_PKT	16
#define MT_TXQ_FREE_THR		32

#define MT76_TOKEN_FREE_THR	64
#define MT_QFLAG_WED_RING	GENMASK(1, 0)
#define MT_QFLAG_WED_TYPE	GENMASK(4, 2)
#define MT_QFLAG_WED		BIT(5)
#define MT_QFLAG_WED_RRO	BIT(6)
#define MT_QFLAG_WED_RRO_EN	BIT(7)

#define __MT_WED_Q(_type, _n)	(MT_QFLAG_WED | \
				 FIELD_PREP(MT_QFLAG_WED_TYPE, _type) | \
				 FIELD_PREP(MT_QFLAG_WED_RING, _n))
#define __MT_WED_RRO_Q(_type, _n)	(MT_QFLAG_WED_RRO | __MT_WED_Q(_type, _n))

#define MT_WED_Q_TX(_n)		__MT_WED_Q(MT76_WED_Q_TX, _n)
#define MT_WED_Q_RX(_n)		__MT_WED_Q(MT76_WED_Q_RX, _n)
#define MT_WED_Q_TXFREE		__MT_WED_Q(MT76_WED_Q_TXFREE, 0)
#define MT_WED_RRO_Q_DATA(_n)	__MT_WED_RRO_Q(MT76_WED_RRO_Q_DATA, _n)
#define MT_WED_RRO_Q_MSDU_PG(_n)	__MT_WED_RRO_Q(MT76_WED_RRO_Q_MSDU_PG, _n)
#define MT_WED_RRO_Q_IND	__MT_WED_RRO_Q(MT76_WED_RRO_Q_IND, 0)
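/*
 * Illustrative only (not part of this header): the helpers above pack a
 * WED queue type and ring index into one flags word, which FIELD_GET()
 * can take apart again:
 *
 *	u32 flags = MT_WED_Q_TX(1);
 *
 *	FIELD_GET(MT_QFLAG_WED_RING, flags);	// 1
 *	FIELD_GET(MT_QFLAG_WED_TYPE, flags);	// MT76_WED_Q_TX
 *	!!(flags & MT_QFLAG_WED);		// true: WED-managed queue
 */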
struct mt76_reg_pair {
	u32 reg;
	u32 value;
};

enum mt76_bus_type {
	MT76_BUS_MMIO,
	MT76_BUS_USB,
	MT76_BUS_SDIO,
};

enum mt76_wed_type {
	MT76_WED_Q_TX,
	MT76_WED_Q_TXFREE,
	MT76_WED_Q_RX,
	MT76_WED_RRO_Q_DATA,
	MT76_WED_RRO_Q_MSDU_PG,
	MT76_WED_RRO_Q_IND,
};
struct mt76_bus_ops {
	u32 (*rr)(struct mt76_dev *dev, u32 offset);
	void (*wr)(struct mt76_dev *dev, u32 offset, u32 val);
	u32 (*rmw)(struct mt76_dev *dev, u32 offset, u32 mask, u32 val);
	void (*write_copy)(struct mt76_dev *dev, u32 offset, const void *data,
			   int len);
	void (*read_copy)(struct mt76_dev *dev, u32 offset, void *data,
			  int len);
	int (*wr_rp)(struct mt76_dev *dev, u32 base,
		     const struct mt76_reg_pair *rp, int len);
	int (*rd_rp)(struct mt76_dev *dev, u32 base,
		     struct mt76_reg_pair *rp, int len);
	enum mt76_bus_type type;
};
#define mt76_is_usb(dev) ((dev)->bus->type == MT76_BUS_USB)
#define mt76_is_mmio(dev) ((dev)->bus->type == MT76_BUS_MMIO)
#define mt76_is_sdio(dev) ((dev)->bus->type == MT76_BUS_SDIO)
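/*
 * Usage sketch (illustrative): code shared between buses branches on the
 * bus type at runtime; "dev" here is a struct mt76_dev pointer:
 *
 *	if (mt76_is_usb(dev))
 *		// take the USB path
 *	else if (mt76_is_mmio(dev))
 *		// take the MMIO path
 */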
enum mt76_txq_id {
	MT_TXQ_VO = IEEE80211_AC_VO,
	MT_TXQ_VI = IEEE80211_AC_VI,
	MT_TXQ_BE = IEEE80211_AC_BE,
	MT_TXQ_BK = IEEE80211_AC_BK,
	/* ... */
	__MT_TXQ_MAX
};

enum mt76_rxq_id {
	/* ... */
	MT_RXQ_MSDU_PAGE_BAND0,
	MT_RXQ_MSDU_PAGE_BAND1,
	MT_RXQ_MSDU_PAGE_BAND2,
	/* ... */
	__MT_RXQ_MAX
};
enum mt76_cipher_type {
	/* ... */
	MT_CIPHER_TKIP_NO_MIC,
	/* ... */
	MT_CIPHER_BIP_CMAC_128,
	/* ... */
};
enum mt76_dfs_state {
	MT_DFS_STATE_UNKNOWN,
	MT_DFS_STATE_DISABLED,
	/* ... */
};
struct mt76_queue_buf {
	/* ... */
};

struct mt76_tx_info {
	struct mt76_queue_buf buf[32];
	/* ... */
};

struct mt76_queue_entry {
	/* ... */
	struct mt76_txwi_cache *txwi;
	/* ... */
	dma_addr_t dma_addr[2];
	/* ... */
};
struct mt76_queue_regs {
	/* ... */
} __packed __aligned(4);

struct mt76_queue {
	struct mt76_queue_regs __iomem *regs;
	/* ... */
	spinlock_t cleanup_lock;
	struct mt76_queue_entry *entry;
	struct mt76_rro_desc *rro_desc;
	struct mt76_desc *desc;
	/* ... */
	struct mtk_wed_device *wed;
	/* ... */
	struct sk_buff *rx_head;
	struct page_pool *page_pool;
};
struct mt76_mcu_ops {
	unsigned int max_retry;
	/* ... */
	int (*mcu_send_msg)(struct mt76_dev *dev, int cmd, const void *data,
			    int len, bool wait_resp);
	int (*mcu_skb_prepare_msg)(struct mt76_dev *dev, struct sk_buff *skb,
				   int cmd, int *seq);
	int (*mcu_skb_send_msg)(struct mt76_dev *dev, struct sk_buff *skb,
				int cmd, int *seq);
	int (*mcu_parse_response)(struct mt76_dev *dev, int cmd,
				  struct sk_buff *skb, int seq);
	u32 (*mcu_rr)(struct mt76_dev *dev, u32 offset);
	void (*mcu_wr)(struct mt76_dev *dev, u32 offset, u32 val);
	int (*mcu_wr_rp)(struct mt76_dev *dev, u32 base,
			 const struct mt76_reg_pair *rp, int len);
	int (*mcu_rd_rp)(struct mt76_dev *dev, u32 base,
			 struct mt76_reg_pair *rp, int len);
	int (*mcu_restart)(struct mt76_dev *dev);
};
struct mt76_queue_ops {
	int (*init)(struct mt76_dev *dev,
		    int (*poll)(struct napi_struct *napi, int budget));

	int (*alloc)(struct mt76_dev *dev, struct mt76_queue *q,
		     int idx, int n_desc, int bufsize,
		     u32 ring_base);

	int (*tx_queue_skb)(struct mt76_phy *phy, struct mt76_queue *q,
			    enum mt76_txq_id qid, struct sk_buff *skb,
			    struct mt76_wcid *wcid, struct ieee80211_sta *sta);

	int (*tx_queue_skb_raw)(struct mt76_dev *dev, struct mt76_queue *q,
				struct sk_buff *skb, u32 tx_info);

	void *(*dequeue)(struct mt76_dev *dev, struct mt76_queue *q, bool flush,
			 int *len, u32 *info, bool *more);

	void (*rx_reset)(struct mt76_dev *dev, enum mt76_rxq_id qid);

	void (*tx_cleanup)(struct mt76_dev *dev, struct mt76_queue *q,
			   bool flush);

	void (*rx_cleanup)(struct mt76_dev *dev, struct mt76_queue *q);

	void (*kick)(struct mt76_dev *dev, struct mt76_queue *q);

	void (*reset_q)(struct mt76_dev *dev, struct mt76_queue *q);
};
enum mt76_phy_type {
	/* ... */
	MT_PHY_TYPE_HE_SU = 8,
	MT_PHY_TYPE_HE_EXT_SU,
	/* ... */
	MT_PHY_TYPE_EHT_SU = 13,
	MT_PHY_TYPE_EHT_TRIG,
	/* ... */
	__MT_PHY_TYPE_MAX,
};
struct mt76_sta_stats {
	u64 tx_mode[__MT_PHY_TYPE_MAX];
	u64 tx_bw[5];		/* 20, 40, 80, 160, 320 */
	u64 tx_nss[4];		/* 1, 2, 3, 4 */
	u64 tx_mcs[16];		/* mcs idx */
	/* ... */
	u32 tx_packets;		/* unit: MSDU */
	/* ... */
};
enum mt76_wcid_flags {
	MT_WCID_FLAG_CHECK_PS,
	/* ... */
	MT_WCID_FLAG_HDR_TRANS,
};

#define MT76_N_WCIDS 1088
/* stored in ieee80211_tx_info::hw_queue */
#define MT_TX_HW_QUEUE_PHY		GENMASK(3, 2)

DECLARE_EWMA(signal, 10, 8);

#define MT_WCID_TX_INFO_RATE		GENMASK(15, 0)
#define MT_WCID_TX_INFO_NSS		GENMASK(17, 16)
#define MT_WCID_TX_INFO_TXPWR_ADJ	GENMASK(25, 18)
#define MT_WCID_TX_INFO_SET		BIT(31)
struct mt76_wcid {
	struct mt76_rx_tid __rcu *aggr[IEEE80211_NUM_TIDS];

	atomic_t non_aql_packets;
	/* ... */
	struct ewma_signal rssi;
	/* ... */
	struct rate_info rate;
	unsigned long ampdu_state;
	/* ... */
	u8 rx_key_pn[IEEE80211_NUM_TIDS + 1][6];
	/* ... */
	struct list_head tx_list;
	struct sk_buff_head tx_pending;
	struct sk_buff_head tx_offchannel;

	struct list_head list;
	/* ... */
	struct mt76_sta_stats stats;

	struct list_head poll_list;
	/* ... */
	struct mt76_wcid *def_wcid;
};
struct mt76_wed_rro_ind {
	/* ... */
};

struct mt76_txwi_cache {
	struct list_head list;
	/* ... */
};

struct mt76_rx_tid {
	struct rcu_head rcu_head;

	struct mt76_dev *dev;
	/* ... */
	struct delayed_work reorder_work;
	/* ... */
	u8 started:1, stopped:1, timer_pending:1;
	/* ... */
	struct sk_buff *reorder_buf[] __counted_by(size);
};
#define MT_TX_CB_DMA_DONE		BIT(0)
#define MT_TX_CB_TXS_DONE		BIT(1)
#define MT_TX_CB_TXS_FAILED		BIT(2)

#define MT_PACKET_ID_MASK		GENMASK(6, 0)
#define MT_PACKET_ID_NO_ACK		0
#define MT_PACKET_ID_NO_SKB		1
#define MT_PACKET_ID_WED		2
#define MT_PACKET_ID_FIRST		3
#define MT_PACKET_ID_HAS_RATE		BIT(7)
/* Timeout after which we give up waiting for a TXS callback, measured from
 * the time the DMA_DONE callback was seen (the packet has been processed by
 * then, so firmware should not take long to send the TXS callback if it is
 * going to do so).
 */
#define MT_TX_STATUS_SKB_TIMEOUT	(HZ / 4)

struct mt76_tx_cb {
	unsigned long jiffies;
	/* ... */
};
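/*
 * Usage sketch (illustrative, mirrors how tx status code would consume the
 * timeout; "cb" is a struct mt76_tx_cb pointer from mt76_tx_skb_cb()):
 *
 *	if (time_after(jiffies, cb->jiffies + MT_TX_STATUS_SKB_TIMEOUT))
 *		// give up on TXS and complete the skb without status
 */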
enum {
	MT76_STATE_INITIALIZED,
	MT76_STATE_REGISTERED,
	/* ... */
	MT76_STATE_MCU_RUNNING,
	/* ... */
	MT76_HW_SCHED_SCANNING,
	/* ... */
	MT76_STATE_POWER_OFF,
	/* ... */
	MT76_STATE_WED_RESET,
};

enum mt76_sta_event {
	MT76_STA_EVENT_ASSOC,
	MT76_STA_EVENT_AUTHORIZE,
	MT76_STA_EVENT_DISASSOC,
};
#define MT_DRV_TXWI_NO_FREE		BIT(0)
#define MT_DRV_TX_ALIGNED4_SKBS		BIT(1)
#define MT_DRV_SW_RX_AIRTIME		BIT(2)
#define MT_DRV_RX_DMA_HDR		BIT(3)
#define MT_DRV_HW_MGMT_TXQ		BIT(4)
#define MT_DRV_AMSDU_OFFLOAD		BIT(5)
struct mt76_driver_ops {
	/* ... */
	void (*update_survey)(struct mt76_phy *phy);
	int (*set_channel)(struct mt76_phy *phy);

	int (*tx_prepare_skb)(struct mt76_dev *dev, void *txwi_ptr,
			      enum mt76_txq_id qid, struct mt76_wcid *wcid,
			      struct ieee80211_sta *sta,
			      struct mt76_tx_info *tx_info);

	void (*tx_complete_skb)(struct mt76_dev *dev,
				struct mt76_queue_entry *e);

	bool (*tx_status_data)(struct mt76_dev *dev, u8 *update);

	bool (*rx_check)(struct mt76_dev *dev, void *data, int len);

	void (*rx_skb)(struct mt76_dev *dev, enum mt76_rxq_id q,
		       struct sk_buff *skb, u32 *info);

	void (*rx_poll_complete)(struct mt76_dev *dev, enum mt76_rxq_id q);

	void (*sta_ps)(struct mt76_dev *dev, struct ieee80211_sta *sta,
		       bool ps);

	int (*sta_add)(struct mt76_dev *dev, struct ieee80211_vif *vif,
		       struct ieee80211_sta *sta);

	int (*sta_event)(struct mt76_dev *dev, struct ieee80211_vif *vif,
			 struct ieee80211_sta *sta, enum mt76_sta_event ev);

	void (*sta_remove)(struct mt76_dev *dev, struct ieee80211_vif *vif,
			   struct ieee80211_sta *sta);
};
struct mt76_channel_state {
	/* ... */
};

struct mt76_sband {
	struct ieee80211_supported_band sband;
	struct mt76_channel_state *chan;
};
#define MT_VEND_TYPE_EEPROM	BIT(31)
#define MT_VEND_TYPE_CFG	BIT(30)
#define MT_VEND_TYPE_MASK	(MT_VEND_TYPE_EEPROM | MT_VEND_TYPE_CFG)

#define MT_VEND_ADDR(type, n)	(MT_VEND_TYPE_##type | (n))

enum mt_vendor_req {
	MT_VEND_DEV_MODE =	0x1,
	/* ... */
	MT_VEND_POWER_ON =	0x4,
	MT_VEND_MULTI_WRITE =	0x6,
	MT_VEND_MULTI_READ =	0x7,
	MT_VEND_READ_EEPROM =	0x9,
	MT_VEND_WRITE_FCE =	0x42,
	MT_VEND_WRITE_CFG =	0x46,
	MT_VEND_READ_CFG =	0x47,
	MT_VEND_READ_EXT =	0x63,
	MT_VEND_WRITE_EXT =	0x66,
	MT_VEND_FEATURE_SET =	0x91,
};

enum mt76u_in_ep {
	/* ... */
	__MT_EP_IN_MAX,
};

enum mt76u_out_ep {
	/* ... */
	MT_EP_OUT_INBAND_CMD,
	/* ... */
	__MT_EP_OUT_MAX,
};

struct mt76_mcu {
	/* ... */
	struct sk_buff_head res_q;
	wait_queue_head_t wait;
};
#define MT_TX_SG_MAX_SIZE	8
#define MT_RX_SG_MAX_SIZE	4
#define MT_NUM_TX_ENTRIES	256
#define MT_NUM_RX_ENTRIES	128
#define MCU_RESP_URB_SIZE	1024

struct mt76_usb {
	struct mutex usb_ctrl_mtx;
	/* ... */
	struct mt76_worker status_worker;
	struct mt76_worker rx_worker;

	struct work_struct stat_work;

	u8 out_ep[__MT_EP_OUT_MAX];
	u8 in_ep[__MT_EP_IN_MAX];
	/* ... */
	struct mt76u_mcu {
		/* ... */
		struct mt76_reg_pair *rp;
		/* ... */
	} mcu;
};
#define MT76S_XMIT_BUF_SZ	0x3fe00
#define MT76S_NUM_TX_ENTRIES	256
#define MT76S_NUM_RX_ENTRIES	512

struct mt76_sdio {
	struct mt76_worker txrx_worker;
	struct mt76_worker status_worker;
	struct mt76_worker net_worker;
	struct mt76_worker stat_worker;
	/* ... */
	struct sdio_func *func;
	/* ... */
	wait_queue_head_t wait;
	/* ... */
	int (*parse_irq)(struct mt76_dev *dev, struct mt76s_intr *intr);
	/* ... */
};
struct mt76_mmio {
	/* ... */
	struct mtk_wed_device wed;
	struct mtk_wed_device wed_hif2;
	struct completion wed_reset;
	struct completion wed_reset_complete;
};
struct mt76_rx_status {
	/* ... */
	struct mt76_wcid *wcid;
	/* ... */
	u8 amsdu:1, first_amsdu:1, last_amsdu:1;
	/* ... */
	s8 chain_signal[IEEE80211_MAX_CHAINS];
	/* ... */
};
struct mt76_freq_range_power {
	const struct cfg80211_sar_freq_ranges *range;
	s8 power;
};
struct mt76_testmode_ops {
	int (*set_state)(struct mt76_phy *phy, enum mt76_testmode_state state);
	int (*set_params)(struct mt76_phy *phy, struct nlattr **tb,
			  enum mt76_testmode_state new_state);
	int (*dump_stats)(struct mt76_phy *phy, struct sk_buff *msg);
};

struct mt76_testmode_data {
	enum mt76_testmode_state state;

	u32 param_set[DIV_ROUND_UP(NUM_MT76_TM_ATTRS, 32)];
	struct sk_buff *tx_skb;
	/* ... */
	u8 addr[3][ETH_ALEN];
	/* ... */
	struct {
		u64 packets[__MT_RXQ_MAX];
		u64 fcs_error[__MT_RXQ_MAX];
	} rx_stats;
};
struct mt76_vif {
	/* ... */
	struct ieee80211_chanctx_conf *ctx;
	/* ... */
};
struct mt76_phy {
	struct ieee80211_hw *hw;
	struct mt76_dev *dev;
	/* ... */
	struct list_head tx_list;
	struct mt76_queue *q_tx[__MT_TXQ_MAX];

	struct cfg80211_chan_def chandef;
	struct ieee80211_channel *main_chan;
	/* ... */
	struct mt76_channel_state *chan_state;
	enum mt76_dfs_state dfs_state;
	/* ... */
	struct mt76_hw_cap cap;
	struct mt76_sband sband_2g;
	struct mt76_sband sband_5g;
	struct mt76_sband sband_6g;

	u8 macaddr[ETH_ALEN];
	/* ... */
#ifdef CONFIG_NL80211_TESTMODE
	struct mt76_testmode_data test;
#endif

	struct delayed_work mac_work;
	/* ... */
	struct {
		struct sk_buff *head;
		struct sk_buff **tail;
		/* ... */
	} rx_amsdu[__MT_RXQ_MAX];

	struct mt76_freq_range_power *frp;

	struct {
		struct led_classdev cdev;
		/* ... */
	} leds;
};
struct mt76_dev {
	struct mt76_phy phy; /* must be first */
	struct mt76_phy *phys[__MT_MAX_BAND];

	struct ieee80211_hw *hw;
	/* ... */
	struct mt76_rx_status rx_ampdu_status;
	/* ... */
	const struct mt76_bus_ops *bus;
	const struct mt76_driver_ops *drv;
	const struct mt76_mcu_ops *mcu_ops;
	struct device *dev;
	struct device *dma_dev;
	/* ... */
	struct net_device *napi_dev;
	struct net_device *tx_napi_dev;
	struct napi_struct napi[__MT_RXQ_MAX];
	struct sk_buff_head rx_skb[__MT_RXQ_MAX];
	struct tasklet_struct irq_tasklet;

	struct list_head txwi_cache;
	struct list_head rxwi_cache;
	struct mt76_queue *q_mcu[__MT_MCUQ_MAX];
	struct mt76_queue q_rx[__MT_RXQ_MAX];
	const struct mt76_queue_ops *queue_ops;
	/* ... */
	struct mt76_worker tx_worker;
	struct napi_struct tx_napi;

	spinlock_t token_lock;
	struct idr token;
	/* ... */
	spinlock_t rx_token_lock;
	/* ... */
	wait_queue_head_t tx_wait;
	/* spinlock used to protect wcid pktid linked list */
	spinlock_t status_lock;

	u32 wcid_mask[DIV_ROUND_UP(MT76_N_WCIDS, 32)];
	u32 wcid_phy_mask[DIV_ROUND_UP(MT76_N_WCIDS, 32)];
	/* ... */
	struct mt76_wcid global_wcid;
	struct mt76_wcid __rcu *wcid[MT76_N_WCIDS];
	struct list_head wcid_list;

	struct list_head sta_poll_list;
	spinlock_t sta_poll_lock;
	/* ... */
	struct tasklet_struct pre_tbtt_tasklet;
	/* ... */
	struct debugfs_blob_wrapper eeprom;
	struct debugfs_blob_wrapper otp;
	/* ... */
	enum nl80211_dfs_regions region;
	/* ... */
#ifdef CONFIG_NL80211_TESTMODE
	const struct mt76_testmode_ops *test_ops;
	/* ... */
#endif

	struct workqueue_struct *wq;

	union {
		struct mt76_mmio mmio;
		struct mt76_usb usb;
		struct mt76_sdio sdio;
	};
};
struct mt76_mib_stats {
	/* ... */
	u32 tx_mu_acked_mpdu_cnt;
	u32 tx_su_acked_mpdu_cnt;
	u32 tx_bf_ibf_ppdu_cnt;
	u32 tx_bf_ebf_ppdu_cnt;

	u32 tx_bf_rx_fb_all_cnt;
	u32 tx_bf_rx_fb_eht_cnt;
	u32 tx_bf_rx_fb_he_cnt;
	u32 tx_bf_rx_fb_vht_cnt;
	u32 tx_bf_rx_fb_ht_cnt;

	u32 tx_bf_rx_fb_bw; /* value of last sample, not cumulative */
	u32 tx_bf_rx_fb_nc_cnt;
	u32 tx_bf_rx_fb_nr_cnt;
	u32 tx_bf_fb_cpl_cnt;
	u32 tx_bf_fb_trig_cnt;
	/* ... */
	u32 tx_stop_q_empty_cnt;
	u32 tx_mpdu_attempts_cnt;
	u32 tx_mpdu_success_cnt;
	/* ... */
	u32 rx_fifo_full_cnt;
	u32 channel_idle_cnt;
	u32 primary_cca_busy_time;
	u32 secondary_cca_busy_time;
	u32 primary_energy_detect_time;
	/* ... */
	u32 green_mdrdy_time;
	u32 rx_vector_mismatch_cnt;
	u32 rx_delimiter_fail_cnt;
	/* ... */
	u32 rx_len_mismatch_cnt;
	/* ... */
	u32 rx_ampdu_bytes_cnt;
	u32 rx_ampdu_valid_subframe_cnt;
	u32 rx_ampdu_valid_subframe_bytes_cnt;
	/* ... */
	u32 rx_vec_queue_overflow_drop_cnt;
	/* ... */
	u32 dl_he_ext_su_cnt;
	/* ... */
	u32 dl_he_5to8ru_cnt;
	u32 dl_he_9to16ru_cnt;
	u32 dl_he_gtr16ru_cnt;
	/* ... */
	u32 ul_hetrig_su_cnt;
	u32 ul_hetrig_2ru_cnt;
	u32 ul_hetrig_3ru_cnt;
	u32 ul_hetrig_4ru_cnt;
	u32 ul_hetrig_5to8ru_cnt;
	u32 ul_hetrig_9to16ru_cnt;
	u32 ul_hetrig_gtr16ru_cnt;
	u32 ul_hetrig_2mu_cnt;
	u32 ul_hetrig_3mu_cnt;
	u32 ul_hetrig_4mu_cnt;
	/* ... */
};
struct mt76_power_limits {
	/* ... */
};

struct mt76_ethtool_worker_info {
	/* ... */
	int initial_stat_idx;
	int worker_stat_count;
	/* ... */
};
#define CCK_RATE(_idx, _rate) {					\
	.bitrate = _rate,					\
	.flags = IEEE80211_RATE_SHORT_PREAMBLE,			\
	.hw_value = (MT_PHY_TYPE_CCK << 8) | (_idx),		\
	.hw_value_short = (MT_PHY_TYPE_CCK << 8) | (4 + _idx),	\
}

#define OFDM_RATE(_idx, _rate) {				\
	.bitrate = _rate,					\
	.hw_value = (MT_PHY_TYPE_OFDM << 8) | (_idx),		\
	.hw_value_short = (MT_PHY_TYPE_OFDM << 8) | (_idx),	\
}

extern struct ieee80211_rate mt76_rates[12];
#define __mt76_rr(dev, ...)	(dev)->bus->rr((dev), __VA_ARGS__)
#define __mt76_wr(dev, ...)	(dev)->bus->wr((dev), __VA_ARGS__)
#define __mt76_rmw(dev, ...)	(dev)->bus->rmw((dev), __VA_ARGS__)
#define __mt76_wr_copy(dev, ...)	(dev)->bus->write_copy((dev), __VA_ARGS__)
#define __mt76_rr_copy(dev, ...)	(dev)->bus->read_copy((dev), __VA_ARGS__)

#define __mt76_set(dev, offset, val)	__mt76_rmw(dev, offset, 0, val)
#define __mt76_clear(dev, offset, val)	__mt76_rmw(dev, offset, val, 0)

#define mt76_rr(dev, ...)	(dev)->mt76.bus->rr(&((dev)->mt76), __VA_ARGS__)
#define mt76_wr(dev, ...)	(dev)->mt76.bus->wr(&((dev)->mt76), __VA_ARGS__)
#define mt76_rmw(dev, ...)	(dev)->mt76.bus->rmw(&((dev)->mt76), __VA_ARGS__)
#define mt76_wr_copy(dev, ...)	(dev)->mt76.bus->write_copy(&((dev)->mt76), __VA_ARGS__)
#define mt76_rr_copy(dev, ...)	(dev)->mt76.bus->read_copy(&((dev)->mt76), __VA_ARGS__)
#define mt76_wr_rp(dev, ...)	(dev)->mt76.bus->wr_rp(&((dev)->mt76), __VA_ARGS__)
#define mt76_rd_rp(dev, ...)	(dev)->mt76.bus->rd_rp(&((dev)->mt76), __VA_ARGS__)

#define mt76_mcu_restart(dev, ...)	(dev)->mt76.mcu_ops->mcu_restart(&((dev)->mt76))

#define mt76_set(dev, offset, val)	mt76_rmw(dev, offset, 0, val)
#define mt76_clear(dev, offset, val)	mt76_rmw(dev, offset, val, 0)

#define mt76_get_field(_dev, _reg, _field)		\
	FIELD_GET(_field, mt76_rr(_dev, _reg))

#define mt76_rmw_field(_dev, _reg, _field, _val)	\
	mt76_rmw(_dev, _reg, _field, FIELD_PREP(_field, _val))

#define __mt76_rmw_field(_dev, _reg, _field, _val)	\
	__mt76_rmw(_dev, _reg, _field, FIELD_PREP(_field, _val))

#define mt76_hw(dev) (dev)->mphy.hw
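/*
 * Usage sketch (illustrative; MT_REG_CTRL and MT_CTRL_MODE are made-up
 * names, not defined in this header):
 *
 *	u32 v = mt76_rr(dev, MT_REG_CTRL);		// read
 *	mt76_wr(dev, MT_REG_CTRL, v | BIT(0));		// write
 *	mt76_set(dev, MT_REG_CTRL, BIT(1));		// set bits via rmw
 *	mt76_clear(dev, MT_REG_CTRL, BIT(2));		// clear bits via rmw
 *	mt76_rmw_field(dev, MT_REG_CTRL, MT_CTRL_MODE, 2); // update one field
 */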
bool __mt76_poll(struct mt76_dev *dev, u32 offset, u32 mask, u32 val,
		 int timeout);

#define mt76_poll(dev, ...) __mt76_poll(&((dev)->mt76), __VA_ARGS__)

bool ____mt76_poll_msec(struct mt76_dev *dev, u32 offset, u32 mask, u32 val,
			int timeout, int kick);
#define __mt76_poll_msec(...)         ____mt76_poll_msec(__VA_ARGS__, 10)
#define mt76_poll_msec(dev, ...)      ____mt76_poll_msec(&((dev)->mt76), __VA_ARGS__, 10)
#define mt76_poll_msec_tick(dev, ...) ____mt76_poll_msec(&((dev)->mt76), __VA_ARGS__)
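/*
 * Usage sketch (illustrative; MT_FW_STATUS and MT_FW_READY are made-up
 * names): wait up to 500 ms for a register field to reach a value:
 *
 *	if (!mt76_poll_msec(dev, MT_FW_STATUS, MT_FW_READY, MT_FW_READY, 500))
 *		return -ETIMEDOUT;
 */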
void mt76_mmio_init(struct mt76_dev *dev, void __iomem *regs);
void mt76_pci_disable_aspm(struct pci_dev *pdev);
bool mt76_pci_aspm_supported(struct pci_dev *pdev);
static inline u16 mt76_chip(struct mt76_dev *dev)
{
	return dev->rev >> 16;
}

static inline u16 mt76_rev(struct mt76_dev *dev)
{
	return dev->rev & 0xffff;
}
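/*
 * Example (illustrative): dev->rev packs the chip id in the upper 16 bits
 * and the hardware revision in the lower 16, so with dev->rev == 0x79610001,
 * mt76_chip() returns 0x7961 and mt76_rev() returns 0x0001.
 */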
void mt76_wed_release_rx_buf(struct mtk_wed_device *wed);
void mt76_wed_offload_disable(struct mtk_wed_device *wed);
void mt76_wed_reset_complete(struct mtk_wed_device *wed);
void mt76_wed_dma_reset(struct mt76_dev *dev);
int mt76_wed_net_setup_tc(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
			  struct net_device *netdev, enum tc_setup_type type,
			  void *type_data);
#ifdef CONFIG_NET_MEDIATEK_SOC_WED
u32 mt76_wed_init_rx_buf(struct mtk_wed_device *wed, int size);
int mt76_wed_offload_enable(struct mtk_wed_device *wed);
int mt76_wed_dma_setup(struct mt76_dev *dev, struct mt76_queue *q, bool reset);
#else
static inline u32 mt76_wed_init_rx_buf(struct mtk_wed_device *wed, int size)
{
	return 0;
}

static inline int mt76_wed_offload_enable(struct mtk_wed_device *wed)
{
	return 0;
}

static inline int mt76_wed_dma_setup(struct mt76_dev *dev, struct mt76_queue *q,
				     bool reset)
{
	return 0;
}
#endif /* CONFIG_NET_MEDIATEK_SOC_WED */
#define mt76xx_chip(dev) mt76_chip(&((dev)->mt76))
#define mt76xx_rev(dev) mt76_rev(&((dev)->mt76))

#define mt76_init_queues(dev, ...) (dev)->mt76.queue_ops->init(&((dev)->mt76), __VA_ARGS__)
#define mt76_queue_alloc(dev, ...) (dev)->mt76.queue_ops->alloc(&((dev)->mt76), __VA_ARGS__)
#define mt76_tx_queue_skb_raw(dev, ...) (dev)->mt76.queue_ops->tx_queue_skb_raw(&((dev)->mt76), __VA_ARGS__)
#define mt76_tx_queue_skb(dev, ...) (dev)->mt76.queue_ops->tx_queue_skb(&((dev)->mphy), __VA_ARGS__)
#define mt76_queue_rx_reset(dev, ...) (dev)->mt76.queue_ops->rx_reset(&((dev)->mt76), __VA_ARGS__)
#define mt76_queue_tx_cleanup(dev, ...) (dev)->mt76.queue_ops->tx_cleanup(&((dev)->mt76), __VA_ARGS__)
#define mt76_queue_rx_cleanup(dev, ...) (dev)->mt76.queue_ops->rx_cleanup(&((dev)->mt76), __VA_ARGS__)
#define mt76_queue_kick(dev, ...) (dev)->mt76.queue_ops->kick(&((dev)->mt76), __VA_ARGS__)
#define mt76_queue_reset(dev, ...) (dev)->mt76.queue_ops->reset_q(&((dev)->mt76), __VA_ARGS__)

#define mt76_for_each_q_rx(dev, i)	\
	for (i = 0; i < ARRAY_SIZE((dev)->q_rx); i++)	\
		if ((dev)->q_rx[i].ndesc)
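/*
 * Usage sketch (illustrative): iterate only the RX queues that were
 * actually allocated (ndesc != 0); "mdev" is a struct mt76_dev pointer:
 *
 *	int i;
 *
 *	mt76_for_each_q_rx(mdev, i)
 *		napi_enable(&mdev->napi[i]);
 */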
struct mt76_dev *mt76_alloc_device(struct device *pdev, unsigned int size,
				   const struct ieee80211_ops *ops,
				   const struct mt76_driver_ops *drv_ops);
int mt76_register_device(struct mt76_dev *dev, bool vht,
			 struct ieee80211_rate *rates, int n_rates);
void mt76_unregister_device(struct mt76_dev *dev);
void mt76_free_device(struct mt76_dev *dev);
void mt76_unregister_phy(struct mt76_phy *phy);

struct mt76_phy *mt76_alloc_phy(struct mt76_dev *dev, unsigned int size,
				const struct ieee80211_ops *ops,
				u8 band_idx);
int mt76_register_phy(struct mt76_phy *phy, bool vht,
		      struct ieee80211_rate *rates, int n_rates);

struct dentry *mt76_register_debugfs_fops(struct mt76_phy *phy,
					  const struct file_operations *ops);
static inline struct dentry *mt76_register_debugfs(struct mt76_dev *dev)
{
	return mt76_register_debugfs_fops(&dev->phy, NULL);
}

int mt76_queues_read(struct seq_file *s, void *data);
void mt76_seq_puts_array(struct seq_file *file, const char *str,
			 s8 *val, int len);

int mt76_eeprom_init(struct mt76_dev *dev, int len);
void mt76_eeprom_override(struct mt76_phy *phy);
int mt76_get_of_data_from_mtd(struct mt76_dev *dev, void *eep, int offset, int len);
int mt76_get_of_data_from_nvmem(struct mt76_dev *dev, void *eep,
				const char *cell_name, int len);
struct mt76_queue *
mt76_init_queue(struct mt76_dev *dev, int qid, int idx, int n_desc,
		int ring_base, void *wed, u32 flags);
u16 mt76_calculate_default_rate(struct mt76_phy *phy,
				struct ieee80211_vif *vif, int rateidx);
static inline int mt76_init_tx_queue(struct mt76_phy *phy, int qid, int idx,
				     int n_desc, int ring_base, void *wed,
				     u32 flags)
{
	struct mt76_queue *q;

	q = mt76_init_queue(phy->dev, qid, idx, n_desc, ring_base, wed, flags);
	if (IS_ERR(q))
		return PTR_ERR(q);

	phy->q_tx[qid] = q;

	return 0;
}

static inline int mt76_init_mcu_queue(struct mt76_dev *dev, int qid, int idx,
				      int n_desc, int ring_base)
{
	struct mt76_queue *q;

	q = mt76_init_queue(dev, qid, idx, n_desc, ring_base, NULL, 0);
	if (IS_ERR(q))
		return PTR_ERR(q);

	dev->q_mcu[qid] = q;

	return 0;
}
static inline struct mt76_phy *
mt76_dev_phy(struct mt76_dev *dev, u8 phy_idx)
{
	if ((phy_idx == MT_BAND1 && dev->phys[phy_idx]) ||
	    (phy_idx == MT_BAND2 && dev->phys[phy_idx]))
		return dev->phys[phy_idx];

	return &dev->phy;
}

static inline struct ieee80211_hw *
mt76_phy_hw(struct mt76_dev *dev, u8 phy_idx)
{
	return mt76_dev_phy(dev, phy_idx)->hw;
}
static inline u8 *
mt76_get_txwi_ptr(struct mt76_dev *dev, struct mt76_txwi_cache *t)
{
	return (u8 *)t - dev->drv->txwi_size;
}

/* increment with wrap-around */
static inline int mt76_incr(int val, int size)
{
	return (val + 1) & (size - 1);
}

/* decrement with wrap-around */
static inline int mt76_decr(int val, int size)
{
	return (val - 1) & (size - 1);
}

u8 mt76_ac_to_hwq(u8 ac);
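/*
 * Example (illustrative): the masks above assume a power-of-two ring size,
 * so with size == 128, mt76_incr(127, 128) wraps to 0 and
 * mt76_decr(0, 128) wraps to 127.
 */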
static inline struct ieee80211_txq *
mtxq_to_txq(struct mt76_txq *mtxq)
{
	void *ptr = mtxq;

	return container_of(ptr, struct ieee80211_txq, drv_priv);
}
static inline struct ieee80211_sta *
wcid_to_sta(struct mt76_wcid *wcid)
{
	void *ptr = wcid;

	if (!wcid || !wcid->sta)
		return NULL;

	if (wcid->def_wcid)
		ptr = wcid->def_wcid;

	return container_of(ptr, struct ieee80211_sta, drv_priv);
}
static inline struct mt76_tx_cb *mt76_tx_skb_cb(struct sk_buff *skb)
{
	BUILD_BUG_ON(sizeof(struct mt76_tx_cb) >
		     sizeof(IEEE80211_SKB_CB(skb)->status.status_driver_data));
	return ((void *)IEEE80211_SKB_CB(skb)->status.status_driver_data);
}
static inline void *mt76_skb_get_hdr(struct sk_buff *skb)
{
	struct mt76_rx_status mstat;
	u8 *data = skb->data;

	/* Alignment concerns */
	BUILD_BUG_ON(sizeof(struct ieee80211_radiotap_he) % 4);
	BUILD_BUG_ON(sizeof(struct ieee80211_radiotap_he_mu) % 4);

	mstat = *((struct mt76_rx_status *)skb->cb);

	if (mstat.flag & RX_FLAG_RADIOTAP_HE)
		data += sizeof(struct ieee80211_radiotap_he);
	if (mstat.flag & RX_FLAG_RADIOTAP_HE_MU)
		data += sizeof(struct ieee80211_radiotap_he_mu);

	return data;
}
static inline void mt76_insert_hdr_pad(struct sk_buff *skb)
{
	int len = ieee80211_get_hdrlen_from_skb(skb);

	if (len % 4 == 0)
		return;

	skb_push(skb, 2);
	memmove(skb->data, skb->data + 2, len);

	skb->data[len] = 0;
	skb->data[len + 1] = 0;
}
static inline bool mt76_is_skb_pktid(u8 pktid)
{
	if (pktid & MT_PACKET_ID_HAS_RATE)
		return false;

	return pktid >= MT_PACKET_ID_FIRST;
}
static inline u8 mt76_tx_power_nss_delta(u8 nss)
{
	static const u8 nss_delta[4] = { 0, 6, 9, 12 };
	u8 idx = nss - 1;

	return (idx < ARRAY_SIZE(nss_delta)) ? nss_delta[idx] : 0;
}
static inline bool mt76_testmode_enabled(struct mt76_phy *phy)
{
#ifdef CONFIG_NL80211_TESTMODE
	return phy->test.state != MT76_TM_STATE_OFF;
#else
	return false;
#endif
}
static inline bool mt76_is_testmode_skb(struct mt76_dev *dev,
					struct sk_buff *skb,
					struct ieee80211_hw **hw)
{
#ifdef CONFIG_NL80211_TESTMODE
	int i;

	for (i = 0; i < ARRAY_SIZE(dev->phys); i++) {
		struct mt76_phy *phy = dev->phys[i];

		if (phy && skb == phy->test.tx_skb) {
			*hw = dev->phys[i]->hw;
			return true;
		}
	}
	return false;
#else
	return false;
#endif
}
void mt76_rx(struct mt76_dev *dev, enum mt76_rxq_id q, struct sk_buff *skb);
void mt76_tx(struct mt76_phy *phy, struct ieee80211_sta *sta,
	     struct mt76_wcid *wcid, struct sk_buff *skb);
void mt76_wake_tx_queue(struct ieee80211_hw *hw, struct ieee80211_txq *txq);
void mt76_stop_tx_queues(struct mt76_phy *phy, struct ieee80211_sta *sta,
			 bool send_bar);
void mt76_tx_check_agg_ssn(struct ieee80211_sta *sta, struct sk_buff *skb);
void mt76_txq_schedule(struct mt76_phy *phy, enum mt76_txq_id qid);
void mt76_txq_schedule_all(struct mt76_phy *phy);
void mt76_tx_worker_run(struct mt76_dev *dev);
void mt76_tx_worker(struct mt76_worker *w);
void mt76_release_buffered_frames(struct ieee80211_hw *hw,
				  struct ieee80211_sta *sta,
				  u16 tids, int nframes,
				  enum ieee80211_frame_release_type reason,
				  bool more_data);
bool mt76_has_tx_pending(struct mt76_phy *phy);
int mt76_update_channel(struct mt76_phy *phy);
void mt76_update_survey(struct mt76_phy *phy);
void mt76_update_survey_active_time(struct mt76_phy *phy, ktime_t time);
int mt76_get_survey(struct ieee80211_hw *hw, int idx,
		    struct survey_info *survey);
int mt76_rx_signal(u8 chain_mask, s8 *chain_signal);
void mt76_set_stream_caps(struct mt76_phy *phy, bool vht);

int mt76_rx_aggr_start(struct mt76_dev *dev, struct mt76_wcid *wcid, u8 tid,
		       u16 ssn, u16 size);
void mt76_rx_aggr_stop(struct mt76_dev *dev, struct mt76_wcid *wcid, u8 tid);

void mt76_wcid_key_setup(struct mt76_dev *dev, struct mt76_wcid *wcid,
			 struct ieee80211_key_conf *key);
void mt76_tx_status_lock(struct mt76_dev *dev, struct sk_buff_head *list)
		   __acquires(&dev->status_lock);
void mt76_tx_status_unlock(struct mt76_dev *dev, struct sk_buff_head *list)
		   __releases(&dev->status_lock);
int mt76_tx_status_skb_add(struct mt76_dev *dev, struct mt76_wcid *wcid,
			   struct sk_buff *skb);
struct sk_buff *mt76_tx_status_skb_get(struct mt76_dev *dev,
				       struct mt76_wcid *wcid, int pktid,
				       struct sk_buff_head *list);
void mt76_tx_status_skb_done(struct mt76_dev *dev, struct sk_buff *skb,
			     struct sk_buff_head *list);
void __mt76_tx_complete_skb(struct mt76_dev *dev, u16 wcid, struct sk_buff *skb,
			    struct list_head *free_list);
static inline void
mt76_tx_complete_skb(struct mt76_dev *dev, u16 wcid, struct sk_buff *skb)
{
	__mt76_tx_complete_skb(dev, wcid, skb, NULL);
}

void mt76_tx_status_check(struct mt76_dev *dev, bool flush);
int mt76_sta_state(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
		   struct ieee80211_sta *sta,
		   enum ieee80211_sta_state old_state,
		   enum ieee80211_sta_state new_state);
void __mt76_sta_remove(struct mt76_dev *dev, struct ieee80211_vif *vif,
		       struct ieee80211_sta *sta);
void mt76_sta_pre_rcu_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
			     struct ieee80211_sta *sta);

int mt76_get_min_avg_rssi(struct mt76_dev *dev, bool ext_phy);
int mt76_get_txpower(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
		     int *dbm);
int mt76_init_sar_power(struct ieee80211_hw *hw,
			const struct cfg80211_sar_specs *sar);
int mt76_get_sar_power(struct mt76_phy *phy,
		       struct ieee80211_channel *chan,
		       int power);
void mt76_csa_check(struct mt76_dev *dev);
void mt76_csa_finish(struct mt76_dev *dev);

int mt76_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant);
int mt76_set_tim(struct ieee80211_hw *hw, struct ieee80211_sta *sta, bool set);
void mt76_insert_ccmp_hdr(struct sk_buff *skb, u8 key_id);
int mt76_get_rate(struct mt76_dev *dev,
		  struct ieee80211_supported_band *sband,
		  int idx, bool cck);
void mt76_sw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
		  const u8 *mac);
void mt76_sw_scan_complete(struct ieee80211_hw *hw,
			   struct ieee80211_vif *vif);
enum mt76_dfs_state mt76_phy_dfs_state(struct mt76_phy *phy);
int mt76_testmode_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
		      void *data, int len);
int mt76_testmode_dump(struct ieee80211_hw *hw, struct sk_buff *skb,
		       struct netlink_callback *cb, void *data, int len);
int mt76_testmode_set_state(struct mt76_phy *phy, enum mt76_testmode_state state);
int mt76_testmode_alloc_skb(struct mt76_phy *phy, u32 len);
static inline void mt76_testmode_reset(struct mt76_phy *phy, bool disable)
{
#ifdef CONFIG_NL80211_TESTMODE
	enum mt76_testmode_state state = MT76_TM_STATE_IDLE;

	if (disable || phy->test.state == MT76_TM_STATE_OFF)
		state = MT76_TM_STATE_OFF;

	mt76_testmode_set_state(phy, state);
#endif
}
static inline struct ieee80211_hw *
mt76_tx_status_get_hw(struct mt76_dev *dev, struct sk_buff *skb)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	u8 phy_idx = (info->hw_queue & MT_TX_HW_QUEUE_PHY) >> 2;
	struct ieee80211_hw *hw = mt76_phy_hw(dev, phy_idx);

	info->hw_queue &= ~MT_TX_HW_QUEUE_PHY;

	return hw;
}
void mt76_put_txwi(struct mt76_dev *dev, struct mt76_txwi_cache *t);
void mt76_put_rxwi(struct mt76_dev *dev, struct mt76_txwi_cache *t);
struct mt76_txwi_cache *mt76_get_rxwi(struct mt76_dev *dev);
void mt76_free_pending_rxwi(struct mt76_dev *dev);
void mt76_rx_complete(struct mt76_dev *dev, struct sk_buff_head *frames,
		      struct napi_struct *napi);
void mt76_rx_poll_complete(struct mt76_dev *dev, enum mt76_rxq_id q,
			   struct napi_struct *napi);
void mt76_rx_aggr_reorder(struct sk_buff *skb, struct sk_buff_head *frames);
void mt76_testmode_tx_pending(struct mt76_phy *phy);
void mt76_queue_tx_complete(struct mt76_dev *dev, struct mt76_queue *q,
			    struct mt76_queue_entry *e);
int mt76_set_channel(struct mt76_phy *phy, struct cfg80211_chan_def *chandef,
		     bool offchannel);
static inline bool mt76u_urb_error(struct urb *urb)
{
	return urb->status &&
	       urb->status != -ECONNRESET &&
	       urb->status != -ESHUTDOWN &&
	       urb->status != -ENOENT;
}
static inline int
mt76u_bulk_msg(struct mt76_dev *dev, void *data, int len, int *actual_len,
	       int timeout, int ep)
{
	struct usb_interface *uintf = to_usb_interface(dev->dev);
	struct usb_device *udev = interface_to_usbdev(uintf);
	struct mt76_usb *usb = &dev->usb;
	unsigned int pipe;

	if (actual_len)
		pipe = usb_rcvbulkpipe(udev, usb->in_ep[ep]);
	else
		pipe = usb_sndbulkpipe(udev, usb->out_ep[ep]);

	return usb_bulk_msg(udev, pipe, data, len, actual_len, timeout);
}
void mt76_ethtool_page_pool_stats(struct mt76_dev *dev, u64 *data, int *index);
void mt76_ethtool_worker(struct mt76_ethtool_worker_info *wi,
			 struct mt76_sta_stats *stats, bool eht);
int mt76_skb_adjust_pad(struct sk_buff *skb, int pad);
int __mt76u_vendor_request(struct mt76_dev *dev, u8 req, u8 req_type,
			   u16 val, u16 offset, void *buf, size_t len);
int mt76u_vendor_request(struct mt76_dev *dev, u8 req,
			 u8 req_type, u16 val, u16 offset,
			 void *buf, size_t len);
void mt76u_single_wr(struct mt76_dev *dev, const u8 req,
		     const u16 offset, const u32 val);
void mt76u_read_copy(struct mt76_dev *dev, u32 offset,
		     void *data, int len);
u32 ___mt76u_rr(struct mt76_dev *dev, u8 req, u8 req_type, u32 addr);
void ___mt76u_wr(struct mt76_dev *dev, u8 req, u8 req_type,
		 u32 addr, u32 val);
int __mt76u_init(struct mt76_dev *dev, struct usb_interface *intf,
		 struct mt76_bus_ops *ops);
int mt76u_init(struct mt76_dev *dev, struct usb_interface *intf);
int mt76u_alloc_mcu_queue(struct mt76_dev *dev);
int mt76u_alloc_queues(struct mt76_dev *dev);
void mt76u_stop_tx(struct mt76_dev *dev);
void mt76u_stop_rx(struct mt76_dev *dev);
int mt76u_resume_rx(struct mt76_dev *dev);
void mt76u_queues_deinit(struct mt76_dev *dev);
int mt76s_init(struct mt76_dev *dev, struct sdio_func *func,
	       const struct mt76_bus_ops *bus_ops);
int mt76s_alloc_rx_queue(struct mt76_dev *dev, enum mt76_rxq_id qid);
int mt76s_alloc_tx(struct mt76_dev *dev);
void mt76s_deinit(struct mt76_dev *dev);
void mt76s_sdio_irq(struct sdio_func *func);
void mt76s_txrx_worker(struct mt76_sdio *sdio);
bool mt76s_txqs_empty(struct mt76_dev *dev);
int mt76s_hw_init(struct mt76_dev *dev, struct sdio_func *func,
		  int hw_ver);
u32 mt76s_rr(struct mt76_dev *dev, u32 offset);
void mt76s_wr(struct mt76_dev *dev, u32 offset, u32 val);
u32 mt76s_rmw(struct mt76_dev *dev, u32 offset, u32 mask, u32 val);
u32 mt76s_read_pcr(struct mt76_dev *dev);
void mt76s_write_copy(struct mt76_dev *dev, u32 offset,
		      const void *data, int len);
void mt76s_read_copy(struct mt76_dev *dev, u32 offset,
		     void *data, int len);
int mt76s_wr_rp(struct mt76_dev *dev, u32 base,
		const struct mt76_reg_pair *data,
		int n);
int mt76s_rd_rp(struct mt76_dev *dev, u32 base,
		struct mt76_reg_pair *data, int len);
struct sk_buff *
__mt76_mcu_msg_alloc(struct mt76_dev *dev, const void *data,
		     int len, int data_len, gfp_t gfp);
static inline struct sk_buff *
mt76_mcu_msg_alloc(struct mt76_dev *dev, const void *data,
		   int data_len)
{
	return __mt76_mcu_msg_alloc(dev, data, data_len, data_len, GFP_KERNEL);
}
void mt76_mcu_rx_event(struct mt76_dev *dev, struct sk_buff *skb);
struct sk_buff *mt76_mcu_get_response(struct mt76_dev *dev,
				      unsigned long expires);
int mt76_mcu_send_and_get_msg(struct mt76_dev *dev, int cmd, const void *data,
			      int len, bool wait_resp, struct sk_buff **ret);
int mt76_mcu_skb_send_and_get_msg(struct mt76_dev *dev, struct sk_buff *skb,
				  int cmd, bool wait_resp, struct sk_buff **ret);
int __mt76_mcu_send_firmware(struct mt76_dev *dev, int cmd, const void *data,
			     int len, int max_len);
static inline int
mt76_mcu_send_firmware(struct mt76_dev *dev, int cmd, const void *data,
		       int len)
{
	int max_len = 4096 - dev->mcu_ops->headroom;

	return __mt76_mcu_send_firmware(dev, cmd, data, len, max_len);
}

static inline int
mt76_mcu_send_msg(struct mt76_dev *dev, int cmd, const void *data, int len,
		  bool wait_resp)
{
	return mt76_mcu_send_and_get_msg(dev, cmd, data, len, wait_resp, NULL);
}

static inline int
mt76_mcu_skb_send_msg(struct mt76_dev *dev, struct sk_buff *skb, int cmd,
		      bool wait_resp)
{
	return mt76_mcu_skb_send_and_get_msg(dev, skb, cmd, wait_resp, NULL);
}
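/*
 * Usage sketch (illustrative; MCU_CMD_EXAMPLE and req are made up):
 *
 *	// fire-and-forget:
 *	err = mt76_mcu_send_msg(mdev, MCU_CMD_EXAMPLE, &req, sizeof(req),
 *				false);
 *
 *	// round trip, caller owns the response skb:
 *	struct sk_buff *resp;
 *	err = mt76_mcu_send_and_get_msg(mdev, MCU_CMD_EXAMPLE, &req,
 *					sizeof(req), true, &resp);
 *	if (!err)
 *		dev_kfree_skb(resp);
 */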
void mt76_set_irq_mask(struct mt76_dev *dev, u32 addr, u32 clear, u32 set);

struct device_node *
mt76_find_power_limits_node(struct mt76_dev *dev);
struct device_node *
mt76_find_channel_node(struct device_node *np, struct ieee80211_channel *chan);

s8 mt76_get_rate_power_limits(struct mt76_phy *phy,
			      struct ieee80211_channel *chan,
			      struct mt76_power_limits *dest,
			      s8 target_power);
static inline bool mt76_queue_is_rx(struct mt76_dev *dev, struct mt76_queue *q)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(dev->q_rx); i++) {
		if (q == &dev->q_rx[i])
			return true;
	}

	return false;
}
static inline bool mt76_queue_is_wed_tx_free(struct mt76_queue *q)
{
	return (q->flags & MT_QFLAG_WED) &&
	       FIELD_GET(MT_QFLAG_WED_TYPE, q->flags) == MT76_WED_Q_TXFREE;
}

static inline bool mt76_queue_is_wed_rro(struct mt76_queue *q)
{
	return q->flags & MT_QFLAG_WED_RRO;
}

static inline bool mt76_queue_is_wed_rro_ind(struct mt76_queue *q)
{
	return mt76_queue_is_wed_rro(q) &&
	       FIELD_GET(MT_QFLAG_WED_TYPE, q->flags) == MT76_WED_RRO_Q_IND;
}

static inline bool mt76_queue_is_wed_rro_data(struct mt76_queue *q)
{
	return mt76_queue_is_wed_rro(q) &&
	       (FIELD_GET(MT_QFLAG_WED_TYPE, q->flags) == MT76_WED_RRO_Q_DATA ||
		FIELD_GET(MT_QFLAG_WED_TYPE, q->flags) == MT76_WED_RRO_Q_MSDU_PG);
}

static inline bool mt76_queue_is_wed_rx(struct mt76_queue *q)
{
	if (!(q->flags & MT_QFLAG_WED))
		return false;

	return FIELD_GET(MT_QFLAG_WED_TYPE, q->flags) == MT76_WED_Q_RX ||
	       mt76_queue_is_wed_rro_ind(q) || mt76_queue_is_wed_rro_data(q);
}
struct mt76_txwi_cache *
mt76_token_release(struct mt76_dev *dev, int token, bool *wake);
int mt76_token_consume(struct mt76_dev *dev, struct mt76_txwi_cache **ptxwi);
void __mt76_set_tx_blocked(struct mt76_dev *dev, bool blocked);
struct mt76_txwi_cache *mt76_rx_token_release(struct mt76_dev *dev, int token);
int mt76_rx_token_consume(struct mt76_dev *dev, void *ptr,
			  struct mt76_txwi_cache *r, dma_addr_t phys);
int mt76_create_page_pool(struct mt76_dev *dev, struct mt76_queue *q);
static inline void mt76_put_page_pool_buf(void *buf, bool allow_direct)
{
	struct page *page = virt_to_head_page(buf);

	page_pool_put_full_page(page->pp, page, allow_direct);
}

static inline void *
mt76_get_page_pool_buf(struct mt76_queue *q, u32 *offset, u32 size)
{
	struct page *page;

	page = page_pool_dev_alloc_frag(q->page_pool, offset, size);
	if (!page)
		return NULL;

	return page_address(page) + *offset;
}
static inline void mt76_set_tx_blocked(struct mt76_dev *dev, bool blocked)
{
	spin_lock_bh(&dev->token_lock);
	__mt76_set_tx_blocked(dev, blocked);
	spin_unlock_bh(&dev->token_lock);
}
static inline int
mt76_token_get(struct mt76_dev *dev, struct mt76_txwi_cache **ptxwi)
{
	int token;

	spin_lock_bh(&dev->token_lock);
	token = idr_alloc(&dev->token, *ptxwi, 0, dev->token_size, GFP_ATOMIC);
	spin_unlock_bh(&dev->token_lock);

	return token;
}
static inline struct mt76_txwi_cache *
mt76_token_put(struct mt76_dev *dev, int token)
{
	struct mt76_txwi_cache *txwi;

	spin_lock_bh(&dev->token_lock);
	txwi = idr_remove(&dev->token, token);
	spin_unlock_bh(&dev->token_lock);

	return txwi;
}
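/*
 * Usage sketch (illustrative): a TX token pairs mt76_token_get() with
 * mt76_token_put(); both take dev->token_lock internally:
 *
 *	int token = mt76_token_get(mdev, &txwi);	// idr slot or -errno
 *
 *	if (token >= 0)
 *		txwi = mt76_token_put(mdev, token);	// releases the slot
 */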
void mt76_wcid_init(struct mt76_wcid *wcid);
void mt76_wcid_cleanup(struct mt76_dev *dev, struct mt76_wcid *wcid);

#endif /* __MT76_H */