/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
 * Copyright (c) 2018, The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
#ifndef _HTT_H_
#define _HTT_H_

#include <linux/bug.h>
#include <linux/interrupt.h>
#include <linux/dmapool.h>
#include <linux/hashtable.h>
#include <linux/kfifo.h>
#include <net/mac80211.h>

#include "htc.h"
#include "hw.h"
#include "rx_desc.h"
enum htt_dbg_stats_type {
	HTT_DBG_STATS_WAL_PDEV_TXRX = 1 << 0,
	HTT_DBG_STATS_RX_REORDER    = 1 << 1,
	HTT_DBG_STATS_RX_RATE_INFO  = 1 << 2,
	HTT_DBG_STATS_TX_PPDU_LOG   = 1 << 3,
	HTT_DBG_STATS_TX_RATE_INFO  = 1 << 4,
	/* bits 5-23 currently reserved */

	HTT_DBG_NUM_STATS /* keep this last */
};
enum htt_h2t_msg_type { /* host-to-target */
	HTT_H2T_MSG_TYPE_VERSION_REQ        = 0,
	HTT_H2T_MSG_TYPE_TX_FRM             = 1,
	HTT_H2T_MSG_TYPE_RX_RING_CFG        = 2,
	HTT_H2T_MSG_TYPE_STATS_REQ          = 3,
	HTT_H2T_MSG_TYPE_SYNC               = 4,
	HTT_H2T_MSG_TYPE_AGGR_CFG           = 5,
	HTT_H2T_MSG_TYPE_FRAG_DESC_BANK_CFG = 6,

	/* This command is used for sending management frames in HTT < 3.0.
	 * HTT >= 3.0 uses TX_FRM for everything.
	 */
	HTT_H2T_MSG_TYPE_MGMT_TX            = 7,
	HTT_H2T_MSG_TYPE_TX_FETCH_RESP      = 11,

	HTT_H2T_NUM_MSGS /* keep this last */
};
struct htt_ver_req {
	u8 pad[sizeof(u32) - sizeof(struct htt_cmd_hdr)];
} __packed;
/*
 * HTT tx MSDU descriptor
 *
 * The HTT tx MSDU descriptor is created by the host HTT SW for each
 * tx MSDU. The HTT tx MSDU descriptor contains the information that
 * the target firmware needs for the FW's tx processing, particularly
 * for creating the HW msdu descriptor.
 * The same HTT tx descriptor is used for HL and LL systems, though
 * a few fields within the tx descriptor are used only by LL or
 * only by HL.
 * The HTT tx descriptor is defined in two manners: by a struct with
 * bitfields, and by a series of [dword offset, bit mask, bit shift]
 * definitions.
 * The target should use the struct def, for simplicity and clarity,
 * but the host shall use the bit-mask + bit-shift defs, to be endian-
 * neutral. Specifically, the host shall use the get/set macros built
 * around the mask + shift defs.
 */
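/* Example: the endian-neutral get/set helpers that the comment above asks the
 * host to use are built from the *_MASK/*_LSB pairs defined throughout this
 * header. The two macros below are a minimal illustrative sketch of that
 * pattern (the HTT_EXAMPLE_* names are examples, not part of the HTT API).
 */
#define HTT_EXAMPLE_GET(word, field) \
	(((word) & (field ## _MASK)) >> (field ## _LSB))
#define HTT_EXAMPLE_SET(word, field, value) \
	(((word) & ~(field ## _MASK)) | \
	 (((value) << (field ## _LSB)) & (field ## _MASK)))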
struct htt_data_tx_desc_frag {
	union {
		struct double_word_addr {
			__le32 paddr;
			__le32 len;
		} __packed dword_addr;
		struct triple_word_addr {
			__le32 paddr_lo;
			__le16 paddr_hi;
			__le16 len_16;
		} __packed tword_addr;
	} __packed;
} __packed;
struct htt_msdu_ext_desc {
	__le32 tso_flag[3];
	__le16 ip_identification;
	u8 flags;
	u8 reserved;
	struct htt_data_tx_desc_frag frags[6];
};

struct htt_msdu_ext_desc_64 {
	__le32 tso_flag[5];
	__le16 ip_identification;
	u8 flags;
	u8 reserved;
	struct htt_data_tx_desc_frag frags[6];
};
#define HTT_MSDU_EXT_DESC_FLAG_IPV4_CSUM_ENABLE     BIT(0)
#define HTT_MSDU_EXT_DESC_FLAG_UDP_IPV4_CSUM_ENABLE BIT(1)
#define HTT_MSDU_EXT_DESC_FLAG_UDP_IPV6_CSUM_ENABLE BIT(2)
#define HTT_MSDU_EXT_DESC_FLAG_TCP_IPV4_CSUM_ENABLE BIT(3)
#define HTT_MSDU_EXT_DESC_FLAG_TCP_IPV6_CSUM_ENABLE BIT(4)

#define HTT_MSDU_CHECKSUM_ENABLE (HTT_MSDU_EXT_DESC_FLAG_IPV4_CSUM_ENABLE \
				 | HTT_MSDU_EXT_DESC_FLAG_UDP_IPV4_CSUM_ENABLE \
				 | HTT_MSDU_EXT_DESC_FLAG_UDP_IPV6_CSUM_ENABLE \
				 | HTT_MSDU_EXT_DESC_FLAG_TCP_IPV4_CSUM_ENABLE \
				 | HTT_MSDU_EXT_DESC_FLAG_TCP_IPV6_CSUM_ENABLE)

#define HTT_MSDU_EXT_DESC_FLAG_IPV4_CSUM_ENABLE_64     BIT(16)
#define HTT_MSDU_EXT_DESC_FLAG_UDP_IPV4_CSUM_ENABLE_64 BIT(17)
#define HTT_MSDU_EXT_DESC_FLAG_UDP_IPV6_CSUM_ENABLE_64 BIT(18)
#define HTT_MSDU_EXT_DESC_FLAG_TCP_IPV4_CSUM_ENABLE_64 BIT(19)
#define HTT_MSDU_EXT_DESC_FLAG_TCP_IPV6_CSUM_ENABLE_64 BIT(20)
#define HTT_MSDU_EXT_DESC_FLAG_PARTIAL_CSUM_ENABLE_64  BIT(21)

#define HTT_MSDU_CHECKSUM_ENABLE_64 (HTT_MSDU_EXT_DESC_FLAG_IPV4_CSUM_ENABLE_64 \
				    | HTT_MSDU_EXT_DESC_FLAG_UDP_IPV4_CSUM_ENABLE_64 \
				    | HTT_MSDU_EXT_DESC_FLAG_UDP_IPV6_CSUM_ENABLE_64 \
				    | HTT_MSDU_EXT_DESC_FLAG_TCP_IPV4_CSUM_ENABLE_64 \
				    | HTT_MSDU_EXT_DESC_FLAG_TCP_IPV6_CSUM_ENABLE_64)
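/* Example: requesting hardware checksum offload for an MSDU. The aggregate
 * HTT_MSDU_CHECKSUM_ENABLE mask above simply ORs the per-protocol flags, so
 * everything can be enabled in one assignment. This is a hedged sketch (it
 * assumes the "flags" member of the extension descriptor as laid out above)
 * and is not a verbatim copy of the driver's tx path.
 */
static inline void htt_example_enable_csum_offload(struct htt_msdu_ext_desc *ext_desc)
{
	ext_desc->flags |= HTT_MSDU_CHECKSUM_ENABLE;
}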
enum htt_data_tx_desc_flags0 {
	HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT = 1 << 0,
	HTT_DATA_TX_DESC_FLAGS0_NO_AGGR         = 1 << 1,
	HTT_DATA_TX_DESC_FLAGS0_NO_ENCRYPT      = 1 << 2,
	HTT_DATA_TX_DESC_FLAGS0_NO_CLASSIFY     = 1 << 3,
	HTT_DATA_TX_DESC_FLAGS0_RSVD0           = 1 << 4
#define HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE_MASK 0xE0
#define HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE_LSB 5
};
enum htt_data_tx_desc_flags1 {
#define HTT_DATA_TX_DESC_FLAGS1_VDEV_ID_BITS 6
#define HTT_DATA_TX_DESC_FLAGS1_VDEV_ID_MASK 0x003F
#define HTT_DATA_TX_DESC_FLAGS1_VDEV_ID_LSB  0
#define HTT_DATA_TX_DESC_FLAGS1_EXT_TID_BITS 5
#define HTT_DATA_TX_DESC_FLAGS1_EXT_TID_MASK 0x07C0
#define HTT_DATA_TX_DESC_FLAGS1_EXT_TID_LSB  6
	HTT_DATA_TX_DESC_FLAGS1_POSTPONED        = 1 << 11,
	HTT_DATA_TX_DESC_FLAGS1_MORE_IN_BATCH    = 1 << 12,
	HTT_DATA_TX_DESC_FLAGS1_CKSUM_L3_OFFLOAD = 1 << 13,
	HTT_DATA_TX_DESC_FLAGS1_CKSUM_L4_OFFLOAD = 1 << 14,
	HTT_DATA_TX_DESC_FLAGS1_RSVD1            = 1 << 15
};
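/* Example: how a vdev id and extended TID could be packed into the flags1
 * word of the tx descriptor using the mask/shift defines above. A minimal
 * host-side sketch; the helper name is illustrative only.
 */
static inline __le16 htt_example_build_flags1(u8 vdev_id, u8 ext_tid)
{
	u16 flags1 = 0;

	flags1 |= (vdev_id << HTT_DATA_TX_DESC_FLAGS1_VDEV_ID_LSB) &
		  HTT_DATA_TX_DESC_FLAGS1_VDEV_ID_MASK;
	flags1 |= (ext_tid << HTT_DATA_TX_DESC_FLAGS1_EXT_TID_LSB) &
		  HTT_DATA_TX_DESC_FLAGS1_EXT_TID_MASK;

	return __cpu_to_le16(flags1);
}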
enum htt_data_tx_ext_tid {
	HTT_DATA_TX_EXT_TID_NON_QOS_MCAST_BCAST = 16,
	HTT_DATA_TX_EXT_TID_MGMT                = 17,
	HTT_DATA_TX_EXT_TID_INVALID             = 31
};

#define HTT_INVALID_PEERID 0xFFFF
/*
 * htt_data_tx_desc - used for data tx path
 *
 * Note: vdev_id irrelevant for pkt_type == raw and no_classify == 1.
 * ext_tid: for qos-data frames (0-15), see %HTT_DATA_TX_EXT_TID_
 *	    for special kinds of tids
 * postponed: only for HL hosts. indicates if this is a resend
 *	      (HL hosts manage queues on the host)
 * more_in_batch: only for HL hosts. indicates if more packets are
 *		  pending. this allows the target to wait and aggregate
 * freq: 0 means home channel of given vdev. intended for offchannel
 */
struct htt_data_tx_desc {
	u8 flags0; /* %HTT_DATA_TX_DESC_FLAGS0_ */
	__le16 flags1; /* %HTT_DATA_TX_DESC_FLAGS1_ */
	__le16 len;
	__le16 id;
	__le32 frags_paddr;
	union {
		__le32 peerid;
		struct {
			__le16 peerid;
			__le16 freq;
		} __packed offchan_tx;
	} __packed;
	u8 prefetch[0]; /* start of frame, for FW classification engine */
} __packed;
struct htt_data_tx_desc_64 {
	u8 flags0; /* %HTT_DATA_TX_DESC_FLAGS0_ */
	__le16 flags1; /* %HTT_DATA_TX_DESC_FLAGS1_ */
	__le16 len;
	__le16 id;
	__le64 frags_paddr;
	union {
		__le32 peerid;
		struct {
			__le16 peerid;
			__le16 freq;
		} __packed offchan_tx;
	} __packed;
	u8 prefetch[0]; /* start of frame, for FW classification engine */
} __packed;
enum htt_rx_ring_flags {
	HTT_RX_RING_FLAGS_MAC80211_HDR = 1 << 0,
	HTT_RX_RING_FLAGS_MSDU_PAYLOAD = 1 << 1,
	HTT_RX_RING_FLAGS_PPDU_START   = 1 << 2,
	HTT_RX_RING_FLAGS_PPDU_END     = 1 << 3,
	HTT_RX_RING_FLAGS_MPDU_START   = 1 << 4,
	HTT_RX_RING_FLAGS_MPDU_END     = 1 << 5,
	HTT_RX_RING_FLAGS_MSDU_START   = 1 << 6,
	HTT_RX_RING_FLAGS_MSDU_END     = 1 << 7,
	HTT_RX_RING_FLAGS_RX_ATTENTION = 1 << 8,
	HTT_RX_RING_FLAGS_FRAG_INFO    = 1 << 9,
	HTT_RX_RING_FLAGS_UNICAST_RX   = 1 << 10,
	HTT_RX_RING_FLAGS_MULTICAST_RX = 1 << 11,
	HTT_RX_RING_FLAGS_CTRL_RX      = 1 << 12,
	HTT_RX_RING_FLAGS_MGMT_RX      = 1 << 13,
	HTT_RX_RING_FLAGS_NULL_RX      = 1 << 14,
	HTT_RX_RING_FLAGS_PHY_DATA_RX  = 1 << 15
};

#define HTT_RX_RING_SIZE_MIN 128
#define HTT_RX_RING_SIZE_MAX 2048
#define HTT_RX_RING_SIZE HTT_RX_RING_SIZE_MAX
#define HTT_RX_RING_FILL_LEVEL (((HTT_RX_RING_SIZE) / 2) - 1)
#define HTT_RX_RING_FILL_LEVEL_DUAL_MAC (HTT_RX_RING_SIZE - 1)
struct htt_rx_ring_setup_ring32 {
	__le32 fw_idx_shadow_reg_paddr;
	__le32 rx_ring_base_paddr;
	__le16 rx_ring_len; /* in 4-byte words */
	__le16 rx_ring_bufsize; /* rx skb size - in bytes */
	__le16 flags; /* %HTT_RX_RING_FLAGS_ */
	__le16 fw_idx_init_val;

	/* the following offsets are in 4-byte units */
	__le16 mac80211_hdr_offset;
	__le16 msdu_payload_offset;
	__le16 ppdu_start_offset;
	__le16 ppdu_end_offset;
	__le16 mpdu_start_offset;
	__le16 mpdu_end_offset;
	__le16 msdu_start_offset;
	__le16 msdu_end_offset;
	__le16 rx_attention_offset;
	__le16 frag_info_offset;
} __packed;

struct htt_rx_ring_setup_ring64 {
	__le64 fw_idx_shadow_reg_paddr;
	__le64 rx_ring_base_paddr;
	__le16 rx_ring_len; /* in 4-byte words */
	__le16 rx_ring_bufsize; /* rx skb size - in bytes */
	__le16 flags; /* %HTT_RX_RING_FLAGS_ */
	__le16 fw_idx_init_val;

	/* the following offsets are in 4-byte units */
	__le16 mac80211_hdr_offset;
	__le16 msdu_payload_offset;
	__le16 ppdu_start_offset;
	__le16 ppdu_end_offset;
	__le16 mpdu_start_offset;
	__le16 mpdu_end_offset;
	__le16 msdu_start_offset;
	__le16 msdu_end_offset;
	__le16 rx_attention_offset;
	__le16 frag_info_offset;
} __packed;
struct htt_rx_ring_setup_hdr {
	u8 num_rings; /* supported values: 1, 2 */
	__le16 rsvd0;
} __packed;

struct htt_rx_ring_setup_32 {
	struct htt_rx_ring_setup_hdr hdr;
	struct htt_rx_ring_setup_ring32 rings[0];
} __packed;

struct htt_rx_ring_setup_64 {
	struct htt_rx_ring_setup_hdr hdr;
	struct htt_rx_ring_setup_ring64 rings[0];
} __packed;
/*
 * htt_stats_req - request target to send specified statistics
 *
 * @msg_type: hardcoded %HTT_H2T_MSG_TYPE_STATS_REQ
 * @upload_types: see %htt_dbg_stats_type. this is actually a 24-bit field,
 *	so make sure it is little-endian.
 * @reset_types: see %htt_dbg_stats_type. this is actually a 24-bit field,
 *	so make sure it is little-endian.
 * @cfg_val: stat_type specific configuration
 * @stat_type: see %htt_dbg_stats_type
 * @cookie_lsb: used for confirmation message from target->host
 * @cookie_msb: ditto as %cookie
 */
struct htt_stats_req {
#define HTT_STATS_REQ_CFG_STAT_TYPE_INVALID 0xff
/*
 * htt_oob_sync_req - request out-of-band sync
 *
 * The HTT SYNC tells the target to suspend processing of subsequent
 * HTT host-to-target messages until some other target agent locally
 * informs the target HTT FW that the current sync counter is equal to
 * or greater than (in a modulo sense) the sync counter specified in
 * the SYNC message.
 *
 * This allows other host-target components to synchronize their operation
 * with HTT, e.g. to ensure that tx frames don't get transmitted until a
 * security key has been downloaded to and activated by the target.
 * In the absence of any explicit synchronization counter value
 * specification, the target HTT FW will use zero as the default current
 * sync value.
 *
 * The HTT target FW will suspend its host->target message processing as long
 * as 0 < (in-band sync counter - out-of-band sync counter) & 0xff < 128.
 */
struct htt_oob_sync_req {
	u8 sync_count;
	__le16 rsvd0;
} __packed;
struct htt_aggr_conf {
	u8 max_num_ampdu_subframes;
	/* amsdu_subframes is limited by 0x1F mask */
	u8 max_num_amsdu_subframes;
} __packed;
#define HTT_MGMT_FRM_HDR_DOWNLOAD_LEN 32

struct htt_mgmt_tx_desc_qca99x0 {
	__le32 rate;
} __packed;

struct htt_mgmt_tx_desc {
	u8 pad[sizeof(u32) - sizeof(struct htt_cmd_hdr)];
	__le32 msdu_paddr;
	__le32 desc_id;
	__le32 len;
	__le32 vdev_id;
	u8 hdr[HTT_MGMT_FRM_HDR_DOWNLOAD_LEN];
	union {
		struct htt_mgmt_tx_desc_qca99x0 qca99x0;
	} __packed;
} __packed;
enum htt_mgmt_tx_status {
	HTT_MGMT_TX_STATUS_OK    = 0,
	HTT_MGMT_TX_STATUS_RETRY = 1,
	HTT_MGMT_TX_STATUS_DROP  = 2
};

/*=== target -> host messages ===============================================*/
enum htt_main_t2h_msg_type {
	HTT_MAIN_T2H_MSG_TYPE_VERSION_CONF           = 0x0,
	HTT_MAIN_T2H_MSG_TYPE_RX_IND                 = 0x1,
	HTT_MAIN_T2H_MSG_TYPE_RX_FLUSH               = 0x2,
	HTT_MAIN_T2H_MSG_TYPE_PEER_MAP               = 0x3,
	HTT_MAIN_T2H_MSG_TYPE_PEER_UNMAP             = 0x4,
	HTT_MAIN_T2H_MSG_TYPE_RX_ADDBA               = 0x5,
	HTT_MAIN_T2H_MSG_TYPE_RX_DELBA               = 0x6,
	HTT_MAIN_T2H_MSG_TYPE_TX_COMPL_IND           = 0x7,
	HTT_MAIN_T2H_MSG_TYPE_PKTLOG                 = 0x8,
	HTT_MAIN_T2H_MSG_TYPE_STATS_CONF             = 0x9,
	HTT_MAIN_T2H_MSG_TYPE_RX_FRAG_IND            = 0xa,
	HTT_MAIN_T2H_MSG_TYPE_SEC_IND                = 0xb,
	HTT_MAIN_T2H_MSG_TYPE_TX_INSPECT_IND         = 0xd,
	HTT_MAIN_T2H_MSG_TYPE_MGMT_TX_COMPL_IND      = 0xe,
	HTT_MAIN_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND   = 0xf,
	HTT_MAIN_T2H_MSG_TYPE_RX_PN_IND              = 0x10,
	HTT_MAIN_T2H_MSG_TYPE_RX_OFFLOAD_DELIVER_IND = 0x11,
	HTT_MAIN_T2H_MSG_TYPE_TEST,

	HTT_MAIN_T2H_NUM_MSGS
};
enum htt_10x_t2h_msg_type {
	HTT_10X_T2H_MSG_TYPE_VERSION_CONF       = 0x0,
	HTT_10X_T2H_MSG_TYPE_RX_IND             = 0x1,
	HTT_10X_T2H_MSG_TYPE_RX_FLUSH           = 0x2,
	HTT_10X_T2H_MSG_TYPE_PEER_MAP           = 0x3,
	HTT_10X_T2H_MSG_TYPE_PEER_UNMAP         = 0x4,
	HTT_10X_T2H_MSG_TYPE_RX_ADDBA           = 0x5,
	HTT_10X_T2H_MSG_TYPE_RX_DELBA           = 0x6,
	HTT_10X_T2H_MSG_TYPE_TX_COMPL_IND       = 0x7,
	HTT_10X_T2H_MSG_TYPE_PKTLOG             = 0x8,
	HTT_10X_T2H_MSG_TYPE_STATS_CONF         = 0x9,
	HTT_10X_T2H_MSG_TYPE_RX_FRAG_IND        = 0xa,
	HTT_10X_T2H_MSG_TYPE_SEC_IND            = 0xb,
	HTT_10X_T2H_MSG_TYPE_RC_UPDATE_IND      = 0xc,
	HTT_10X_T2H_MSG_TYPE_TX_INSPECT_IND     = 0xd,
	HTT_10X_T2H_MSG_TYPE_TEST               = 0xe,
	HTT_10X_T2H_MSG_TYPE_CHAN_CHANGE        = 0xf,
	HTT_10X_T2H_MSG_TYPE_AGGR_CONF          = 0x11,
	HTT_10X_T2H_MSG_TYPE_STATS_NOUPLOAD     = 0x12,
	HTT_10X_T2H_MSG_TYPE_MGMT_TX_COMPL_IND  = 0x13,
};
enum htt_tlv_t2h_msg_type {
	HTT_TLV_T2H_MSG_TYPE_VERSION_CONF           = 0x0,
	HTT_TLV_T2H_MSG_TYPE_RX_IND                 = 0x1,
	HTT_TLV_T2H_MSG_TYPE_RX_FLUSH               = 0x2,
	HTT_TLV_T2H_MSG_TYPE_PEER_MAP               = 0x3,
	HTT_TLV_T2H_MSG_TYPE_PEER_UNMAP             = 0x4,
	HTT_TLV_T2H_MSG_TYPE_RX_ADDBA               = 0x5,
	HTT_TLV_T2H_MSG_TYPE_RX_DELBA               = 0x6,
	HTT_TLV_T2H_MSG_TYPE_TX_COMPL_IND           = 0x7,
	HTT_TLV_T2H_MSG_TYPE_PKTLOG                 = 0x8,
	HTT_TLV_T2H_MSG_TYPE_STATS_CONF             = 0x9,
	HTT_TLV_T2H_MSG_TYPE_RX_FRAG_IND            = 0xa,
	HTT_TLV_T2H_MSG_TYPE_SEC_IND                = 0xb,
	HTT_TLV_T2H_MSG_TYPE_RC_UPDATE_IND          = 0xc, /* deprecated */
	HTT_TLV_T2H_MSG_TYPE_TX_INSPECT_IND         = 0xd,
	HTT_TLV_T2H_MSG_TYPE_MGMT_TX_COMPL_IND      = 0xe,
	HTT_TLV_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND   = 0xf,
	HTT_TLV_T2H_MSG_TYPE_RX_PN_IND              = 0x10,
	HTT_TLV_T2H_MSG_TYPE_RX_OFFLOAD_DELIVER_IND = 0x11,
	HTT_TLV_T2H_MSG_TYPE_RX_IN_ORD_PADDR_IND    = 0x12,
	HTT_TLV_T2H_MSG_TYPE_WDI_IPA_OP_RESPONSE    = 0x14,
	HTT_TLV_T2H_MSG_TYPE_CHAN_CHANGE            = 0x15,
	HTT_TLV_T2H_MSG_TYPE_RX_OFLD_PKT_ERR        = 0x16,
	HTT_TLV_T2H_MSG_TYPE_TEST,
};
enum htt_10_4_t2h_msg_type {
	HTT_10_4_T2H_MSG_TYPE_VERSION_CONF           = 0x0,
	HTT_10_4_T2H_MSG_TYPE_RX_IND                 = 0x1,
	HTT_10_4_T2H_MSG_TYPE_RX_FLUSH               = 0x2,
	HTT_10_4_T2H_MSG_TYPE_PEER_MAP               = 0x3,
	HTT_10_4_T2H_MSG_TYPE_PEER_UNMAP             = 0x4,
	HTT_10_4_T2H_MSG_TYPE_RX_ADDBA               = 0x5,
	HTT_10_4_T2H_MSG_TYPE_RX_DELBA               = 0x6,
	HTT_10_4_T2H_MSG_TYPE_TX_COMPL_IND           = 0x7,
	HTT_10_4_T2H_MSG_TYPE_PKTLOG                 = 0x8,
	HTT_10_4_T2H_MSG_TYPE_STATS_CONF             = 0x9,
	HTT_10_4_T2H_MSG_TYPE_RX_FRAG_IND            = 0xa,
	HTT_10_4_T2H_MSG_TYPE_SEC_IND                = 0xb,
	HTT_10_4_T2H_MSG_TYPE_RC_UPDATE_IND          = 0xc,
	HTT_10_4_T2H_MSG_TYPE_TX_INSPECT_IND         = 0xd,
	HTT_10_4_T2H_MSG_TYPE_MGMT_TX_COMPL_IND      = 0xe,
	HTT_10_4_T2H_MSG_TYPE_CHAN_CHANGE            = 0xf,
	HTT_10_4_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND   = 0x10,
	HTT_10_4_T2H_MSG_TYPE_RX_PN_IND              = 0x11,
	HTT_10_4_T2H_MSG_TYPE_RX_OFFLOAD_DELIVER_IND = 0x12,
	HTT_10_4_T2H_MSG_TYPE_TEST                   = 0x13,
	HTT_10_4_T2H_MSG_TYPE_EN_STATS               = 0x14,
	HTT_10_4_T2H_MSG_TYPE_AGGR_CONF              = 0x15,
	HTT_10_4_T2H_MSG_TYPE_TX_FETCH_IND           = 0x16,
	HTT_10_4_T2H_MSG_TYPE_TX_FETCH_CONFIRM       = 0x17,
	HTT_10_4_T2H_MSG_TYPE_STATS_NOUPLOAD         = 0x18,
	/* 0x19 to 0x2f are reserved */
	HTT_10_4_T2H_MSG_TYPE_TX_MODE_SWITCH_IND     = 0x30,
	HTT_10_4_T2H_MSG_TYPE_PEER_STATS             = 0x31,

	HTT_10_4_T2H_NUM_MSGS
};
enum htt_t2h_msg_type {
	HTT_T2H_MSG_TYPE_VERSION_CONF,
	HTT_T2H_MSG_TYPE_RX_IND,
	HTT_T2H_MSG_TYPE_RX_FLUSH,
	HTT_T2H_MSG_TYPE_PEER_MAP,
	HTT_T2H_MSG_TYPE_PEER_UNMAP,
	HTT_T2H_MSG_TYPE_RX_ADDBA,
	HTT_T2H_MSG_TYPE_RX_DELBA,
	HTT_T2H_MSG_TYPE_TX_COMPL_IND,
	HTT_T2H_MSG_TYPE_PKTLOG,
	HTT_T2H_MSG_TYPE_STATS_CONF,
	HTT_T2H_MSG_TYPE_RX_FRAG_IND,
	HTT_T2H_MSG_TYPE_SEC_IND,
	HTT_T2H_MSG_TYPE_RC_UPDATE_IND,
	HTT_T2H_MSG_TYPE_TX_INSPECT_IND,
	HTT_T2H_MSG_TYPE_MGMT_TX_COMPLETION,
	HTT_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND,
	HTT_T2H_MSG_TYPE_RX_PN_IND,
	HTT_T2H_MSG_TYPE_RX_OFFLOAD_DELIVER_IND,
	HTT_T2H_MSG_TYPE_RX_IN_ORD_PADDR_IND,
	HTT_T2H_MSG_TYPE_WDI_IPA_OP_RESPONSE,
	HTT_T2H_MSG_TYPE_CHAN_CHANGE,
	HTT_T2H_MSG_TYPE_RX_OFLD_PKT_ERR,
	HTT_T2H_MSG_TYPE_AGGR_CONF,
	HTT_T2H_MSG_TYPE_STATS_NOUPLOAD,
	HTT_T2H_MSG_TYPE_TEST,
	HTT_T2H_MSG_TYPE_EN_STATS,
	HTT_T2H_MSG_TYPE_TX_FETCH_IND,
	HTT_T2H_MSG_TYPE_TX_FETCH_CONFIRM,
	HTT_T2H_MSG_TYPE_TX_MODE_SWITCH_IND,
	HTT_T2H_MSG_TYPE_PEER_STATS,
};
/*
 * htt_resp_hdr - header for target-to-host messages
 *
 * msg_type: see htt_t2h_msg_type
 */
struct htt_resp_hdr {
	u8 msg_type;
} __packed;

#define HTT_RESP_HDR_MSG_TYPE_OFFSET 0
#define HTT_RESP_HDR_MSG_TYPE_MASK   0xff
#define HTT_RESP_HDR_MSG_TYPE_LSB    0

/* htt_ver_resp - response sent for htt_ver_req */
struct htt_ver_resp {
	u8 minor;
	u8 major;
	u8 rsvd0;
} __packed;
#define HTT_MGMT_TX_CMPL_FLAG_ACK_RSSI BIT(0)

#define HTT_MGMT_TX_CMPL_INFO_ACK_RSSI_MASK GENMASK(7, 0)

struct htt_mgmt_tx_completion {
#define HTT_RX_INDICATION_INFO0_EXT_TID_MASK  (0x1F)
#define HTT_RX_INDICATION_INFO0_EXT_TID_LSB   (0)
#define HTT_RX_INDICATION_INFO0_FLUSH_VALID   (1 << 5)
#define HTT_RX_INDICATION_INFO0_RELEASE_VALID (1 << 6)

#define HTT_RX_INDICATION_INFO1_FLUSH_START_SEQNO_MASK   0x0000003F
#define HTT_RX_INDICATION_INFO1_FLUSH_START_SEQNO_LSB    0
#define HTT_RX_INDICATION_INFO1_FLUSH_END_SEQNO_MASK     0x00000FC0
#define HTT_RX_INDICATION_INFO1_FLUSH_END_SEQNO_LSB      6
#define HTT_RX_INDICATION_INFO1_RELEASE_START_SEQNO_MASK 0x0003F000
#define HTT_RX_INDICATION_INFO1_RELEASE_START_SEQNO_LSB  12
#define HTT_RX_INDICATION_INFO1_RELEASE_END_SEQNO_MASK   0x00FC0000
#define HTT_RX_INDICATION_INFO1_RELEASE_END_SEQNO_LSB    18
#define HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES_MASK     0xFF000000
#define HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES_LSB      24

#define HTT_TX_CMPL_FLAG_DATA_RSSI BIT(0)
struct htt_rx_indication_hdr {
	u8 info0; /* %HTT_RX_INDICATION_INFO0_ */
	__le16 peer_id;
	__le32 info1; /* %HTT_RX_INDICATION_INFO1_ */
} __packed;
#define HTT_RX_INDICATION_INFO0_PHY_ERR_VALID    (1 << 0)
#define HTT_RX_INDICATION_INFO0_LEGACY_RATE_MASK (0x1E)
#define HTT_RX_INDICATION_INFO0_LEGACY_RATE_LSB  (1)
#define HTT_RX_INDICATION_INFO0_LEGACY_RATE_CCK  (1 << 5)
#define HTT_RX_INDICATION_INFO0_END_VALID        (1 << 6)
#define HTT_RX_INDICATION_INFO0_START_VALID      (1 << 7)

#define HTT_RX_INDICATION_INFO1_VHT_SIG_A1_MASK    0x00FFFFFF
#define HTT_RX_INDICATION_INFO1_VHT_SIG_A1_LSB     0
#define HTT_RX_INDICATION_INFO1_PREAMBLE_TYPE_MASK 0xFF000000
#define HTT_RX_INDICATION_INFO1_PREAMBLE_TYPE_LSB  24

#define HTT_RX_INDICATION_INFO2_VHT_SIG_A1_MASK 0x00FFFFFF
#define HTT_RX_INDICATION_INFO2_VHT_SIG_A1_LSB  0
#define HTT_RX_INDICATION_INFO2_SERVICE_MASK    0xFF000000
#define HTT_RX_INDICATION_INFO2_SERVICE_LSB     24
enum htt_rx_legacy_rate {
	HTT_RX_CCK_11_LP  = 0,
	HTT_RX_CCK_5_5_LP = 1,
};

enum htt_rx_legacy_rate_type {
	HTT_RX_LEGACY_RATE_OFDM = 0,
	HTT_RX_LEGACY_RATE_CCK
};
enum htt_rx_preamble_type {
	HTT_RX_HT_WITH_TXBF  = 0x9,
	HTT_RX_VHT_WITH_TXBF = 0xD,
};
/*
 * Fields: phy_err_valid, phy_err_code, tsf,
 * usec_timestamp, sub_usec_timestamp
 * ..are valid only if end_valid == 1.
 *
 * Fields: rssi_chains, legacy_rate_type,
 * legacy_rate_cck, preamble_type, service,
 * ..are valid only if start_valid == 1;
 */
struct htt_rx_indication_ppdu {
	u8 combined_rssi;
	u8 sub_usec_timestamp;
	u8 phy_err_code;
	u8 info0; /* HTT_RX_INDICATION_INFO0_ */
	struct {
		u8 pri20_db;
		u8 ext20_db;
		u8 ext40_db;
		u8 ext80_db;
	} __packed rssi_chains[4];
	__le32 tsf;
	__le32 usec_timestamp;
	__le32 info1; /* HTT_RX_INDICATION_INFO1_ */
	__le32 info2; /* HTT_RX_INDICATION_INFO2_ */
} __packed;
enum htt_rx_mpdu_status {
	HTT_RX_IND_MPDU_STATUS_UNKNOWN = 0x0,
	HTT_RX_IND_MPDU_STATUS_OK,
	HTT_RX_IND_MPDU_STATUS_ERR_FCS,
	HTT_RX_IND_MPDU_STATUS_ERR_DUP,
	HTT_RX_IND_MPDU_STATUS_ERR_REPLAY,
	HTT_RX_IND_MPDU_STATUS_ERR_INV_PEER,
	/* only accept EAPOL frames */
	HTT_RX_IND_MPDU_STATUS_UNAUTH_PEER,
	HTT_RX_IND_MPDU_STATUS_OUT_OF_SYNC,
	/* Non-data in promiscuous mode */
	HTT_RX_IND_MPDU_STATUS_MGMT_CTRL,
	HTT_RX_IND_MPDU_STATUS_TKIP_MIC_ERR,
	HTT_RX_IND_MPDU_STATUS_DECRYPT_ERR,
	HTT_RX_IND_MPDU_STATUS_MPDU_LENGTH_ERR,
	HTT_RX_IND_MPDU_STATUS_ENCRYPT_REQUIRED_ERR,
	HTT_RX_IND_MPDU_STATUS_PRIVACY_ERR,

	/*
	 * MISC: discard for unspecified reasons.
	 * Leave this enum value last.
	 */
	HTT_RX_IND_MPDU_STATUS_ERR_MISC = 0xFF
};
struct htt_rx_indication_mpdu_range {
	u8 mpdu_count;
	u8 mpdu_range_status; /* %htt_rx_mpdu_status */
	u8 pad0;
	u8 pad1;
} __packed;

struct htt_rx_indication_prefix {
	__le16 fw_rx_desc_bytes;
	u8 pad0;
	u8 pad1;
} __packed;
struct htt_rx_indication {
	struct htt_rx_indication_hdr hdr;
	struct htt_rx_indication_ppdu ppdu;
	struct htt_rx_indication_prefix prefix;

	/*
	 * the following fields are both dynamically sized, so
	 * take care addressing them
	 */

	/* the size of this is %fw_rx_desc_bytes */
	struct fw_rx_desc_base fw_desc;

	/*
	 * %mpdu_ranges starts after &%prefix + roundup(%fw_rx_desc_bytes, 4)
	 * and has %num_mpdu_ranges elements.
	 */
	struct htt_rx_indication_mpdu_range mpdu_ranges[0];
} __packed;
/* High latency version of the RX indication */
struct htt_rx_indication_hl {
	struct htt_rx_indication_hdr hdr;
	struct htt_rx_indication_ppdu ppdu;
	struct htt_rx_indication_prefix prefix;
	struct fw_rx_desc_hl fw_desc;
	struct htt_rx_indication_mpdu_range mpdu_ranges[0];
} __packed;
static inline struct htt_rx_indication_mpdu_range *
		htt_rx_ind_get_mpdu_ranges(struct htt_rx_indication *rx_ind)
{
	void *ptr = rx_ind;

	ptr += sizeof(rx_ind->hdr)
	     + sizeof(rx_ind->ppdu)
	     + sizeof(rx_ind->prefix)
	     + roundup(__le16_to_cpu(rx_ind->prefix.fw_rx_desc_bytes), 4);
	return ptr;
}
static inline struct htt_rx_indication_mpdu_range *
	htt_rx_ind_get_mpdu_ranges_hl(struct htt_rx_indication_hl *rx_ind)
{
	void *ptr = rx_ind;

	ptr += sizeof(rx_ind->hdr)
	     + sizeof(rx_ind->ppdu)
	     + sizeof(rx_ind->prefix)
	     + sizeof(rx_ind->fw_desc);
	return ptr;
}
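/* Example: a receiver typically reads num_mpdu_ranges from the indication
 * header (info1, NUM_MPDU_RANGES mask/lsb above) and then walks the trailing
 * array returned by the accessor. A minimal sketch, assuming an
 * already-validated rx indication; the helper name is illustrative only.
 */
static inline void htt_example_walk_mpdu_ranges(struct htt_rx_indication *rx_ind)
{
	struct htt_rx_indication_mpdu_range *ranges;
	int num_ranges, i;

	num_ranges = (__le32_to_cpu(rx_ind->hdr.info1) &
		      HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES_MASK) >>
		     HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES_LSB;

	ranges = htt_rx_ind_get_mpdu_ranges(rx_ind);
	for (i = 0; i < num_ranges; i++)
		(void)ranges[i].mpdu_range_status; /* e.g. check for FCS errors */
}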
enum htt_rx_flush_mpdu_status {
	HTT_RX_FLUSH_MPDU_DISCARD = 0,
	HTT_RX_FLUSH_MPDU_REORDER = 1,
};

/*
 * htt_rx_flush - discard or reorder given range of mpdus
 *
 * Note: host must check if all sequence numbers between
 *	[seq_num_start, seq_num_end-1] are valid.
 */
struct htt_rx_flush {
	__le16 peer_id;
	u8 tid;
	u8 rsvd0;
	u8 mpdu_status; /* %htt_rx_flush_mpdu_status */
	u8 seq_num_start; /* it is 6 LSBs of 802.11 seq no */
	u8 seq_num_end; /* it is 6 LSBs of 802.11 seq no */
};
struct htt_rx_peer_map {

struct htt_rx_peer_unmap {
enum htt_security_types {
	HTT_SECURITY_TKIP_NOMIC,
	HTT_SECURITY_AES_CCMP,

	HTT_NUM_SECURITY_TYPES /* keep this last! */
};
enum htt_security_flags {
#define HTT_SECURITY_TYPE_MASK 0x7F
#define HTT_SECURITY_TYPE_LSB  0
	HTT_SECURITY_IS_UNICAST = 1 << 7
};

struct htt_security_indication {
	union {
		/* don't use bitfields; undefined behaviour */
		u8 flags; /* %htt_security_flags */
		struct {
			u8 security_type:7, /* %htt_security_types */
			   is_unicast:1;
		} __packed;
	} __packed;
	__le16 peer_id;
	u8 michael_key[8];
	u8 wapi_rsc[16];
} __packed;
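/* Example: since the comment above warns against relying on C bitfields for
 * the on-wire layout, the host can decode the flags byte with the mask/lsb
 * defines instead. A minimal illustrative sketch (names are examples only):
 */
static inline u8 htt_example_security_type(const struct htt_security_indication *ind)
{
	return (ind->flags & HTT_SECURITY_TYPE_MASK) >> HTT_SECURITY_TYPE_LSB;
}

static inline bool htt_example_security_is_unicast(const struct htt_security_indication *ind)
{
	return !!(ind->flags & HTT_SECURITY_IS_UNICAST);
}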
#define HTT_RX_BA_INFO0_TID_MASK     0x000F
#define HTT_RX_BA_INFO0_TID_LSB      0
#define HTT_RX_BA_INFO0_PEER_ID_MASK 0xFFF0
#define HTT_RX_BA_INFO0_PEER_ID_LSB  4

struct htt_rx_addba {
	u8 window_size;
	__le16 info0; /* %HTT_RX_BA_INFO0_ */
} __packed;

struct htt_rx_delba {
	u8 rsvd0;
	__le16 info0; /* %HTT_RX_BA_INFO0_ */
} __packed;
enum htt_data_tx_status {
	HTT_DATA_TX_STATUS_OK            = 0,
	HTT_DATA_TX_STATUS_DISCARD       = 1,
	HTT_DATA_TX_STATUS_NO_ACK        = 2,
	HTT_DATA_TX_STATUS_POSTPONE      = 3, /* HL only */
	HTT_DATA_TX_STATUS_DOWNLOAD_FAIL = 128
};

enum htt_data_tx_flags {
#define HTT_DATA_TX_STATUS_MASK 0x07
#define HTT_DATA_TX_STATUS_LSB  0
#define HTT_DATA_TX_TID_MASK    0x78
#define HTT_DATA_TX_TID_LSB     3
	HTT_DATA_TX_TID_INVALID = 1 << 7
};

#define HTT_TX_COMPL_INV_MSDU_ID 0xFFFF
struct htt_data_tx_completion {
	union {
		u8 flags;
		struct {
			u8 status:3,
			   tid:4,
			   tid_invalid:1;
		} __packed;
	} __packed;
	u8 num_msdus;
	u8 flags2; /* HTT_TX_CMPL_FLAG_DATA_RSSI */
	__le16 msdus[0]; /* variable length based on %num_msdus */
} __packed;
struct htt_tx_compl_ind_base {
	u32 hdr;
	u16 payload[1/*or more*/];
} __packed;
struct htt_rc_tx_done_params {
	u32 num_enqued; /* 1 for non-AMPDU */
	u32 num_failed; /* for AMPDU */
};

struct htt_rc_update {
	struct htt_rc_tx_done_params params[0]; /* variable length %num_elems */
} __packed;
/* see htt_rx_indication for similar fields and descriptions */
struct htt_rx_fragment_indication {
	u8 info0; /* %HTT_RX_FRAG_IND_INFO0_ */
	__le32 info1; /* %HTT_RX_FRAG_IND_INFO1_ */
	__le16 fw_rx_desc_bytes;

	u8 fw_msdu_rx_desc[0];
} __packed;
#define HTT_RX_FRAG_IND_INFO0_EXT_TID_MASK     0x1F
#define HTT_RX_FRAG_IND_INFO0_EXT_TID_LSB      0
#define HTT_RX_FRAG_IND_INFO0_FLUSH_VALID_MASK 0x20
#define HTT_RX_FRAG_IND_INFO0_FLUSH_VALID_LSB  5

#define HTT_RX_FRAG_IND_INFO1_FLUSH_SEQ_NUM_START_MASK 0x0000003F
#define HTT_RX_FRAG_IND_INFO1_FLUSH_SEQ_NUM_START_LSB  0
#define HTT_RX_FRAG_IND_INFO1_FLUSH_SEQ_NUM_END_MASK   0x00000FC0
#define HTT_RX_FRAG_IND_INFO1_FLUSH_SEQ_NUM_END_LSB    6
struct htt_rx_pn_ind {

struct htt_rx_offload_msdu {

struct htt_rx_offload_ind {

struct htt_rx_in_ord_msdu_desc {

struct htt_rx_in_ord_msdu_desc_ext {

struct htt_rx_in_ord_ind {
	union {
		struct htt_rx_in_ord_msdu_desc msdu_descs32[0];
		struct htt_rx_in_ord_msdu_desc_ext msdu_descs64[0];
	} __packed;
} __packed;
#define HTT_RX_IN_ORD_IND_INFO_TID_MASK     0x0000001f
#define HTT_RX_IN_ORD_IND_INFO_TID_LSB      0
#define HTT_RX_IN_ORD_IND_INFO_OFFLOAD_MASK 0x00000020
#define HTT_RX_IN_ORD_IND_INFO_OFFLOAD_LSB  5
#define HTT_RX_IN_ORD_IND_INFO_FRAG_MASK    0x00000040
#define HTT_RX_IN_ORD_IND_INFO_FRAG_LSB     6
/*
 * target -> host test message definition
 *
 * The following field definitions describe the format of the test
 * message sent from the target to the host.
 * The message consists of a 4-octet header, followed by a variable
 * number of 32-bit integer values, followed by a variable number
 * of 8-bit character values.
 *
 * |-----------------------------------------------------------|
 * |          num chars          |   num ints   |   msg type   |
 * |-----------------------------------------------------------|
 * |                           int 0                           |
 * |-----------------------------------------------------------|
 * |                            ...                            |
 * |-----------------------------------------------------------|
 * |    char 3    |    char 2    |    char 1    |    char 0    |
 * |-----------------------------------------------------------|
 * |              |              |      ...     |    char 4    |
 * |-----------------------------------------------------------|
 *
 * Purpose: identifies this as a test message
 * Value: HTT_MSG_TYPE_TEST
 *
 * Purpose: indicate how many 32-bit integers follow the message header
 *
 * Purpose: indicate how many 8-bit characters follow the series of integers
 */
struct htt_rx_test {
	u8 num_ints;
	__le16 num_chars;

	/* payload consists of 2 lists:
	 *  a) num_ints * sizeof(__le32)
	 *  b) num_chars * sizeof(u8) aligned to 4bytes
	 */
	u8 payload[0];
} __packed;
static inline __le32 *htt_rx_test_get_ints(struct htt_rx_test *rx_test)
{
	return (__le32 *)rx_test->payload;
}

static inline u8 *htt_rx_test_get_chars(struct htt_rx_test *rx_test)
{
	return rx_test->payload + (rx_test->num_ints * sizeof(__le32));
}
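/* Example: consuming a test message with the accessors above. A minimal
 * sketch (the function name is illustrative; bounds checking of the
 * underlying buffer is omitted):
 */
static inline void htt_example_dump_rx_test(struct htt_rx_test *rx_test)
{
	__le32 *ints = htt_rx_test_get_ints(rx_test);
	u8 *chars = htt_rx_test_get_chars(rx_test);
	int i;

	for (i = 0; i < rx_test->num_ints; i++)
		pr_debug("test int %d: %u\n", i, __le32_to_cpu(ints[i]));

	for (i = 0; i < __le16_to_cpu(rx_test->num_chars); i++)
		pr_debug("test char %d: %c\n", i, chars[i]);
}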
/*
 * target -> host packet log message
 *
 * The following field definitions describe the format of the packet log
 * message sent from the target to the host.
 * The message consists of a 4-octet header, followed by a variable number
 * of 32-bit character values.
 *
 * |31          24|23          16|15           8|7            0|
 * |-----------------------------------------------------------|
 * |              |              |              |   msg type   |
 * |-----------------------------------------------------------|
 *
 * Purpose: identifies this as a pktlog message
 * Value: HTT_MSG_TYPE_PACKETLOG
 */
struct htt_pktlog_msg {
	u8 pad[3];
	u8 payload[0];
} __packed;
struct htt_dbg_stats_rx_reorder_stats {
	/* Non QoS MPDUs received */
	__le32 deliver_non_qos;

	/* MPDUs received in-order */
	__le32 deliver_in_order;

	/* Flush due to reorder timer expired */
	__le32 deliver_flush_timeout;

	/* Flush due to move out of window */
	__le32 deliver_flush_oow;

	/* Flush due to DELBA */
	__le32 deliver_flush_delba;

	/* MPDUs dropped due to FCS error */

	/* MPDUs dropped due to monitor mode non-data packet */

	/* MPDUs dropped due to invalid peer */
	__le32 invalid_peer;

	/* MPDUs dropped due to duplication (non aggregation) */
	__le32 dup_non_aggr;

	/* MPDUs dropped due to processed before */

	/* MPDUs dropped due to duplicate in reorder queue */
	__le32 dup_in_reorder;

	/* Reorder timeout happened */
	__le32 reorder_timeout;

	/* invalid bar ssn */
	__le32 invalid_bar_ssn;

	/* reorder reset due to bar ssn */
} __packed;
struct htt_dbg_stats_wal_tx_stats {
	/* Num HTT cookies queued to dispatch list */

	/* Num HTT cookies dispatched */
	__le32 comp_delivered;

	/* Num MSDU queued to WAL */

	/* Num MPDU queue to WAL */

	/* Num MSDUs dropped by WMM limit */

	/* Num Local frames queued */
	__le32 local_enqued;

	/* Num Local frames done */

	/* Num queued to HW */

	/* Num PPDU reaped from HW */

	/* Num PPDUs cleaned up in TX abort */

	/* Num MPDUs requed by SW */
	__le32 mpdus_requed;

	/* excessive retries */

	/* data hw rate code */

	/* Scheduler self triggers */
	__le32 self_triggers;

	/* frames dropped due to excessive sw retries */
	__le32 sw_retry_failure;

	/* illegal rate phy errors */
	__le32 illgl_rate_phy_err;

	/* wal pdev continuous xretry */
	__le32 pdev_cont_xretry;

	/* wal pdev continuous xretry */
	__le32 pdev_tx_timeout;

	/* wal pdev resets */

	__le32 phy_underrun;

	/* MPDU is more than txop limit */
} __packed;
struct htt_dbg_stats_wal_rx_stats {
	/* Cnts any change in ring routing mid-ppdu */
	__le32 mid_ppdu_route_change;

	/* Total number of statuses processed */

	/* Extra frags on rings 0-3 */

	/* MSDUs / MPDUs delivered to HTT */

	/* MSDUs / MPDUs delivered to local stack */

	/* AMSDUs that have more MSDUs than the status ring size */
	__le32 oversize_amsdu;

	/* Number of PHY errors */

	/* Number of PHY errors drops */
	__le32 phy_err_drop;

	/* Number of mpdu errors - FCS, MIC, ENC etc. */
} __packed;
struct htt_dbg_stats_wal_peer_stats {
	__le32 dummy; /* REMOVE THIS ONCE REAL PEER STAT COUNTERS ARE ADDED */
} __packed;

struct htt_dbg_stats_wal_pdev_txrx {
	struct htt_dbg_stats_wal_tx_stats tx_stats;
	struct htt_dbg_stats_wal_rx_stats rx_stats;
	struct htt_dbg_stats_wal_peer_stats peer_stats;
} __packed;
struct htt_dbg_stats_rx_rate_info {
/*
 * htt_dbg_stats_status -
 * present -	The requested stats have been delivered in full.
 *		This indicates that either the stats information was contained
 *		in its entirety within this message, or else this message
 *		completes the delivery of the requested stats info that was
 *		partially delivered through earlier STATS_CONF messages.
 * partial -	The requested stats have been delivered in part.
 *		One or more subsequent STATS_CONF messages with the same
 *		cookie value will be sent to deliver the remainder of the
 *		information.
 * error -	The requested stats could not be delivered, for example due
 *		to a shortage of memory to construct a message holding the
 *		requested stats.
 * invalid -	The requested stat type is either not recognized, or the
 *		target is configured to not gather the stats type in question.
 * - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
 * series_done - This special value indicates that no further stats info
 *		elements are present within a series of stats info elems
 *		(within a stats upload confirmation message).
 */
enum htt_dbg_stats_status {
	HTT_DBG_STATS_STATUS_PRESENT     = 0,
	HTT_DBG_STATS_STATUS_PARTIAL     = 1,
	HTT_DBG_STATS_STATUS_ERROR       = 2,
	HTT_DBG_STATS_STATUS_INVALID     = 3,

	HTT_DBG_STATS_STATUS_SERIES_DONE = 7
};
/*
 * target -> host statistics upload
 *
 * The following field definitions describe the format of the HTT target
 * to host stats upload confirmation message.
 * The message contains a cookie echoed from the HTT host->target stats
 * upload request, which identifies which request the confirmation is
 * for, and a series of tag-length-value stats information elements.
 * The tag-length header for each stats info element also includes a
 * status field, to indicate whether the request for the stat type in
 * question was fully met, partially met, unable to be met, or invalid
 * (if the stat type in question is disabled in the target).
 * A special value of all 1's in this status field is used to indicate
 * the end of the series of stats info elements.
 *
 * |31                         16|15           8|7   5|4       0|
 * |------------------------------------------------------------|
 * |                  reserved                  |    msg type    |
 * |------------------------------------------------------------|
 * |                        cookie LSBs                          |
 * |------------------------------------------------------------|
 * |                        cookie MSBs                          |
 * |------------------------------------------------------------|
 * |     stats entry length      |   reserved   |  S  |stat type |
 * |------------------------------------------------------------|
 * |                  type-specific stats info                   |
 * |------------------------------------------------------------|
 * |     stats entry length      |   reserved   |  S  |stat type |
 * |------------------------------------------------------------|
 * |                  type-specific stats info                   |
 * |------------------------------------------------------------|
 * |             n/a             |   reserved   | 111 |   n/a    |
 * |------------------------------------------------------------|
 * Header fields:
 *   - MSG_TYPE
 *     Purpose: identifies this as a statistics upload confirmation message
 *   - COOKIE_LSBS
 *     Purpose: Provide a mechanism to match a target->host stats confirmation
 *       message with its preceding host->target stats request message.
 *     Value: LSBs of the opaque cookie specified by the host-side requestor
 *   - COOKIE_MSBS
 *     Purpose: Provide a mechanism to match a target->host stats confirmation
 *       message with its preceding host->target stats request message.
 *     Value: MSBs of the opaque cookie specified by the host-side requestor
 *
 * Stats Information Element tag-length header fields:
 *   - STAT_TYPE
 *     Purpose: identifies the type of statistics info held in the
 *       following information element
 *     Value: htt_dbg_stats_type
 *   - STATUS
 *     Purpose: indicate whether the requested stats are present
 *     Value: htt_dbg_stats_status, including a special value (0x7) to mark
 *       the completion of the stats entry series
 *   - LENGTH
 *     Purpose: indicate the stats information size
 *     Value: This field specifies the number of bytes of stats information
 *       that follows the element tag-length header.
 *       It is expected but not required that this length is a multiple of
 *       4 bytes. Even if the length is not an integer multiple of 4, the
 *       subsequent stats entry header will begin on a 4-byte aligned
 *       boundary.
 */
#define HTT_STATS_CONF_ITEM_INFO_STAT_TYPE_MASK 0x1F
#define HTT_STATS_CONF_ITEM_INFO_STAT_TYPE_LSB  0
#define HTT_STATS_CONF_ITEM_INFO_STATUS_MASK    0xE0
#define HTT_STATS_CONF_ITEM_INFO_STATUS_LSB     5
struct htt_stats_conf_item {
	union {
		u8 info;
		struct {
			u8 stat_type:5; /* %HTT_DBG_STATS_ */
			u8 status:3; /* %HTT_DBG_STATS_STATUS_ */
		} __packed;
	} __packed;
	u8 pad;
	__le16 length;
	u8 payload[0]; /* roundup(length, 4) long */
} __packed;

struct htt_stats_conf {
	u8 pad[3];
	__le32 cookie_lsb;
	__le32 cookie_msb;

	/* each item has variable length! */
	struct htt_stats_conf_item items[0];
} __packed;
static inline struct htt_stats_conf_item *htt_stats_conf_next_item(
					const struct htt_stats_conf_item *item)
{
	return (void *)item + sizeof(*item) + roundup(item->length, 4);
}
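/* Example: walking the stats upload confirmation TLVs described above. The
 * series ends with an item whose status field holds the special "series
 * done" value (0x7). A minimal sketch, assuming the caller has already
 * bounds-checked the message against the received buffer length:
 */
static inline void htt_example_for_each_stats_item(struct htt_stats_conf *conf)
{
	const struct htt_stats_conf_item *item = conf->items;

	while (item->status != HTT_DBG_STATS_STATUS_SERIES_DONE) {
		/* item->stat_type selects a struct htt_dbg_stats_* layout */
		item = htt_stats_conf_next_item(item);
	}
}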
/*
 * host -> target FRAG DESCRIPTOR/MSDU_EXT DESC bank
 *
 * The following field definitions describe the format of the HTT host
 * to target frag_desc/msdu_ext bank configuration message.
 * The message contains the base address and the min and max id of the
 * MSDU_EXT/FRAG_DESC that will be used by the HTT to map MSDU DESC and
 * MSDU_EXT/FRAG_DESC.
 * HTT will use the id in the HTT descriptor instead of sending the
 * frag_desc_ptr. For QCA988X HW the firmware will use fragment_desc_ptr
 * but in WIFI2.0 the hardware does the mapping/translation.
 *
 * The total number of banks that can be configured is 16.
 *
 * This should be called before any TX has been initiated by the HTT.
 *
 * |31                         16|15           8|7   5|4       0|
 * |------------------------------------------------------------|
 * | DESC_SIZE    |  NUM_BANKS   | RES |SWP|pdev|    msg type    |
 * |------------------------------------------------------------|
 * |                     BANK0_BASE_ADDRESS                      |
 * |------------------------------------------------------------|
 * |                            ...                             |
 * |------------------------------------------------------------|
 * |                    BANK15_BASE_ADDRESS                      |
 * |------------------------------------------------------------|
 * |       BANK0_MAX_ID          |       BANK0_MIN_ID            |
 * |------------------------------------------------------------|
 * |                            ...                             |
 * |------------------------------------------------------------|
 * |       BANK15_MAX_ID         |       BANK15_MIN_ID           |
 * |------------------------------------------------------------|
 *
 * - BANKx_BASE_ADDRESS
 *   Purpose: Provide a mechanism to specify the base address of the MSDU_EXT
 *       bank physical/bus address.
 * - BANKx_MIN_ID
 *   Purpose: Provide a mechanism to specify the min index that needs to
 *       be mapped.
 * - BANKx_MAX_ID
 *   Purpose: Provide a mechanism to specify the max index that needs to
 *       be mapped.
 */
struct htt_frag_desc_bank_id {
	__le16 bank_min_id;
	__le16 bank_max_id;
} __packed;

/* real is 16 but it wouldn't fit in the max htt message size
 * so we use a conservatively safe value for now
 */
#define HTT_FRAG_DESC_BANK_MAX 4
#define HTT_FRAG_DESC_BANK_CFG_INFO_PDEV_ID_MASK            0x03
#define HTT_FRAG_DESC_BANK_CFG_INFO_PDEV_ID_LSB             0
#define HTT_FRAG_DESC_BANK_CFG_INFO_SWAP                    BIT(2)
#define HTT_FRAG_DESC_BANK_CFG_INFO_Q_STATE_VALID           BIT(3)
#define HTT_FRAG_DESC_BANK_CFG_INFO_Q_STATE_DEPTH_TYPE_MASK BIT(4)
#define HTT_FRAG_DESC_BANK_CFG_INFO_Q_STATE_DEPTH_TYPE_LSB  4

enum htt_q_depth_type {
	HTT_Q_DEPTH_TYPE_BYTES = 0,
	HTT_Q_DEPTH_TYPE_MSDUS = 1,
};

#define HTT_TX_Q_STATE_NUM_PEERS		(TARGET_10_4_NUM_QCACHE_PEERS_MAX + \
						 TARGET_10_4_NUM_VDEVS)
#define HTT_TX_Q_STATE_NUM_TIDS			8
#define HTT_TX_Q_STATE_ENTRY_SIZE		1
#define HTT_TX_Q_STATE_ENTRY_MULTIPLIER		0
/**
 * htt_q_state_conf - part of htt_frag_desc_bank_cfg for host q state config
 *
 * Defines host q state format and behavior. See htt_q_state.
 *
 * @record_size: Defines the size of each host q entry in bytes. In practice
 *	however firmware (at least 10.4.3-00191) ignores this host
 *	configuration value and uses a hardcoded value of 1.
 * @record_multiplier: This is valid only when q depth type is MSDUs. It
 *	defines the exponent for the power of 2 multiplication.
 */
struct htt_q_state_conf {
	__le32 paddr;
	__le16 num_peers;
	__le16 num_tids;
	u8 record_size;
	u8 record_multiplier;
	u8 pad[2];
} __packed;
struct htt_frag_desc_bank_cfg32 {
	u8 info; /* HTT_FRAG_DESC_BANK_CFG_INFO_ */
	u8 num_banks;
	u8 desc_size;
	__le32 bank_base_addrs[HTT_FRAG_DESC_BANK_MAX];
	struct htt_frag_desc_bank_id bank_id[HTT_FRAG_DESC_BANK_MAX];
	struct htt_q_state_conf q_state;
} __packed;

struct htt_frag_desc_bank_cfg64 {
	u8 info; /* HTT_FRAG_DESC_BANK_CFG_INFO_ */
	u8 num_banks;
	u8 desc_size;
	__le64 bank_base_addrs[HTT_FRAG_DESC_BANK_MAX];
	struct htt_frag_desc_bank_id bank_id[HTT_FRAG_DESC_BANK_MAX];
	struct htt_q_state_conf q_state;
} __packed;
#define HTT_TX_Q_STATE_ENTRY_COEFFICIENT	128
#define HTT_TX_Q_STATE_ENTRY_FACTOR_MASK	0x3f
#define HTT_TX_Q_STATE_ENTRY_FACTOR_LSB		0
#define HTT_TX_Q_STATE_ENTRY_EXP_MASK		0xc0
#define HTT_TX_Q_STATE_ENTRY_EXP_LSB		6
/**
 * htt_q_state - shared between host and firmware via DMA
 *
 * This structure is used for the host to expose its software queue state to
 * firmware so that its rate control can schedule fetch requests for optimized
 * performance. This is most notably used for MU-MIMO aggregation when multiple
 * MU clients are connected.
 *
 * @count: Each element defines the host queue depth. When q depth type was
 *	configured as HTT_Q_DEPTH_TYPE_BYTES then each entry is defined as:
 *	FACTOR * 128 * 8^EXP (see HTT_TX_Q_STATE_ENTRY_FACTOR_MASK and
 *	HTT_TX_Q_STATE_ENTRY_EXP_MASK). When q depth type was configured as
 *	HTT_Q_DEPTH_TYPE_MSDUS the number of packets is scaled by 2 **
 *	record_multiplier (see htt_q_state_conf).
 * @map: Used by firmware to quickly check which host queues are not empty. It
 *	is a bitmap simply saying.
 * @seq: Used by firmware to quickly check if the host queues were updated
 *	since it last checked.
 *
 * FIXME: Is the q_state map[] size calculation really correct?
 */
struct htt_q_state {
	u8 count[HTT_TX_Q_STATE_NUM_TIDS][HTT_TX_Q_STATE_NUM_PEERS];
	u32 map[HTT_TX_Q_STATE_NUM_TIDS][(HTT_TX_Q_STATE_NUM_PEERS + 31) / 32];
	__le32 seq;
} __packed;
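/* Example: decoding one byte-mode entry of htt_q_state.count[]. Per the
 * kernel-doc above, a HTT_Q_DEPTH_TYPE_BYTES entry encodes
 * FACTOR * 128 * 8^EXP, with FACTOR in bits 5:0 and EXP in bits 7:6.
 * A minimal illustrative helper (not part of the HTT interface):
 */
static inline u32 htt_example_q_state_bytes(u8 entry)
{
	u32 factor = (entry & HTT_TX_Q_STATE_ENTRY_FACTOR_MASK) >>
		     HTT_TX_Q_STATE_ENTRY_FACTOR_LSB;
	u32 exp = (entry & HTT_TX_Q_STATE_ENTRY_EXP_MASK) >>
		  HTT_TX_Q_STATE_ENTRY_EXP_LSB;

	/* 8^exp == 1 << (3 * exp) */
	return factor * HTT_TX_Q_STATE_ENTRY_COEFFICIENT * (1 << (3 * exp));
}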
#define HTT_TX_FETCH_RECORD_INFO_PEER_ID_MASK	0x0fff
#define HTT_TX_FETCH_RECORD_INFO_PEER_ID_LSB	0
#define HTT_TX_FETCH_RECORD_INFO_TID_MASK	0xf000
#define HTT_TX_FETCH_RECORD_INFO_TID_LSB	12

struct htt_tx_fetch_record {
	__le16 info; /* HTT_TX_FETCH_IND_RECORD_INFO_ */
	__le16 num_msdus;
	__le32 num_bytes;
} __packed;
struct htt_tx_fetch_ind {
	u8 pad0;
	__le16 fetch_seq_num;
	__le32 token;
	__le16 num_resp_ids;
	__le16 num_records;
	struct htt_tx_fetch_record records[0];
	__le32 resp_ids[0]; /* ath10k_htt_get_tx_fetch_ind_resp_ids() */
} __packed;
static inline void *
ath10k_htt_get_tx_fetch_ind_resp_ids(struct htt_tx_fetch_ind *ind)
{
	return (void *)&ind->records[le16_to_cpu(ind->num_records)];
}
struct htt_tx_fetch_resp {
	__le16 fetch_seq_num;
	struct htt_tx_fetch_record records[0];
} __packed;
struct htt_tx_fetch_confirm {
	__le16 num_resp_ids;
	__le32 resp_ids[0];
} __packed;
enum htt_tx_mode_switch_mode {
	HTT_TX_MODE_SWITCH_PUSH      = 0,
	HTT_TX_MODE_SWITCH_PUSH_PULL = 1,
};

#define HTT_TX_MODE_SWITCH_IND_INFO0_ENABLE		BIT(0)
#define HTT_TX_MODE_SWITCH_IND_INFO0_NUM_RECORDS_MASK	0xfffe
#define HTT_TX_MODE_SWITCH_IND_INFO0_NUM_RECORDS_LSB	1

#define HTT_TX_MODE_SWITCH_IND_INFO1_MODE_MASK		0x0003
#define HTT_TX_MODE_SWITCH_IND_INFO1_MODE_LSB		0
#define HTT_TX_MODE_SWITCH_IND_INFO1_THRESHOLD_MASK	0xfffc
#define HTT_TX_MODE_SWITCH_IND_INFO1_THRESHOLD_LSB	2

#define HTT_TX_MODE_SWITCH_RECORD_INFO0_PEER_ID_MASK	0x0fff
#define HTT_TX_MODE_SWITCH_RECORD_INFO0_PEER_ID_LSB	0
#define HTT_TX_MODE_SWITCH_RECORD_INFO0_TID_MASK	0xf000
#define HTT_TX_MODE_SWITCH_RECORD_INFO0_TID_LSB		12

struct htt_tx_mode_switch_record {
	__le16 info0; /* HTT_TX_MODE_SWITCH_RECORD_INFO0_ */
	__le16 num_max_msdus;
} __packed;
struct htt_tx_mode_switch_ind {
	__le16 info0; /* HTT_TX_MODE_SWITCH_IND_INFO0_ */
	__le16 info1; /* HTT_TX_MODE_SWITCH_IND_INFO1_ */
	struct htt_tx_mode_switch_record records[0];
} __packed;
struct htt_channel_change {
	__le32 center_freq1;
	__le32 center_freq2;
} __packed;
struct htt_per_peer_tx_stats_ind {
	__le32 failed_bytes;
} __packed;

struct htt_peer_tx_stats {
#define ATH10K_10_2_TX_STATS_OFFSET	136
#define PEER_STATS_FOR_NO_OF_PPDUS	4

struct ath10k_10_2_peer_tx_stats {
	u8 ratecode[PEER_STATS_FOR_NO_OF_PPDUS];
	u8 success_pkts[PEER_STATS_FOR_NO_OF_PPDUS];
	__le16 success_bytes[PEER_STATS_FOR_NO_OF_PPDUS];
	u8 retry_pkts[PEER_STATS_FOR_NO_OF_PPDUS];
	__le16 retry_bytes[PEER_STATS_FOR_NO_OF_PPDUS];
	u8 failed_pkts[PEER_STATS_FOR_NO_OF_PPDUS];
	__le16 failed_bytes[PEER_STATS_FOR_NO_OF_PPDUS];
	u8 flags[PEER_STATS_FOR_NO_OF_PPDUS];
} __packed;
union htt_rx_pn_t {
	/* WEP: 24-bit PN */
	u32 pn24;

	/* TKIP or CCMP: 48-bit PN */
	u64 pn48;

	/* WAPI: 128-bit PN */
	u64 pn128[2];
};
union htt_cmd {
	struct htt_cmd_hdr hdr;

	struct htt_ver_req ver_req;
	struct htt_mgmt_tx_desc mgmt_tx;
	struct htt_data_tx_desc data_tx;
	struct htt_rx_ring_setup_32 rx_setup_32;
	struct htt_rx_ring_setup_64 rx_setup_64;
	struct htt_stats_req stats_req;
	struct htt_oob_sync_req oob_sync_req;
	struct htt_aggr_conf aggr_conf;
	struct htt_frag_desc_bank_cfg32 frag_desc_bank_cfg32;
	struct htt_frag_desc_bank_cfg64 frag_desc_bank_cfg64;
	struct htt_tx_fetch_resp tx_fetch_resp;
};
union htt_resp {
	struct htt_resp_hdr hdr;

	struct htt_ver_resp ver_resp;
	struct htt_mgmt_tx_completion mgmt_tx_completion;
	struct htt_data_tx_completion data_tx_completion;
	struct htt_rx_indication rx_ind;
	struct htt_rx_indication_hl rx_ind_hl;
	struct htt_rx_fragment_indication rx_frag_ind;
	struct htt_rx_peer_map peer_map;
	struct htt_rx_peer_unmap peer_unmap;
	struct htt_rx_flush rx_flush;
	struct htt_rx_addba rx_addba;
	struct htt_rx_delba rx_delba;
	struct htt_security_indication security_indication;
	struct htt_rc_update rc_update;
	struct htt_rx_test rx_test;
	struct htt_pktlog_msg pktlog_msg;
	struct htt_stats_conf stats_conf;
	struct htt_rx_pn_ind rx_pn_ind;
	struct htt_rx_offload_ind rx_offload_ind;
	struct htt_rx_in_ord_ind rx_in_ord_ind;
	struct htt_tx_fetch_ind tx_fetch_ind;
	struct htt_tx_fetch_confirm tx_fetch_confirm;
	struct htt_tx_mode_switch_ind tx_mode_switch_ind;
	struct htt_channel_change chan_change;
	struct htt_peer_tx_stats peer_tx_stats;
} __packed;
/*** host side structures follow ***/
struct htt_tx_done {
	u16 msdu_id;
	u16 status;
	u8 ack_rssi;
};

enum htt_tx_compl_state {
	HTT_TX_COMPL_STATE_NONE,
	HTT_TX_COMPL_STATE_ACK,
	HTT_TX_COMPL_STATE_NOACK,
	HTT_TX_COMPL_STATE_DISCARD,
};
struct htt_peer_map_event {

struct htt_peer_unmap_event {
struct ath10k_htt_txbuf_32 {
	struct htt_data_tx_desc_frag frags[2];
	struct ath10k_htc_hdr htc_hdr;
	struct htt_cmd_hdr cmd_hdr;
	struct htt_data_tx_desc cmd_tx;
} __packed;

struct ath10k_htt_txbuf_64 {
	struct htt_data_tx_desc_frag frags[2];
	struct ath10k_htc_hdr htc_hdr;
	struct htt_cmd_hdr cmd_hdr;
	struct htt_data_tx_desc_64 cmd_tx;
} __packed;
struct ath10k_htt {
	struct ath10k *ar;
	enum ath10k_htc_ep_id eid;

	u8 target_version_major;
	u8 target_version_minor;
	struct completion target_version_received;

	const enum htt_t2h_msg_type *t2h_msg_types;
	u32 t2h_msg_types_max;

	struct {
		/*
		 * Ring of network buffer objects - This ring is
		 * used exclusively by the host SW. This ring
		 * mirrors the dev_addrs_ring that is shared
		 * between the host SW and the MAC HW. The host SW
		 * uses this netbufs ring to locate the network
		 * buffer objects whose data buffers the HW has
		 * filled.
		 */
		struct sk_buff **netbufs_ring;

		/* This is used only with firmware supporting IN_ORD_IND.
		 *
		 * With Full Rx Reorder the HTT Rx Ring is more of a temporary
		 * buffer ring from which buffer addresses are copied by the
		 * firmware to MAC Rx ring. Firmware then delivers IN_ORD_IND
		 * pointing to specific (re-ordered) buffers.
		 *
		 * FIXME: With kernel generic hashing functions there's a lot
		 * of hash collisions for sk_buffs.
		 */
		DECLARE_HASHTABLE(skb_table, 4);

		/*
		 * Ring of buffer addresses -
		 * This ring holds the "physical" device address of the
		 * rx buffers the host SW provides for the MAC HW to
		 * fill.
		 */
		union {
			__le64 *paddrs_ring_64;
			__le32 *paddrs_ring_32;
		};

		/*
		 * Base address of ring, as a "physical" device address
		 * rather than a CPU address.
		 */
		dma_addr_t base_paddr;

		/* how many elems in the ring (power of 2) */
		int size;

		/* size - 1 */
		unsigned int size_mask;

		/* how many rx buffers to keep in the ring */
		int fill_level;

		/* how many rx buffers (full+empty) are in the ring */
		int fill_cnt;

		/*
		 * alloc_idx - where HTT SW has deposited empty buffers
		 * This is allocated in consistent mem, so that the FW can
		 * read this variable, and program the HW's FW_IDX reg with
		 * the value of this shadow register.
		 */
		struct {
			__le32 *vaddr;
			dma_addr_t paddr;
		} alloc_idx;

		/* where HTT SW has processed bufs filled by rx MAC DMA */
		struct {
			unsigned int msdu_payld;
		} sw_rd_idx;

		/*
		 * refill_retry_timer - timer triggered when the ring is
		 * not refilled to the level expected
		 */
		struct timer_list refill_retry_timer;

		/* Protects access to all rx ring buffer state variables */
		spinlock_t lock;
	} rx_ring;

	unsigned int prefetch_len;

	/* Protects access to pending_tx, num_pending_tx */
	spinlock_t tx_lock;
	int max_num_pending_tx;
	int num_pending_tx;
	int num_pending_mgmt_tx;
	struct idr pending_tx;
	wait_queue_head_t empty_tx_wq;

	/* FIFO for storing tx done status {ack, no-ack, discard} and msdu id */
	DECLARE_KFIFO_PTR(txdone_fifo, struct htt_tx_done);

	/* set if host-fw communication goes haywire
	 * used to avoid further failures
	 */
	bool rx_confused;
	atomic_t num_mpdus_ready;

	/* This is used to group tx/rx completions separately and process them
	 * in batches to reduce cache stalls
	 */
	struct sk_buff_head rx_msdus_q;
	struct sk_buff_head rx_in_ord_compl_q;
	struct sk_buff_head tx_fetch_ind_q;

	/* rx_status template */
	struct ieee80211_rx_status rx_status;

	struct {
		dma_addr_t paddr;
		union {
			struct htt_msdu_ext_desc *vaddr_desc_32;
			struct htt_msdu_ext_desc_64 *vaddr_desc_64;
		};
		size_t size;
	} frag_desc;

	struct {
		dma_addr_t paddr;
		union {
			struct ath10k_htt_txbuf_32 *vaddr_txbuff_32;
			struct ath10k_htt_txbuf_64 *vaddr_txbuff_64;
		};
		size_t size;
	} txbuf;

	struct {
		bool enabled;
		struct htt_q_state *vaddr;
		dma_addr_t paddr;
		u16 num_push_allowed;
		u16 num_peers;
		u16 num_tids;
		enum htt_tx_mode_switch_mode mode;
		enum htt_q_depth_type type;
	} tx_q_state;

	bool tx_mem_allocated;
	const struct ath10k_htt_tx_ops *tx_ops;
	const struct ath10k_htt_rx_ops *rx_ops;
};
struct ath10k_htt_tx_ops {
	int (*htt_send_rx_ring_cfg)(struct ath10k_htt *htt);
	int (*htt_send_frag_desc_bank_cfg)(struct ath10k_htt *htt);
	int (*htt_alloc_frag_desc)(struct ath10k_htt *htt);
	void (*htt_free_frag_desc)(struct ath10k_htt *htt);
	int (*htt_tx)(struct ath10k_htt *htt, enum ath10k_hw_txrx_mode txmode,
		      struct sk_buff *msdu);
	int (*htt_alloc_txbuff)(struct ath10k_htt *htt);
	void (*htt_free_txbuff)(struct ath10k_htt *htt);
};
static inline int ath10k_htt_send_rx_ring_cfg(struct ath10k_htt *htt)
{
	if (!htt->tx_ops->htt_send_rx_ring_cfg)
		return -EOPNOTSUPP;

	return htt->tx_ops->htt_send_rx_ring_cfg(htt);
}

static inline int ath10k_htt_send_frag_desc_bank_cfg(struct ath10k_htt *htt)
{
	if (!htt->tx_ops->htt_send_frag_desc_bank_cfg)
		return -EOPNOTSUPP;

	return htt->tx_ops->htt_send_frag_desc_bank_cfg(htt);
}

static inline int ath10k_htt_alloc_frag_desc(struct ath10k_htt *htt)
{
	if (!htt->tx_ops->htt_alloc_frag_desc)
		return -EOPNOTSUPP;

	return htt->tx_ops->htt_alloc_frag_desc(htt);
}

static inline void ath10k_htt_free_frag_desc(struct ath10k_htt *htt)
{
	if (htt->tx_ops->htt_free_frag_desc)
		htt->tx_ops->htt_free_frag_desc(htt);
}

static inline int ath10k_htt_tx(struct ath10k_htt *htt,
				enum ath10k_hw_txrx_mode txmode,
				struct sk_buff *msdu)
{
	return htt->tx_ops->htt_tx(htt, txmode, msdu);
}

static inline int ath10k_htt_alloc_txbuff(struct ath10k_htt *htt)
{
	if (!htt->tx_ops->htt_alloc_txbuff)
		return -EOPNOTSUPP;

	return htt->tx_ops->htt_alloc_txbuff(htt);
}

static inline void ath10k_htt_free_txbuff(struct ath10k_htt *htt)
{
	if (htt->tx_ops->htt_free_txbuff)
		htt->tx_ops->htt_free_txbuff(htt);
}
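/* Example: the ops tables above are how the driver abstracts 32-bit vs 64-bit
 * descriptor chips behind one HTT core. A hypothetical variant would be wired
 * up roughly as sketched below (the htt_example_* names are placeholders, not
 * the driver's actual implementations):
 *
 *	static const struct ath10k_htt_tx_ops htt_example_tx_ops = {
 *		.htt_send_rx_ring_cfg = htt_example_send_rx_ring_cfg,
 *		.htt_send_frag_desc_bank_cfg = htt_example_send_bank_cfg,
 *		.htt_alloc_frag_desc = htt_example_alloc_frag_desc,
 *		.htt_free_frag_desc = htt_example_free_frag_desc,
 *		.htt_tx = htt_example_tx,
 *		.htt_alloc_txbuff = htt_example_alloc_txbuff,
 *		.htt_free_txbuff = htt_example_free_txbuff,
 *	};
 *
 * ath10k_htt_set_tx_ops(), declared at the end of this header, is where the
 * real per-chip table is expected to be assigned to htt->tx_ops.
 */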
struct ath10k_htt_rx_ops {
	size_t (*htt_get_rx_ring_size)(struct ath10k_htt *htt);
	void (*htt_config_paddrs_ring)(struct ath10k_htt *htt, void *vaddr);
	void (*htt_set_paddrs_ring)(struct ath10k_htt *htt, dma_addr_t paddr,
				    int idx);
	void* (*htt_get_vaddr_ring)(struct ath10k_htt *htt);
	void (*htt_reset_paddrs_ring)(struct ath10k_htt *htt, int idx);
};
static inline size_t ath10k_htt_get_rx_ring_size(struct ath10k_htt *htt)
{
	if (!htt->rx_ops->htt_get_rx_ring_size)
		return 0;

	return htt->rx_ops->htt_get_rx_ring_size(htt);
}

static inline void ath10k_htt_config_paddrs_ring(struct ath10k_htt *htt,
						 void *vaddr)
{
	if (htt->rx_ops->htt_config_paddrs_ring)
		htt->rx_ops->htt_config_paddrs_ring(htt, vaddr);
}

static inline void ath10k_htt_set_paddrs_ring(struct ath10k_htt *htt,
					      dma_addr_t paddr,
					      int idx)
{
	if (htt->rx_ops->htt_set_paddrs_ring)
		htt->rx_ops->htt_set_paddrs_ring(htt, paddr, idx);
}

static inline void *ath10k_htt_get_vaddr_ring(struct ath10k_htt *htt)
{
	if (!htt->rx_ops->htt_get_vaddr_ring)
		return NULL;

	return htt->rx_ops->htt_get_vaddr_ring(htt);
}

static inline void ath10k_htt_reset_paddrs_ring(struct ath10k_htt *htt, int idx)
{
	if (htt->rx_ops->htt_reset_paddrs_ring)
		htt->rx_ops->htt_reset_paddrs_ring(htt, idx);
}
#define RX_HTT_HDR_STATUS_LEN 64

/* This structure layout is programmed via rx ring setup
 * so that FW knows how to transfer the rx descriptor to the host.
 * Buffers like this are placed on the rx ring.
 */
struct htt_rx_desc {
	union {
		/* This field is filled on the host using the msdu buffer
		 * from htt_rx_indication
		 */
		struct fw_rx_desc_base fw_desc;
		u32 pad;
	} __packed;
	struct {
		struct rx_attention attention;
		struct rx_frag_info frag_info;
		struct rx_mpdu_start mpdu_start;
		struct rx_msdu_start msdu_start;
		struct rx_msdu_end msdu_end;
		struct rx_mpdu_end mpdu_end;
		struct rx_ppdu_start ppdu_start;
		struct rx_ppdu_end ppdu_end;
	} __packed;
	u8 rx_hdr_status[RX_HTT_HDR_STATUS_LEN];
	u8 msdu_payload[0];
};
#define HTT_RX_DESC_HL_INFO_SEQ_NUM_MASK           0x00000fff
#define HTT_RX_DESC_HL_INFO_SEQ_NUM_LSB            0
#define HTT_RX_DESC_HL_INFO_ENCRYPTED_MASK         0x00001000
#define HTT_RX_DESC_HL_INFO_ENCRYPTED_LSB          12
#define HTT_RX_DESC_HL_INFO_CHAN_INFO_PRESENT_MASK 0x00002000
#define HTT_RX_DESC_HL_INFO_CHAN_INFO_PRESENT_LSB  13
#define HTT_RX_DESC_HL_INFO_MCAST_BCAST_MASK       0x00008000
#define HTT_RX_DESC_HL_INFO_MCAST_BCAST_LSB        15
#define HTT_RX_DESC_HL_INFO_FRAGMENT_MASK          0x00010000
#define HTT_RX_DESC_HL_INFO_FRAGMENT_LSB           16
#define HTT_RX_DESC_HL_INFO_KEY_ID_OCT_MASK        0x01fe0000
#define HTT_RX_DESC_HL_INFO_KEY_ID_OCT_LSB         17

struct htt_rx_desc_base_hl {
	__le32 info; /* HTT_RX_DESC_HL_INFO_ */
};

struct htt_rx_chan_info {
	__le16 primary_chan_center_freq_mhz;
	__le16 contig_chan1_center_freq_mhz;
	__le16 contig_chan2_center_freq_mhz;
	u8 phy_mode;
	u8 reserved;
} __packed;
#define HTT_RX_DESC_ALIGN 8

#define HTT_MAC_ADDR_LEN 6

/*
 * Should be: sizeof(struct htt_host_rx_desc) + max rx MSDU size,
 * rounded up to a cache line size.
 */
#define HTT_RX_BUF_SIZE 1920
#define HTT_RX_MSDU_SIZE (HTT_RX_BUF_SIZE - (int)sizeof(struct htt_rx_desc))

/* Refill a bunch of RX buffers for each refill round so that FW/HW can handle
 * aggregated traffic more nicely.
 */
#define ATH10K_HTT_MAX_NUM_REFILL 100

/*
 * DMA_MAP expects the buffer to be an integral number of cache lines.
 * Rather than checking the actual cache line size, this code makes a
 * conservative estimate of what the cache line size could be.
 */
#define HTT_LOG2_MAX_CACHE_LINE_SIZE 7	/* 2^7 = 128 */
#define HTT_MAX_CACHE_LINE_SIZE_MASK ((1 << HTT_LOG2_MAX_CACHE_LINE_SIZE) - 1)
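/* Example: the conservative cache line estimate above is typically used to
 * round a DMA buffer length up to a 128-byte multiple before mapping. A
 * minimal sketch of that arithmetic (the helper name is illustrative):
 */
static inline int htt_example_cache_line_align(int len)
{
	return (len + HTT_MAX_CACHE_LINE_SIZE_MASK) & ~HTT_MAX_CACHE_LINE_SIZE_MASK;
}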
/* These values are default in most firmware revisions and apparently are a
 * sweet spot performance wise.
 */
#define ATH10K_HTT_MAX_NUM_AMSDU_DEFAULT 3
#define ATH10K_HTT_MAX_NUM_AMPDU_DEFAULT 64
int ath10k_htt_connect(struct ath10k_htt *htt);
int ath10k_htt_init(struct ath10k *ar);
int ath10k_htt_setup(struct ath10k_htt *htt);

int ath10k_htt_tx_start(struct ath10k_htt *htt);
void ath10k_htt_tx_stop(struct ath10k_htt *htt);
void ath10k_htt_tx_destroy(struct ath10k_htt *htt);
void ath10k_htt_tx_free(struct ath10k_htt *htt);

int ath10k_htt_rx_alloc(struct ath10k_htt *htt);
int ath10k_htt_rx_ring_refill(struct ath10k *ar);
void ath10k_htt_rx_free(struct ath10k_htt *htt);

void ath10k_htt_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb);
void ath10k_htt_htc_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb);
bool ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb);
int ath10k_htt_h2t_ver_req_msg(struct ath10k_htt *htt);
int ath10k_htt_h2t_stats_req(struct ath10k_htt *htt, u8 mask, u64 cookie);
int ath10k_htt_h2t_aggr_cfg_msg(struct ath10k_htt *htt,
				u8 max_subfrms_ampdu,
				u8 max_subfrms_amsdu);
void ath10k_htt_hif_tx_complete(struct ath10k *ar, struct sk_buff *skb);
int ath10k_htt_tx_fetch_resp(struct ath10k *ar,
			     __le32 token,
			     __le16 fetch_seq_num,
			     struct htt_tx_fetch_record *records,
			     size_t num_records);

void ath10k_htt_tx_txq_update(struct ieee80211_hw *hw,
			      struct ieee80211_txq *txq);
void ath10k_htt_tx_txq_recalc(struct ieee80211_hw *hw,
			      struct ieee80211_txq *txq);
void ath10k_htt_tx_txq_sync(struct ath10k *ar);
void ath10k_htt_tx_dec_pending(struct ath10k_htt *htt);
int ath10k_htt_tx_inc_pending(struct ath10k_htt *htt);
void ath10k_htt_tx_mgmt_dec_pending(struct ath10k_htt *htt);
int ath10k_htt_tx_mgmt_inc_pending(struct ath10k_htt *htt, bool is_mgmt,
				   bool is_presp);

int ath10k_htt_tx_alloc_msdu_id(struct ath10k_htt *htt, struct sk_buff *skb);
void ath10k_htt_tx_free_msdu_id(struct ath10k_htt *htt, u16 msdu_id);
int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu);
void ath10k_htt_rx_pktlog_completion_handler(struct ath10k *ar,
					     struct sk_buff *skb);
int ath10k_htt_txrx_compl_task(struct ath10k *ar, int budget);
void ath10k_htt_set_tx_ops(struct ath10k_htt *htt);
void ath10k_htt_set_rx_ops(struct ath10k_htt *htt);

#endif