// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
 * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
 */

#include <crypto/hash.h>
#include "core.h"
#include "dp_tx.h"
#include "hal_tx.h"
#include "hif.h"
#include "debug.h"
#include "dp_rx.h"
#include "peer.h"

static void ath11k_dp_htt_htc_tx_complete(struct ath11k_base *ab,
                                          struct sk_buff *skb)
{
        dev_kfree_skb_any(skb);
}

void ath11k_dp_peer_cleanup(struct ath11k *ar, int vdev_id, const u8 *addr)
{
        struct ath11k_base *ab = ar->ab;
        struct ath11k_peer *peer;

        /* TODO: Any other peer specific DP cleanup */

        spin_lock_bh(&ab->base_lock);
        peer = ath11k_peer_find(ab, vdev_id, addr);
        if (!peer) {
                ath11k_warn(ab, "failed to lookup peer %pM on vdev %d\n",
                            addr, vdev_id);
                spin_unlock_bh(&ab->base_lock);
                return;
        }

        ath11k_peer_rx_tid_cleanup(ar, peer);
        crypto_free_shash(peer->tfm_mmic);
        spin_unlock_bh(&ab->base_lock);
}

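/* Set up the data-path state for a newly associated peer: program the
 * default REO destination ring for hash based routing, create an rx
 * reorder queue for every TID, then initialise the rx defrag context.
 * If a per-TID setup fails, every TID queue created so far is torn
 * down again before the error is returned.
 */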
int ath11k_dp_peer_setup(struct ath11k *ar, int vdev_id, const u8 *addr)
{
        struct ath11k_base *ab = ar->ab;
        struct ath11k_peer *peer;
        u32 reo_dest;
        int ret = 0, tid;

        /* NOTE: reo_dest ring id starts from 1 unlike mac_id which starts from 0 */
        reo_dest = ar->dp.mac_id + 1;
        ret = ath11k_wmi_set_peer_param(ar, addr, vdev_id,
                                        WMI_PEER_SET_DEFAULT_ROUTING,
                                        DP_RX_HASH_ENABLE | (reo_dest << 1));
        if (ret) {
                ath11k_warn(ab, "failed to set default routing %d peer :%pM vdev_id :%d\n",
                            ret, addr, vdev_id);
                return ret;
        }

        for (tid = 0; tid <= IEEE80211_NUM_TIDS; tid++) {
                ret = ath11k_peer_rx_tid_setup(ar, addr, vdev_id, tid, 1, 0,
                                               HAL_PN_TYPE_NONE);
                if (ret) {
                        ath11k_warn(ab, "failed to setup rxd tid queue for tid %d: %d\n",
                                    tid, ret);
                        goto peer_clean;
                }
        }

        ret = ath11k_peer_rx_frag_setup(ar, addr, vdev_id);
        if (ret) {
                ath11k_warn(ab, "failed to setup rx defrag context\n");
                return ret;
        }

        /* TODO: Setup other peer specific resource used in data path */

        return 0;

peer_clean:
        spin_lock_bh(&ab->base_lock);

        peer = ath11k_peer_find(ab, vdev_id, addr);
        if (!peer) {
                ath11k_warn(ab, "failed to find the peer to del rx tid\n");
                spin_unlock_bh(&ab->base_lock);
                return -ENOENT;
        }

        for (; tid >= 0; tid--)
                ath11k_peer_rx_tid_delete(ar, peer, tid);

        spin_unlock_bh(&ab->base_lock);

        return ret;
}

void ath11k_dp_srng_cleanup(struct ath11k_base *ab, struct dp_srng *ring)
{
        if (!ring->vaddr_unaligned)
                return;

        dma_free_coherent(ab->dev, ring->size, ring->vaddr_unaligned,
                          ring->paddr_unaligned);

        ring->vaddr_unaligned = NULL;
}

static int ath11k_dp_srng_find_ring_in_mask(int ring_num, const u8 *grp_mask)
{
        int ext_group_num;
        u8 mask = 1 << ring_num;

        for (ext_group_num = 0; ext_group_num < ATH11K_EXT_IRQ_GRP_NUM_MAX;
             ext_group_num++) {
                if (mask & grp_mask[ext_group_num])
                        return ext_group_num;
        }

        return -ENOENT;
}

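/* Map a (ring type, ring number) pair to the ext interrupt group that
 * services it. Each per-group mask in hw_params carries one bit per
 * ring; the group index found here later selects the MSI vector used
 * for the ring's interrupt.
 */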
static int ath11k_dp_srng_calculate_msi_group(struct ath11k_base *ab,
                                              enum hal_ring_type type, int ring_num)
{
        const u8 *grp_mask;

        switch (type) {
        case HAL_WBM2SW_RELEASE:
                if (ring_num < 3) {
                        grp_mask = &ab->hw_params.ring_mask->tx[0];
                } else if (ring_num == 3) {
                        grp_mask = &ab->hw_params.ring_mask->rx_wbm_rel[0];
                        ring_num = 0;
                } else {
                        return -ENOENT;
                }
                break;
        case HAL_REO_EXCEPTION:
                grp_mask = &ab->hw_params.ring_mask->rx_err[0];
                break;
        case HAL_REO_DST:
                grp_mask = &ab->hw_params.ring_mask->rx[0];
                break;
        case HAL_REO_STATUS:
                grp_mask = &ab->hw_params.ring_mask->reo_status[0];
                break;
        case HAL_RXDMA_MONITOR_STATUS:
        case HAL_RXDMA_MONITOR_DST:
                grp_mask = &ab->hw_params.ring_mask->rx_mon_status[0];
                break;
        case HAL_RXDMA_DST:
                grp_mask = &ab->hw_params.ring_mask->rxdma2host[0];
                break;
        case HAL_RXDMA_BUF:
                grp_mask = &ab->hw_params.ring_mask->host2rxdma[0];
                break;
        case HAL_RXDMA_MONITOR_BUF:
        case HAL_TCL_DATA:
        case HAL_TCL_CMD:
        case HAL_REO_CMD:
        case HAL_SW2WBM_RELEASE:
        case HAL_WBM_IDLE_LINK:
        case HAL_TCL_STATUS:
        case HAL_REO_REINJECT:
        case HAL_CE_SRC:
        case HAL_CE_DST:
        case HAL_CE_DST_STATUS:
        default: /* interrupt not required for these ring types */
                return -ENOENT;
        }

        return ath11k_dp_srng_find_ring_in_mask(ring_num, grp_mask);
}

static void ath11k_dp_srng_msi_setup(struct ath11k_base *ab,
                                     struct hal_srng_params *ring_params,
                                     enum hal_ring_type type, int ring_num)
{
        int msi_group_number, msi_data_count;
        u32 msi_data_start, msi_irq_start, addr_lo, addr_hi;
        int ret;

        ret = ath11k_get_user_msi_vector(ab, "DP",
                                         &msi_data_count, &msi_data_start,
                                         &msi_irq_start);
        if (ret)
                return;

        msi_group_number = ath11k_dp_srng_calculate_msi_group(ab, type,
                                                              ring_num);
        if (msi_group_number < 0) {
                ath11k_dbg(ab, ATH11K_DBG_PCI,
                           "ring not part of an ext_group; ring_type: %d,ring_num %d",
                           type, ring_num);
                ring_params->msi_addr = 0;
                ring_params->msi_data = 0;
                return;
        }

        if (msi_group_number > msi_data_count) {
                ath11k_dbg(ab, ATH11K_DBG_PCI,
                           "multiple msi_groups share one msi, msi_group_num %d",
                           msi_group_number);
        }

        ath11k_get_msi_address(ab, &addr_lo, &addr_hi);

        ring_params->msi_addr = addr_lo;
        ring_params->msi_addr |= (dma_addr_t)(((uint64_t)addr_hi) << 32);
        ring_params->msi_data = (msi_group_number % msi_data_count)
                + msi_data_start;
        ring_params->flags |= HAL_SRNG_FLAGS_MSI_INTR;
}

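/* Allocate and register one hardware SRNG. The backing buffer is
 * over-allocated by HAL_RING_BASE_ALIGN - 1 bytes so the ring base can
 * be aligned with PTR_ALIGN instead of needing an aligned allocator;
 * both the aligned and unaligned addresses are kept so the buffer can
 * be freed later. The interrupt batching and timer thresholds depend
 * on the ring type.
 */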
int ath11k_dp_srng_setup(struct ath11k_base *ab, struct dp_srng *ring,
                         enum hal_ring_type type, int ring_num,
                         int mac_id, int num_entries)
{
        struct hal_srng_params params = { 0 };
        int entry_sz = ath11k_hal_srng_get_entrysize(ab, type);
        int max_entries = ath11k_hal_srng_get_max_entries(ab, type);
        int ret;

        if (max_entries < 0 || entry_sz < 0)
                return -EINVAL;

        if (num_entries > max_entries)
                num_entries = max_entries;

        ring->size = (num_entries * entry_sz) + HAL_RING_BASE_ALIGN - 1;
        ring->vaddr_unaligned = dma_alloc_coherent(ab->dev, ring->size,
                                                   &ring->paddr_unaligned,
                                                   GFP_KERNEL);
        if (!ring->vaddr_unaligned)
                return -ENOMEM;

        ring->vaddr = PTR_ALIGN(ring->vaddr_unaligned, HAL_RING_BASE_ALIGN);
        ring->paddr = ring->paddr_unaligned + ((unsigned long)ring->vaddr -
                      (unsigned long)ring->vaddr_unaligned);

        params.ring_base_vaddr = ring->vaddr;
        params.ring_base_paddr = ring->paddr;
        params.num_entries = num_entries;
        ath11k_dp_srng_msi_setup(ab, &params, type, ring_num + mac_id);

        switch (type) {
        case HAL_REO_DST:
                params.intr_batch_cntr_thres_entries =
                                        HAL_SRNG_INT_BATCH_THRESHOLD_RX;
                params.intr_timer_thres_us = HAL_SRNG_INT_TIMER_THRESHOLD_RX;
                break;
        case HAL_RXDMA_BUF:
        case HAL_RXDMA_MONITOR_BUF:
        case HAL_RXDMA_MONITOR_STATUS:
                params.low_threshold = num_entries >> 3;
                params.flags |= HAL_SRNG_FLAGS_LOW_THRESH_INTR_EN;
                params.intr_batch_cntr_thres_entries = 0;
                params.intr_timer_thres_us = HAL_SRNG_INT_TIMER_THRESHOLD_RX;
                break;
        case HAL_WBM2SW_RELEASE:
                if (ring_num < 3) {
                        params.intr_batch_cntr_thres_entries =
                                        HAL_SRNG_INT_BATCH_THRESHOLD_TX;
                        params.intr_timer_thres_us =
                                        HAL_SRNG_INT_TIMER_THRESHOLD_TX;
                        break;
                }
                /* fall through when ring_num >= 3 */
                fallthrough;
        case HAL_REO_EXCEPTION:
        case HAL_REO_REINJECT:
        case HAL_REO_CMD:
        case HAL_REO_STATUS:
        case HAL_TCL_DATA:
        case HAL_TCL_CMD:
        case HAL_TCL_STATUS:
        case HAL_WBM_IDLE_LINK:
        case HAL_SW2WBM_RELEASE:
        case HAL_RXDMA_DST:
        case HAL_RXDMA_MONITOR_DST:
        case HAL_RXDMA_MONITOR_DESC:
                params.intr_batch_cntr_thres_entries =
                                        HAL_SRNG_INT_BATCH_THRESHOLD_OTHER;
                params.intr_timer_thres_us = HAL_SRNG_INT_TIMER_THRESHOLD_OTHER;
                break;
        case HAL_RXDMA_DIR_BUF:
                break;
        default:
                ath11k_warn(ab, "Not a valid ring type in dp :%d\n", type);
                return -EINVAL;
        }

        ret = ath11k_hal_srng_setup(ab, type, ring_num, mac_id, &params);
        if (ret < 0) {
                ath11k_warn(ab, "failed to setup srng: %d ring_id %d\n",
                            ret, ring_num);
                return ret;
        }

        ring->ring_id = ret;

        return 0;
}

void ath11k_dp_stop_shadow_timers(struct ath11k_base *ab)
{
        int i;

        if (!ab->hw_params.supports_shadow_regs)
                return;

        for (i = 0; i < DP_TCL_NUM_RING_MAX; i++)
                ath11k_dp_shadow_stop_timer(ab, &ab->dp.tx_ring_timer[i]);

        ath11k_dp_shadow_stop_timer(ab, &ab->dp.reo_cmd_timer);
}

static void ath11k_dp_srng_common_cleanup(struct ath11k_base *ab)
{
        struct ath11k_dp *dp = &ab->dp;
        int i;

        ath11k_dp_stop_shadow_timers(ab);
        ath11k_dp_srng_cleanup(ab, &dp->wbm_desc_rel_ring);
        ath11k_dp_srng_cleanup(ab, &dp->tcl_cmd_ring);
        ath11k_dp_srng_cleanup(ab, &dp->tcl_status_ring);
        for (i = 0; i < DP_TCL_NUM_RING_MAX; i++) {
                ath11k_dp_srng_cleanup(ab, &dp->tx_ring[i].tcl_data_ring);
                ath11k_dp_srng_cleanup(ab, &dp->tx_ring[i].tcl_comp_ring);
        }
        ath11k_dp_srng_cleanup(ab, &dp->reo_reinject_ring);
        ath11k_dp_srng_cleanup(ab, &dp->rx_rel_ring);
        ath11k_dp_srng_cleanup(ab, &dp->reo_except_ring);
        ath11k_dp_srng_cleanup(ab, &dp->reo_cmd_ring);
        ath11k_dp_srng_cleanup(ab, &dp->reo_status_ring);
}

static int ath11k_dp_srng_common_setup(struct ath11k_base *ab)
{
        struct ath11k_dp *dp = &ab->dp;
        struct hal_srng *srng;
        int i, ret;
        u32 ring_hash_map;

        ret = ath11k_dp_srng_setup(ab, &dp->wbm_desc_rel_ring,
                                   HAL_SW2WBM_RELEASE, 0, 0,
                                   DP_WBM_RELEASE_RING_SIZE);
        if (ret) {
                ath11k_warn(ab, "failed to set up wbm2sw_release ring :%d\n",
                            ret);
                goto err;
        }

        ret = ath11k_dp_srng_setup(ab, &dp->tcl_cmd_ring, HAL_TCL_CMD, 0, 0,
                                   DP_TCL_CMD_RING_SIZE);
        if (ret) {
                ath11k_warn(ab, "failed to set up tcl_cmd ring :%d\n", ret);
                goto err;
        }

        ret = ath11k_dp_srng_setup(ab, &dp->tcl_status_ring, HAL_TCL_STATUS,
                                   0, 0, DP_TCL_STATUS_RING_SIZE);
        if (ret) {
                ath11k_warn(ab, "failed to set up tcl_status ring :%d\n", ret);
                goto err;
        }

        for (i = 0; i < DP_TCL_NUM_RING_MAX; i++) {
                ret = ath11k_dp_srng_setup(ab, &dp->tx_ring[i].tcl_data_ring,
                                           HAL_TCL_DATA, i, 0,
                                           DP_TCL_DATA_RING_SIZE);
                if (ret) {
                        ath11k_warn(ab, "failed to set up tcl_data ring (%d) :%d\n",
                                    i, ret);
                        goto err;
                }

                ret = ath11k_dp_srng_setup(ab, &dp->tx_ring[i].tcl_comp_ring,
                                           HAL_WBM2SW_RELEASE, i, 0,
                                           DP_TX_COMP_RING_SIZE);
                if (ret) {
                        ath11k_warn(ab, "failed to set up tcl_comp ring (%d) :%d\n",
                                    i, ret);
                        goto err;
                }

                srng = &ab->hal.srng_list[dp->tx_ring[i].tcl_data_ring.ring_id];
                ath11k_hal_tx_init_data_ring(ab, srng);

                ath11k_dp_shadow_init_timer(ab, &dp->tx_ring_timer[i],
                                            ATH11K_SHADOW_DP_TIMER_INTERVAL,
                                            dp->tx_ring[i].tcl_data_ring.ring_id);
        }

        ret = ath11k_dp_srng_setup(ab, &dp->reo_reinject_ring, HAL_REO_REINJECT,
                                   0, 0, DP_REO_REINJECT_RING_SIZE);
        if (ret) {
                ath11k_warn(ab, "failed to set up reo_reinject ring :%d\n",
                            ret);
                goto err;
        }

        ret = ath11k_dp_srng_setup(ab, &dp->rx_rel_ring, HAL_WBM2SW_RELEASE,
                                   3, 0, DP_RX_RELEASE_RING_SIZE);
        if (ret) {
                ath11k_warn(ab, "failed to set up rx_rel ring :%d\n", ret);
                goto err;
        }

        ret = ath11k_dp_srng_setup(ab, &dp->reo_except_ring, HAL_REO_EXCEPTION,
                                   0, 0, DP_REO_EXCEPTION_RING_SIZE);
        if (ret) {
                ath11k_warn(ab, "failed to set up reo_exception ring :%d\n",
                            ret);
                goto err;
        }

        ret = ath11k_dp_srng_setup(ab, &dp->reo_cmd_ring, HAL_REO_CMD,
                                   0, 0, DP_REO_CMD_RING_SIZE);
        if (ret) {
                ath11k_warn(ab, "failed to set up reo_cmd ring :%d\n", ret);
                goto err;
        }

        srng = &ab->hal.srng_list[dp->reo_cmd_ring.ring_id];
        ath11k_hal_reo_init_cmd_ring(ab, srng);

        ath11k_dp_shadow_init_timer(ab, &dp->reo_cmd_timer,
                                    ATH11K_SHADOW_CTRL_TIMER_INTERVAL,
                                    dp->reo_cmd_ring.ring_id);

        ret = ath11k_dp_srng_setup(ab, &dp->reo_status_ring, HAL_REO_STATUS,
                                   0, 0, DP_REO_STATUS_RING_SIZE);
        if (ret) {
                ath11k_warn(ab, "failed to set up reo_status ring :%d\n", ret);
                goto err;
        }

        /* When hash based routing of rx packet is enabled, 32 entries to map
         * the hash values to the ring will be configured. Each hash entry uses
         * three bits to map to a particular ring. The ring mapping will be
         * 0:TCL, 1:SW1, 2:SW2, 3:SW3, 4:SW4, 5:Release, 6:FW and 7:Not used.
         */
        ring_hash_map = HAL_HASH_ROUTING_RING_SW1 << 0 |
                        HAL_HASH_ROUTING_RING_SW2 << 3 |
                        HAL_HASH_ROUTING_RING_SW3 << 6 |
                        HAL_HASH_ROUTING_RING_SW4 << 9 |
                        HAL_HASH_ROUTING_RING_SW1 << 12 |
                        HAL_HASH_ROUTING_RING_SW2 << 15 |
                        HAL_HASH_ROUTING_RING_SW3 << 18 |
                        HAL_HASH_ROUTING_RING_SW4 << 21;
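
        /* With the SW1..SW4 routing constants numbered 1..4 (as the HAL
         * names suggest), the eight 3-bit fields above spread hash values
         * 0-7 round-robin across the four SW rings:
         * SW1, SW2, SW3, SW4, SW1, SW2, SW3, SW4.
         */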
        ath11k_hal_reo_hw_setup(ab, ring_hash_map);

        return 0;

err:
        ath11k_dp_srng_common_cleanup(ab);

        return ret;
}

static void ath11k_dp_scatter_idle_link_desc_cleanup(struct ath11k_base *ab)
{
        struct ath11k_dp *dp = &ab->dp;
        struct hal_wbm_idle_scatter_list *slist = dp->scatter_list;
        int i;

        for (i = 0; i < DP_IDLE_SCATTER_BUFS_MAX; i++) {
                if (!slist[i].vaddr)
                        continue;

                dma_free_coherent(ab->dev, HAL_WBM_IDLE_SCATTER_BUF_SIZE_MAX,
                                  slist[i].vaddr, slist[i].paddr);
                slist[i].vaddr = NULL;
        }
}

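/* When the idle link descriptor list is too large for a single bank of
 * DP_LINK_DESC_ALLOC_SIZE_THRESH bytes, the descriptors are published
 * to the HW through a list of fixed-size scatter buffers instead: each
 * scatter buffer is filled with the addresses of the link descriptors
 * held in the banks, and the whole list is handed to the HAL at the end.
 */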
static int ath11k_dp_scatter_idle_link_desc_setup(struct ath11k_base *ab,
                                                  int size,
                                                  u32 n_link_desc_bank,
                                                  u32 n_link_desc,
                                                  u32 last_bank_sz)
{
        struct ath11k_dp *dp = &ab->dp;
        struct dp_link_desc_bank *link_desc_banks = dp->link_desc_banks;
        struct hal_wbm_idle_scatter_list *slist = dp->scatter_list;
        u32 n_entries_per_buf;
        int num_scatter_buf, scatter_idx;
        struct hal_wbm_link_desc *scatter_buf;
        int align_bytes, n_entries;
        dma_addr_t paddr;
        int rem_entries;
        int i;
        int ret = 0;
        u32 end_offset;

        n_entries_per_buf = HAL_WBM_IDLE_SCATTER_BUF_SIZE /
                            ath11k_hal_srng_get_entrysize(ab, HAL_WBM_IDLE_LINK);
        num_scatter_buf = DIV_ROUND_UP(size, HAL_WBM_IDLE_SCATTER_BUF_SIZE);

        if (num_scatter_buf > DP_IDLE_SCATTER_BUFS_MAX)
                return -EINVAL;

        for (i = 0; i < num_scatter_buf; i++) {
                slist[i].vaddr = dma_alloc_coherent(ab->dev,
                                                    HAL_WBM_IDLE_SCATTER_BUF_SIZE_MAX,
                                                    &slist[i].paddr, GFP_KERNEL);
                if (!slist[i].vaddr) {
                        ret = -ENOMEM;
                        goto err;
                }
        }

        scatter_idx = 0;
        scatter_buf = slist[scatter_idx].vaddr;
        rem_entries = n_entries_per_buf;

        for (i = 0; i < n_link_desc_bank; i++) {
                align_bytes = link_desc_banks[i].vaddr -
                              link_desc_banks[i].vaddr_unaligned;
                n_entries = (DP_LINK_DESC_ALLOC_SIZE_THRESH - align_bytes) /
                            HAL_LINK_DESC_SIZE;
                paddr = link_desc_banks[i].paddr;
                while (n_entries) {
                        ath11k_hal_set_link_desc_addr(scatter_buf, i, paddr);
                        n_entries--;
                        paddr += HAL_LINK_DESC_SIZE;
                        if (rem_entries) {
                                rem_entries--;
                                scatter_buf++;
                                continue;
                        }

                        rem_entries = n_entries_per_buf;
                        scatter_idx++;
                        scatter_buf = slist[scatter_idx].vaddr;
                }
        }

        end_offset = (scatter_buf - slist[scatter_idx].vaddr) *
                     sizeof(struct hal_wbm_link_desc);
        ath11k_hal_setup_link_idle_list(ab, slist, num_scatter_buf,
                                        n_link_desc, end_offset);

        return 0;

err:
        ath11k_dp_scatter_idle_link_desc_cleanup(ab);

        return ret;
}

static void
ath11k_dp_link_desc_bank_free(struct ath11k_base *ab,
                              struct dp_link_desc_bank *link_desc_banks)
{
        int i;

        for (i = 0; i < DP_LINK_DESC_BANKS_MAX; i++) {
                if (link_desc_banks[i].vaddr_unaligned) {
                        dma_free_coherent(ab->dev,
                                          link_desc_banks[i].size,
                                          link_desc_banks[i].vaddr_unaligned,
                                          link_desc_banks[i].paddr_unaligned);
                        link_desc_banks[i].vaddr_unaligned = NULL;
                }
        }
}

static int ath11k_dp_link_desc_bank_alloc(struct ath11k_base *ab,
                                          struct dp_link_desc_bank *desc_bank,
                                          int n_link_desc_bank,
                                          int last_bank_sz)
{
        struct ath11k_dp *dp = &ab->dp;
        int i;
        int ret = 0;
        int desc_sz = DP_LINK_DESC_ALLOC_SIZE_THRESH;

        for (i = 0; i < n_link_desc_bank; i++) {
                if (i == (n_link_desc_bank - 1) && last_bank_sz)
                        desc_sz = last_bank_sz;

                desc_bank[i].vaddr_unaligned =
                                dma_alloc_coherent(ab->dev, desc_sz,
                                                   &desc_bank[i].paddr_unaligned,
                                                   GFP_KERNEL);
                if (!desc_bank[i].vaddr_unaligned) {
                        ret = -ENOMEM;
                        goto err;
                }

                desc_bank[i].vaddr = PTR_ALIGN(desc_bank[i].vaddr_unaligned,
                                               HAL_LINK_DESC_ALIGN);
                desc_bank[i].paddr = desc_bank[i].paddr_unaligned +
                                     ((unsigned long)desc_bank[i].vaddr -
                                      (unsigned long)desc_bank[i].vaddr_unaligned);
                desc_bank[i].size = desc_sz;
        }

        return 0;

err:
        ath11k_dp_link_desc_bank_free(ab, dp->link_desc_banks);

        return ret;
}

void ath11k_dp_link_desc_cleanup(struct ath11k_base *ab,
                                 struct dp_link_desc_bank *desc_bank,
                                 u32 ring_type, struct dp_srng *ring)
{
        ath11k_dp_link_desc_bank_free(ab, desc_bank);

        if (ring_type != HAL_RXDMA_MONITOR_DESC) {
                ath11k_dp_srng_cleanup(ab, ring);
                ath11k_dp_scatter_idle_link_desc_cleanup(ab);
        }
}

static int ath11k_wbm_idle_ring_setup(struct ath11k_base *ab, u32 *n_link_desc)
{
        struct ath11k_dp *dp = &ab->dp;
        u32 n_mpdu_link_desc, n_mpdu_queue_desc;
        u32 n_tx_msdu_link_desc, n_rx_msdu_link_desc;
        int ret = 0;

        n_mpdu_link_desc = (DP_NUM_TIDS_MAX * DP_AVG_MPDUS_PER_TID_MAX) /
                           HAL_NUM_MPDUS_PER_LINK_DESC;

        n_mpdu_queue_desc = n_mpdu_link_desc /
                            HAL_NUM_MPDU_LINKS_PER_QUEUE_DESC;

        n_tx_msdu_link_desc = (DP_NUM_TIDS_MAX * DP_AVG_FLOWS_PER_TID *
                               DP_AVG_MSDUS_PER_FLOW) /
                              HAL_NUM_TX_MSDUS_PER_LINK_DESC;

        n_rx_msdu_link_desc = (DP_NUM_TIDS_MAX * DP_AVG_MPDUS_PER_TID_MAX *
                               DP_AVG_MSDUS_PER_MPDU) /
                              HAL_NUM_RX_MSDUS_PER_LINK_DESC;

        *n_link_desc = n_mpdu_link_desc + n_mpdu_queue_desc +
                       n_tx_msdu_link_desc + n_rx_msdu_link_desc;
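
        /* Round the descriptor count up to the next power of two: if any
         * bit below the most significant one is set, the value is not a
         * power of two, and 1 << fls() bumps it to the next one (for
         * example 3072 becomes 4096).
         */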
        if (*n_link_desc & (*n_link_desc - 1))
                *n_link_desc = 1 << fls(*n_link_desc);

        ret = ath11k_dp_srng_setup(ab, &dp->wbm_idle_ring,
                                   HAL_WBM_IDLE_LINK, 0, 0, *n_link_desc);
        if (ret)
                ath11k_warn(ab, "failed to setup wbm_idle_ring: %d\n", ret);

        return ret;
}

int ath11k_dp_link_desc_setup(struct ath11k_base *ab,
                              struct dp_link_desc_bank *link_desc_banks,
                              u32 ring_type, struct hal_srng *srng,
                              u32 n_link_desc)
{
        u32 tot_mem_sz;
        u32 n_link_desc_bank, last_bank_sz;
        u32 entry_sz, align_bytes, n_entries;
        u32 paddr;
        u32 *desc;
        int i, ret;

        tot_mem_sz = n_link_desc * HAL_LINK_DESC_SIZE;
        tot_mem_sz += HAL_LINK_DESC_ALIGN;

        if (tot_mem_sz <= DP_LINK_DESC_ALLOC_SIZE_THRESH) {
                n_link_desc_bank = 1;
                last_bank_sz = tot_mem_sz;
        } else {
                n_link_desc_bank = tot_mem_sz /
                                   (DP_LINK_DESC_ALLOC_SIZE_THRESH -
                                    HAL_LINK_DESC_ALIGN);
                last_bank_sz = tot_mem_sz %
                               (DP_LINK_DESC_ALLOC_SIZE_THRESH -
                                HAL_LINK_DESC_ALIGN);

                if (last_bank_sz)
                        n_link_desc_bank += 1;
        }

        if (n_link_desc_bank > DP_LINK_DESC_BANKS_MAX)
                return -EINVAL;

        ret = ath11k_dp_link_desc_bank_alloc(ab, link_desc_banks,
                                             n_link_desc_bank, last_bank_sz);
        if (ret)
                return ret;

        /* Setup link desc idle list for HW internal usage */
        entry_sz = ath11k_hal_srng_get_entrysize(ab, ring_type);
        tot_mem_sz = entry_sz * n_link_desc;

        /* Setup scatter desc list when the total memory requirement is more */
        if (tot_mem_sz > DP_LINK_DESC_ALLOC_SIZE_THRESH &&
            ring_type != HAL_RXDMA_MONITOR_DESC) {
                ret = ath11k_dp_scatter_idle_link_desc_setup(ab, tot_mem_sz,
                                                             n_link_desc_bank,
                                                             n_link_desc,
                                                             last_bank_sz);
                if (ret) {
                        ath11k_warn(ab, "failed to setup scattered idle list descriptor :%d\n",
                                    ret);
                        goto fail_desc_bank_free;
                }

                return 0;
        }

        spin_lock_bh(&srng->lock);

        ath11k_hal_srng_access_begin(ab, srng);

        for (i = 0; i < n_link_desc_bank; i++) {
                align_bytes = link_desc_banks[i].vaddr -
                              link_desc_banks[i].vaddr_unaligned;
                n_entries = (link_desc_banks[i].size - align_bytes) /
                            HAL_LINK_DESC_SIZE;
                paddr = link_desc_banks[i].paddr;
                while (n_entries &&
                       (desc = ath11k_hal_srng_src_get_next_entry(ab, srng))) {
                        ath11k_hal_set_link_desc_addr((struct hal_wbm_link_desc *)desc,
                                                      i, paddr);
                        n_entries--;
                        paddr += HAL_LINK_DESC_SIZE;
                }
        }

        ath11k_hal_srng_access_end(ab, srng);

        spin_unlock_bh(&srng->lock);

        return 0;

fail_desc_bank_free:
        ath11k_dp_link_desc_bank_free(ab, link_desc_banks);

        return ret;
}

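/* NAPI poll handler for one ext interrupt group. Every ring type whose
 * mask includes this group is serviced in turn; the remaining budget is
 * decremented after each rx-style handler so the total work stays within
 * the NAPI budget, while TX completions and REO status processing are
 * not budget-accounted.
 */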
int ath11k_dp_service_srng(struct ath11k_base *ab,
                           struct ath11k_ext_irq_grp *irq_grp,
                           int budget)
{
        struct napi_struct *napi = &irq_grp->napi;
        int grp_id = irq_grp->grp_id;
        int work_done = 0;
        int i = 0, j;
        int tot_work_done = 0;

        while (ab->hw_params.ring_mask->tx[grp_id] >> i) {
                if (ab->hw_params.ring_mask->tx[grp_id] & BIT(i))
                        ath11k_dp_tx_completion_handler(ab, i);
                i++;
        }

        if (ab->hw_params.ring_mask->rx_err[grp_id]) {
                work_done = ath11k_dp_process_rx_err(ab, napi, budget);
                budget -= work_done;
                tot_work_done += work_done;
                if (budget <= 0)
                        goto done;
        }

        if (ab->hw_params.ring_mask->rx_wbm_rel[grp_id]) {
                work_done = ath11k_dp_rx_process_wbm_err(ab,
                                                         napi,
                                                         budget);
                budget -= work_done;
                tot_work_done += work_done;

                if (budget <= 0)
                        goto done;
        }

        if (ab->hw_params.ring_mask->rx[grp_id]) {
                i = fls(ab->hw_params.ring_mask->rx[grp_id]) - 1;
                work_done = ath11k_dp_process_rx(ab, i, napi,
                                                 budget);
                budget -= work_done;
                tot_work_done += work_done;
                if (budget <= 0)
                        goto done;
        }

        if (ab->hw_params.ring_mask->rx_mon_status[grp_id]) {
                for (i = 0; i < ab->num_radios; i++) {
                        for (j = 0; j < ab->hw_params.num_rxmda_per_pdev; j++) {
                                int id = i * ab->hw_params.num_rxmda_per_pdev + j;

                                if (ab->hw_params.ring_mask->rx_mon_status[grp_id] &
                                        BIT(id)) {
                                        work_done =
                                        ath11k_dp_rx_process_mon_rings(ab,
                                                                       id,
                                                                       napi, budget);
                                        budget -= work_done;
                                        tot_work_done += work_done;

                                        if (budget <= 0)
                                                goto done;
                                }
                        }
                }
        }

        if (ab->hw_params.ring_mask->reo_status[grp_id])
                ath11k_dp_process_reo_status(ab);

        for (i = 0; i < ab->num_radios; i++) {
                for (j = 0; j < ab->hw_params.num_rxmda_per_pdev; j++) {
                        int id = i * ab->hw_params.num_rxmda_per_pdev + j;

                        if (ab->hw_params.ring_mask->rxdma2host[grp_id] & BIT(id)) {
                                work_done = ath11k_dp_process_rxdma_err(ab, id, budget);
                                budget -= work_done;
                                tot_work_done += work_done;
                        }

                        if (budget <= 0)
                                goto done;

                        if (ab->hw_params.ring_mask->host2rxdma[grp_id] & BIT(id)) {
                                struct ath11k *ar = ath11k_ab_to_ar(ab, id);
                                struct ath11k_pdev_dp *dp = &ar->dp;
                                struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring;

                                ath11k_dp_rxbufs_replenish(ab, id, rx_ring, 0,
                                                           HAL_RX_BUF_RBM_SW3_BM);
                        }
                }
        }
        /* TODO: Implement handler for other interrupts */

done:
        return tot_work_done;
}
EXPORT_SYMBOL(ath11k_dp_service_srng);

void ath11k_dp_pdev_free(struct ath11k_base *ab)
{
        struct ath11k *ar;
        int i;

        del_timer_sync(&ab->mon_reap_timer);

        for (i = 0; i < ab->num_radios; i++) {
                ar = ab->pdevs[i].ar;
                ath11k_dp_rx_pdev_free(ab, i);
                ath11k_debugfs_unregister(ar);
                ath11k_dp_rx_pdev_mon_detach(ar);
        }
}

void ath11k_dp_pdev_pre_alloc(struct ath11k_base *ab)
{
        struct ath11k *ar;
        struct ath11k_pdev_dp *dp;
        int i;
        int j;

        for (i = 0; i < ab->num_radios; i++) {
                ar = ab->pdevs[i].ar;
                dp = &ar->dp;
                dp->mac_id = i;
                idr_init(&dp->rx_refill_buf_ring.bufs_idr);
                spin_lock_init(&dp->rx_refill_buf_ring.idr_lock);
                atomic_set(&dp->num_tx_pending, 0);
                init_waitqueue_head(&dp->tx_empty_waitq);
                for (j = 0; j < ab->hw_params.num_rxmda_per_pdev; j++) {
                        idr_init(&dp->rx_mon_status_refill_ring[j].bufs_idr);
                        spin_lock_init(&dp->rx_mon_status_refill_ring[j].idr_lock);
                }
                idr_init(&dp->rxdma_mon_buf_ring.bufs_idr);
                spin_lock_init(&dp->rxdma_mon_buf_ring.idr_lock);
        }
}

int ath11k_dp_pdev_alloc(struct ath11k_base *ab)
{
        struct ath11k *ar;
        int ret;
        int i;

        /* TODO: Per-pdev rx ring unlike tx ring which is mapped to different AC's */
        for (i = 0; i < ab->num_radios; i++) {
                ar = ab->pdevs[i].ar;
                ret = ath11k_dp_rx_pdev_alloc(ab, i);
                if (ret) {
                        ath11k_warn(ab, "failed to allocate pdev rx for pdev_id :%d\n",
                                    i);
                        goto err;
                }
                ret = ath11k_dp_rx_pdev_mon_attach(ar);
                if (ret) {
                        ath11k_warn(ab, "failed to initialize mon pdev %d\n",
                                    i);
                        goto err;
                }
        }

        return 0;

err:
        ath11k_dp_pdev_free(ab);

        return ret;
}

int ath11k_dp_htt_connect(struct ath11k_dp *dp)
{
        struct ath11k_htc_svc_conn_req conn_req;
        struct ath11k_htc_svc_conn_resp conn_resp;
        int status;

        memset(&conn_req, 0, sizeof(conn_req));
        memset(&conn_resp, 0, sizeof(conn_resp));

        conn_req.ep_ops.ep_tx_complete = ath11k_dp_htt_htc_tx_complete;
        conn_req.ep_ops.ep_rx_complete = ath11k_dp_htt_htc_t2h_msg_handler;

        /* connect to control service */
        conn_req.service_id = ATH11K_HTC_SVC_ID_HTT_DATA_MSG;

        status = ath11k_htc_connect_service(&dp->ab->htc, &conn_req,
                                            &conn_resp);
        if (status)
                return status;

        dp->eid = conn_resp.eid;

        return 0;
}

static void ath11k_dp_update_vdev_search(struct ath11k_vif *arvif)
{
        /* When v2_map_support is true: for STA mode, enable address
         * search index, tcl uses ast_hash value in the descriptor.
         * When v2_map_support is false: for STA mode, don't enable
         * address search index.
         */
        switch (arvif->vdev_type) {
        case WMI_VDEV_TYPE_STA:
                if (arvif->ar->ab->hw_params.htt_peer_map_v2) {
                        arvif->hal_addr_search_flags = HAL_TX_ADDRX_EN;
                        arvif->search_type = HAL_TX_ADDR_SEARCH_INDEX;
                } else {
                        arvif->hal_addr_search_flags = HAL_TX_ADDRY_EN;
                        arvif->search_type = HAL_TX_ADDR_SEARCH_DEFAULT;
                }
                break;
        case WMI_VDEV_TYPE_AP:
        case WMI_VDEV_TYPE_IBSS:
                arvif->hal_addr_search_flags = HAL_TX_ADDRX_EN;
                arvif->search_type = HAL_TX_ADDR_SEARCH_DEFAULT;
                break;
        case WMI_VDEV_TYPE_MONITOR:
        default:
                return;
        }
}

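/* Seed the per-vif TCL metadata attached to every TX descriptor. The
 * TYPE field value 1 appears to select vdev-id based metadata rather
 * than peer-id based (a reading that follows from the
 * HTT_TCL_META_DATA_* field names); the vdev and pdev ids let the
 * firmware associate the frame with the right interface.
 */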
void ath11k_dp_vdev_tx_attach(struct ath11k *ar, struct ath11k_vif *arvif)
{
        arvif->tcl_metadata |= FIELD_PREP(HTT_TCL_META_DATA_TYPE, 1) |
                               FIELD_PREP(HTT_TCL_META_DATA_VDEV_ID,
                                          arvif->vdev_id) |
                               FIELD_PREP(HTT_TCL_META_DATA_PDEV_ID,
                                          ar->pdev->pdev_id);

        /* set HTT extension valid bit to 0 by default */
        arvif->tcl_metadata &= ~HTT_TCL_META_DATA_VALID_HTT;

        ath11k_dp_update_vdev_search(arvif);
}

static int ath11k_dp_tx_pending_cleanup(int buf_id, void *skb, void *ctx)
{
        struct ath11k_base *ab = (struct ath11k_base *)ctx;
        struct sk_buff *msdu = skb;

        dma_unmap_single(ab->dev, ATH11K_SKB_CB(msdu)->paddr, msdu->len,
                         DMA_TO_DEVICE);

        dev_kfree_skb_any(msdu);

        return 0;
}

void ath11k_dp_free(struct ath11k_base *ab)
{
        struct ath11k_dp *dp = &ab->dp;
        int i;

        ath11k_dp_link_desc_cleanup(ab, dp->link_desc_banks,
                                    HAL_WBM_IDLE_LINK, &dp->wbm_idle_ring);

        ath11k_dp_srng_common_cleanup(ab);

        ath11k_dp_reo_cmd_list_cleanup(ab);

        for (i = 0; i < DP_TCL_NUM_RING_MAX; i++) {
                spin_lock_bh(&dp->tx_ring[i].tx_idr_lock);
                idr_for_each(&dp->tx_ring[i].txbuf_idr,
                             ath11k_dp_tx_pending_cleanup, ab);
                idr_destroy(&dp->tx_ring[i].txbuf_idr);
                spin_unlock_bh(&dp->tx_ring[i].tx_idr_lock);
                kfree(dp->tx_ring[i].tx_status);
        }

        /* Deinit any SOC level resource */
}

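/* Allocate all SOC level data-path state in dependency order: the WBM
 * idle ring first (its size determines the link descriptor pool), then
 * the link descriptor banks, the common SRNGs, and finally the
 * per-TCL-ring TX status bookkeeping. Failures unwind in reverse
 * through the goto labels at the bottom.
 */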
int ath11k_dp_alloc(struct ath11k_base *ab)
{
        struct ath11k_dp *dp = &ab->dp;
        struct hal_srng *srng = NULL;
        size_t size = 0;
        u32 n_link_desc = 0;
        int ret;
        int i;

        dp->ab = ab;

        INIT_LIST_HEAD(&dp->reo_cmd_list);
        INIT_LIST_HEAD(&dp->reo_cmd_cache_flush_list);
        spin_lock_init(&dp->reo_cmd_lock);

        dp->reo_cmd_cache_flush_count = 0;

        ret = ath11k_wbm_idle_ring_setup(ab, &n_link_desc);
        if (ret) {
                ath11k_warn(ab, "failed to setup wbm_idle_ring: %d\n", ret);
                return ret;
        }

        srng = &ab->hal.srng_list[dp->wbm_idle_ring.ring_id];

        ret = ath11k_dp_link_desc_setup(ab, dp->link_desc_banks,
                                        HAL_WBM_IDLE_LINK, srng, n_link_desc);
        if (ret) {
                ath11k_warn(ab, "failed to setup link desc: %d\n", ret);
                return ret;
        }

        ret = ath11k_dp_srng_common_setup(ab);
        if (ret)
                goto fail_link_desc_cleanup;

        size = sizeof(struct hal_wbm_release_ring) * DP_TX_COMP_RING_SIZE;

        for (i = 0; i < DP_TCL_NUM_RING_MAX; i++) {
                idr_init(&dp->tx_ring[i].txbuf_idr);
                spin_lock_init(&dp->tx_ring[i].tx_idr_lock);
                dp->tx_ring[i].tcl_data_ring_id = i;

                dp->tx_ring[i].tx_status_head = 0;
                dp->tx_ring[i].tx_status_tail = DP_TX_COMP_RING_SIZE - 1;
                dp->tx_ring[i].tx_status = kmalloc(size, GFP_KERNEL);
                if (!dp->tx_ring[i].tx_status) {
                        ret = -ENOMEM;
                        goto fail_cmn_srng_cleanup;
                }
        }

        for (i = 0; i < HAL_DSCP_TID_MAP_TBL_NUM_ENTRIES_MAX; i++)
                ath11k_hal_tx_set_dscp_tid_map(ab, i);

        /* Init any SOC level resource for DP */

        return 0;

fail_cmn_srng_cleanup:
        ath11k_dp_srng_common_cleanup(ab);

fail_link_desc_cleanup:
        ath11k_dp_link_desc_cleanup(ab, dp->link_desc_banks,
                                    HAL_WBM_IDLE_LINK, &dp->wbm_idle_ring);

        return ret;
}

static void ath11k_dp_shadow_timer_handler(struct timer_list *t)
{
        struct ath11k_hp_update_timer *update_timer = from_timer(update_timer,
                                                                 t, timer);
        struct ath11k_base *ab = update_timer->ab;
        struct hal_srng *srng = &ab->hal.srng_list[update_timer->ring_id];

        spin_lock_bh(&srng->lock);

        /* When the timer fires, the handler checks whether new TX
         * activity happened. It updates the HP only when there were no
         * TX operations during the timeout interval, and then stops the
         * timer; the timer is started again when TX happens again.
         */
        if (update_timer->timer_tx_num != update_timer->tx_num) {
                update_timer->timer_tx_num = update_timer->tx_num;
                mod_timer(&update_timer->timer, jiffies +
                          msecs_to_jiffies(update_timer->interval));
        } else {
                update_timer->started = false;
                ath11k_hal_srng_shadow_update_hp_tp(ab, srng);
        }

        spin_unlock_bh(&srng->lock);
}

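/* Callers must hold srng->lock (checked via lockdep below). tx_num
 * counts TX operations on the ring; the handler above compares it
 * against the snapshot in timer_tx_num to decide whether the ring went
 * idle and the shadow head pointer can be flushed to hardware.
 */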
void ath11k_dp_shadow_start_timer(struct ath11k_base *ab,
                                  struct hal_srng *srng,
                                  struct ath11k_hp_update_timer *update_timer)
{
        lockdep_assert_held(&srng->lock);

        if (!ab->hw_params.supports_shadow_regs)
                return;

        update_timer->tx_num++;

        if (update_timer->started)
                return;

        update_timer->started = true;
        update_timer->timer_tx_num = update_timer->tx_num;
        mod_timer(&update_timer->timer, jiffies +
                  msecs_to_jiffies(update_timer->interval));
}

void ath11k_dp_shadow_stop_timer(struct ath11k_base *ab,
                                 struct ath11k_hp_update_timer *update_timer)
{
        if (!ab->hw_params.supports_shadow_regs)
                return;

        if (!update_timer->init)
                return;

        del_timer_sync(&update_timer->timer);
}

void ath11k_dp_shadow_init_timer(struct ath11k_base *ab,
                                 struct ath11k_hp_update_timer *update_timer,
                                 u32 interval, u32 ring_id)
{
        if (!ab->hw_params.supports_shadow_regs)
                return;

        update_timer->tx_num = 0;
        update_timer->timer_tx_num = 0;
        update_timer->ab = ab;
        update_timer->ring_id = ring_id;
        update_timer->interval = interval;
        update_timer->init = true;
        timer_setup(&update_timer->timer,
                    ath11k_dp_shadow_timer_handler, 0);
}