// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
 * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
 */

#include <crypto/hash.h>
#include "core.h"
#include "dp_tx.h"
#include "hal_tx.h"
#include "hif.h"
#include "debug.h"
#include "dp_rx.h"
#include "peer.h"
static void ath11k_dp_htt_htc_tx_complete(struct ath11k_base *ab,
					  struct sk_buff *skb)
{
	dev_kfree_skb_any(skb);
}
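
/* Per-peer DP teardown: drop the peer's RX TID/reorder state, the defrag
 * MIC hash context and the dp_setup_done flag under ab->base_lock.
 */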
void ath11k_dp_peer_cleanup(struct ath11k *ar, int vdev_id, const u8 *addr)
{
	struct ath11k_base *ab = ar->ab;
	struct ath11k_peer *peer;

	/* TODO: Any other peer specific DP cleanup */

	spin_lock_bh(&ab->base_lock);
	peer = ath11k_peer_find(ab, vdev_id, addr);
	if (!peer) {
		ath11k_warn(ab, "failed to lookup peer %pM on vdev %d\n",
			    addr, vdev_id);
		spin_unlock_bh(&ab->base_lock);
		return;
	}

	ath11k_peer_rx_tid_cleanup(ar, peer);
	peer->dp_setup_done = false;
	crypto_free_shash(peer->tfm_mmic);
	spin_unlock_bh(&ab->base_lock);
}
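
/* Per-peer DP setup: program the peer's default REO destination ring via
 * WMI (reo_dest is 1-based, unlike mac_id, and is packed together with
 * DP_RX_HASH_ENABLE into the routing value), then set up an RX reorder
 * queue for every TID plus the RX defrag context. On failure the TID
 * queues created so far are deleted again.
 */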
int ath11k_dp_peer_setup(struct ath11k *ar, int vdev_id, const u8 *addr)
{
	struct ath11k_base *ab = ar->ab;
	struct ath11k_peer *peer;
	u32 reo_dest;
	int ret = 0, tid = 0;

	/* NOTE: reo_dest ring id starts from 1 unlike mac_id which starts from 0 */
	reo_dest = ar->dp.mac_id + 1;
	ret = ath11k_wmi_set_peer_param(ar, addr, vdev_id,
					WMI_PEER_SET_DEFAULT_ROUTING,
					DP_RX_HASH_ENABLE | (reo_dest << 1));
	if (ret) {
		ath11k_warn(ab, "failed to set default routing %d peer :%pM vdev_id :%d\n",
			    ret, addr, vdev_id);
		return ret;
	}

	for (tid = 0; tid <= IEEE80211_NUM_TIDS; tid++) {
		ret = ath11k_peer_rx_tid_setup(ar, addr, vdev_id, tid, 1, 0,
					       HAL_PN_TYPE_NONE);
		if (ret) {
			ath11k_warn(ab, "failed to setup rxd tid queue for tid %d: %d\n",
				    tid, ret);
			goto peer_clean;
		}
	}

	ret = ath11k_peer_rx_frag_setup(ar, addr, vdev_id);
	if (ret) {
		ath11k_warn(ab, "failed to setup rx defrag context\n");
		tid--;
		goto peer_clean;
	}

	/* TODO: Setup other peer specific resource used in data path */

	return 0;

peer_clean:
	spin_lock_bh(&ab->base_lock);

	peer = ath11k_peer_find(ab, vdev_id, addr);
	if (!peer) {
		ath11k_warn(ab, "failed to find the peer to del rx tid\n");
		spin_unlock_bh(&ab->base_lock);
		return -ENOENT;
	}

	for (; tid >= 0; tid--)
		ath11k_peer_rx_tid_delete(ar, peer, tid);

	spin_unlock_bh(&ab->base_lock);

	return ret;
}
void ath11k_dp_srng_cleanup(struct ath11k_base *ab, struct dp_srng *ring)
{
	if (!ring->vaddr_unaligned)
		return;

	if (ring->cached) {
		dma_unmap_single(ab->dev, ring->paddr_unaligned, ring->size,
				 DMA_FROM_DEVICE);
		kfree(ring->vaddr_unaligned);
	} else {
		dma_free_coherent(ab->dev, ring->size, ring->vaddr_unaligned,
				  ring->paddr_unaligned);
	}

	ring->vaddr_unaligned = NULL;
}
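
/* A ring is serviced by the first ext interrupt group whose per-ring-class
 * bitmask has the ring's bit set; -ENOENT means no group owns it.
 */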
static int ath11k_dp_srng_find_ring_in_mask(int ring_num, const u8 *grp_mask)
{
	int ext_group_num;
	u8 mask = 1 << ring_num;

	for (ext_group_num = 0; ext_group_num < ATH11K_EXT_IRQ_GRP_NUM_MAX;
	     ext_group_num++) {
		if (mask & grp_mask[ext_group_num])
			return ext_group_num;
	}

	return -ENOENT;
}
static int ath11k_dp_srng_calculate_msi_group(struct ath11k_base *ab,
					      enum hal_ring_type type, int ring_num)
{
	const u8 *grp_mask;

	switch (type) {
	case HAL_WBM2SW_RELEASE:
		if (ring_num == DP_RX_RELEASE_RING_NUM) {
			grp_mask = &ab->hw_params.ring_mask->rx_wbm_rel[0];
			ring_num = 0;
		} else {
			grp_mask = &ab->hw_params.ring_mask->tx[0];
		}
		break;
	case HAL_REO_EXCEPTION:
		grp_mask = &ab->hw_params.ring_mask->rx_err[0];
		break;
	case HAL_REO_DST:
		grp_mask = &ab->hw_params.ring_mask->rx[0];
		break;
	case HAL_REO_STATUS:
		grp_mask = &ab->hw_params.ring_mask->reo_status[0];
		break;
	case HAL_RXDMA_MONITOR_STATUS:
	case HAL_RXDMA_MONITOR_DST:
		grp_mask = &ab->hw_params.ring_mask->rx_mon_status[0];
		break;
	case HAL_RXDMA_DST:
		grp_mask = &ab->hw_params.ring_mask->rxdma2host[0];
		break;
	case HAL_RXDMA_BUF:
		grp_mask = &ab->hw_params.ring_mask->host2rxdma[0];
		break;
	case HAL_RXDMA_MONITOR_BUF:
	case HAL_SW2WBM_RELEASE:
	case HAL_WBM_IDLE_LINK:
	case HAL_REO_REINJECT:
	case HAL_CE_DST_STATUS:
	default: /* default err case */
		return -ENOENT;
	}

	return ath11k_dp_srng_find_ring_in_mask(ring_num, grp_mask);
}
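
/* Derive the MSI vector for a ring from the ext interrupt group that owns
 * it: the 64-bit MSI address is assembled from the low/high halves reported
 * by the bus layer, and the MSI data is the group number folded into the
 * number of available DP MSI vectors plus the base data value.
 */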
static void ath11k_dp_srng_msi_setup(struct ath11k_base *ab,
				     struct hal_srng_params *ring_params,
				     enum hal_ring_type type, int ring_num)
{
	int msi_group_number, msi_data_count;
	u32 msi_data_start, msi_irq_start, addr_lo, addr_hi;
	int ret;

	ret = ath11k_get_user_msi_vector(ab, "DP",
					 &msi_data_count, &msi_data_start,
					 &msi_irq_start);
	if (ret)
		return;

	msi_group_number = ath11k_dp_srng_calculate_msi_group(ab, type,
							      ring_num);
	if (msi_group_number < 0) {
		ath11k_dbg(ab, ATH11K_DBG_PCI,
			   "ring not part of an ext_group; ring_type: %d,ring_num %d",
			   type, ring_num);
		ring_params->msi_addr = 0;
		ring_params->msi_data = 0;
		return;
	}

	if (msi_group_number > msi_data_count) {
		ath11k_dbg(ab, ATH11K_DBG_PCI,
			   "multiple msi_groups share one msi, msi_group_num %d",
			   msi_group_number);
	}

	ath11k_get_msi_address(ab, &addr_lo, &addr_hi);

	ring_params->msi_addr = addr_lo;
	ring_params->msi_addr |= (dma_addr_t)(((uint64_t)addr_hi) << 32);
	ring_params->msi_data = (msi_group_number % msi_data_count)
		+ msi_data_start;
	ring_params->flags |= HAL_SRNG_FLAGS_MSI_INTR;
}
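
/* Generic SRNG setup: size the ring to num_entries (clamped to the HAL
 * maximum), allocate it either from cacheable memory (kzalloc plus
 * dma_map_single, used for REO destination and TX completion rings when the
 * hardware supports it) or from coherent DMA memory, align the base to
 * HAL_RING_BASE_ALIGN, and pick interrupt mitigation thresholds per ring
 * type before registering the ring with the HAL.
 */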
int ath11k_dp_srng_setup(struct ath11k_base *ab, struct dp_srng *ring,
			 enum hal_ring_type type, int ring_num,
			 int mac_id, int num_entries)
{
	struct hal_srng_params params = { 0 };
	int entry_sz = ath11k_hal_srng_get_entrysize(ab, type);
	int max_entries = ath11k_hal_srng_get_max_entries(ab, type);
	int ret;
	bool cached = false;

	if (max_entries < 0 || entry_sz < 0)
		return -EINVAL;

	if (num_entries > max_entries)
		num_entries = max_entries;

	ring->size = (num_entries * entry_sz) + HAL_RING_BASE_ALIGN - 1;

	if (ab->hw_params.alloc_cacheable_memory) {
		/* Allocate the reo dst and tx completion rings from cacheable memory */
		switch (type) {
		case HAL_REO_DST:
		case HAL_WBM2SW_RELEASE:
			cached = true;
			break;
		default:
			cached = false;
		}

		if (cached) {
			ring->vaddr_unaligned = kzalloc(ring->size, GFP_KERNEL);
			if (!ring->vaddr_unaligned)
				return -ENOMEM;

			ring->paddr_unaligned = dma_map_single(ab->dev,
							       ring->vaddr_unaligned,
							       ring->size,
							       DMA_FROM_DEVICE);
			if (dma_mapping_error(ab->dev, ring->paddr_unaligned)) {
				kfree(ring->vaddr_unaligned);
				ring->vaddr_unaligned = NULL;
				return -ENOMEM;
			}
		}
	}

	if (!cached) {
		ring->vaddr_unaligned = dma_alloc_coherent(ab->dev, ring->size,
							   &ring->paddr_unaligned,
							   GFP_KERNEL);
		if (!ring->vaddr_unaligned)
			return -ENOMEM;
	}

	ring->vaddr = PTR_ALIGN(ring->vaddr_unaligned, HAL_RING_BASE_ALIGN);
	ring->paddr = ring->paddr_unaligned + ((unsigned long)ring->vaddr -
		      (unsigned long)ring->vaddr_unaligned);

	params.ring_base_vaddr = ring->vaddr;
	params.ring_base_paddr = ring->paddr;
	params.num_entries = num_entries;
	ath11k_dp_srng_msi_setup(ab, &params, type, ring_num + mac_id);

	switch (type) {
	case HAL_REO_DST:
		params.intr_batch_cntr_thres_entries =
					HAL_SRNG_INT_BATCH_THRESHOLD_RX;
		params.intr_timer_thres_us = HAL_SRNG_INT_TIMER_THRESHOLD_RX;
		break;
	case HAL_RXDMA_BUF:
	case HAL_RXDMA_MONITOR_BUF:
	case HAL_RXDMA_MONITOR_STATUS:
		params.low_threshold = num_entries >> 3;
		params.flags |= HAL_SRNG_FLAGS_LOW_THRESH_INTR_EN;
		params.intr_batch_cntr_thres_entries = 0;
		params.intr_timer_thres_us = HAL_SRNG_INT_TIMER_THRESHOLD_RX;
		break;
	case HAL_WBM2SW_RELEASE:
		if (ring_num < 3) {
			params.intr_batch_cntr_thres_entries =
					HAL_SRNG_INT_BATCH_THRESHOLD_TX;
			params.intr_timer_thres_us =
					HAL_SRNG_INT_TIMER_THRESHOLD_TX;
			break;
		}
		/* follow through when ring_num >= 3 */
		fallthrough;
	case HAL_REO_EXCEPTION:
	case HAL_REO_REINJECT:
	case HAL_REO_CMD:
	case HAL_REO_STATUS:
	case HAL_TCL_DATA:
	case HAL_TCL_CMD:
	case HAL_TCL_STATUS:
	case HAL_WBM_IDLE_LINK:
	case HAL_SW2WBM_RELEASE:
	case HAL_RXDMA_DST:
	case HAL_RXDMA_MONITOR_DST:
	case HAL_RXDMA_MONITOR_DESC:
		params.intr_batch_cntr_thres_entries =
					HAL_SRNG_INT_BATCH_THRESHOLD_OTHER;
		params.intr_timer_thres_us = HAL_SRNG_INT_TIMER_THRESHOLD_OTHER;
		break;
	case HAL_RXDMA_DIR_BUF:
		break;
	default:
		ath11k_warn(ab, "Not a valid ring type in dp :%d\n", type);
		return -EINVAL;
	}

	if (cached) {
		params.flags |= HAL_SRNG_FLAGS_CACHED;
		ring->cached = 1;
	}

	ret = ath11k_hal_srng_setup(ab, type, ring_num, mac_id, &params);
	if (ret < 0) {
		ath11k_warn(ab, "failed to setup srng: %d ring_id %d\n",
			    ret, ring_num);
		return ret;
	}

	ring->ring_id = ret;

	return 0;
}
void ath11k_dp_stop_shadow_timers(struct ath11k_base *ab)
{
	int i;

	if (!ab->hw_params.supports_shadow_regs)
		return;

	for (i = 0; i < ab->hw_params.max_tx_ring; i++)
		ath11k_dp_shadow_stop_timer(ab, &ab->dp.tx_ring_timer[i]);

	ath11k_dp_shadow_stop_timer(ab, &ab->dp.reo_cmd_timer);
}
static void ath11k_dp_srng_common_cleanup(struct ath11k_base *ab)
{
	struct ath11k_dp *dp = &ab->dp;
	int i;

	ath11k_dp_stop_shadow_timers(ab);
	ath11k_dp_srng_cleanup(ab, &dp->wbm_desc_rel_ring);
	ath11k_dp_srng_cleanup(ab, &dp->tcl_cmd_ring);
	ath11k_dp_srng_cleanup(ab, &dp->tcl_status_ring);
	for (i = 0; i < ab->hw_params.max_tx_ring; i++) {
		ath11k_dp_srng_cleanup(ab, &dp->tx_ring[i].tcl_data_ring);
		ath11k_dp_srng_cleanup(ab, &dp->tx_ring[i].tcl_comp_ring);
	}
	ath11k_dp_srng_cleanup(ab, &dp->reo_reinject_ring);
	ath11k_dp_srng_cleanup(ab, &dp->rx_rel_ring);
	ath11k_dp_srng_cleanup(ab, &dp->reo_except_ring);
	ath11k_dp_srng_cleanup(ab, &dp->reo_cmd_ring);
	ath11k_dp_srng_cleanup(ab, &dp->reo_status_ring);
}
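
/* Set up the SoC-common rings: SW2WBM release, TCL command/status, one TCL
 * data and WBM2SW completion ring per TX ring (using the tcl2wbm_rbm_map
 * from hw_params), the REO reinject/exception/command/status rings and the
 * RX release ring, plus the shadow-register update timers where needed.
 * Any failure unwinds through ath11k_dp_srng_common_cleanup().
 */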
static int ath11k_dp_srng_common_setup(struct ath11k_base *ab)
{
	struct ath11k_dp *dp = &ab->dp;
	struct hal_srng *srng;
	int i, ret;
	u8 tcl_num, wbm_num;

	ret = ath11k_dp_srng_setup(ab, &dp->wbm_desc_rel_ring,
				   HAL_SW2WBM_RELEASE, 0, 0,
				   DP_WBM_RELEASE_RING_SIZE);
	if (ret) {
		ath11k_warn(ab, "failed to set up wbm2sw_release ring :%d\n",
			    ret);
		goto err;
	}

	ret = ath11k_dp_srng_setup(ab, &dp->tcl_cmd_ring, HAL_TCL_CMD, 0, 0,
				   DP_TCL_CMD_RING_SIZE);
	if (ret) {
		ath11k_warn(ab, "failed to set up tcl_cmd ring :%d\n", ret);
		goto err;
	}

	ret = ath11k_dp_srng_setup(ab, &dp->tcl_status_ring, HAL_TCL_STATUS,
				   0, 0, DP_TCL_STATUS_RING_SIZE);
	if (ret) {
		ath11k_warn(ab, "failed to set up tcl_status ring :%d\n", ret);
		goto err;
	}

	for (i = 0; i < ab->hw_params.max_tx_ring; i++) {
		tcl_num = ab->hw_params.hal_params->tcl2wbm_rbm_map[i].tcl_ring_num;
		wbm_num = ab->hw_params.hal_params->tcl2wbm_rbm_map[i].wbm_ring_num;

		ret = ath11k_dp_srng_setup(ab, &dp->tx_ring[i].tcl_data_ring,
					   HAL_TCL_DATA, tcl_num, 0,
					   ab->hw_params.tx_ring_size);
		if (ret) {
			ath11k_warn(ab, "failed to set up tcl_data ring (%d) :%d\n",
				    i, ret);
			goto err;
		}

		ret = ath11k_dp_srng_setup(ab, &dp->tx_ring[i].tcl_comp_ring,
					   HAL_WBM2SW_RELEASE, wbm_num, 0,
					   DP_TX_COMP_RING_SIZE);
		if (ret) {
			ath11k_warn(ab, "failed to set up tcl_comp ring (%d) :%d\n",
				    i, ret);
			goto err;
		}

		srng = &ab->hal.srng_list[dp->tx_ring[i].tcl_data_ring.ring_id];
		ath11k_hal_tx_init_data_ring(ab, srng);

		ath11k_dp_shadow_init_timer(ab, &dp->tx_ring_timer[i],
					    ATH11K_SHADOW_DP_TIMER_INTERVAL,
					    dp->tx_ring[i].tcl_data_ring.ring_id);
	}

	ret = ath11k_dp_srng_setup(ab, &dp->reo_reinject_ring, HAL_REO_REINJECT,
				   0, 0, DP_REO_REINJECT_RING_SIZE);
	if (ret) {
		ath11k_warn(ab, "failed to set up reo_reinject ring :%d\n",
			    ret);
		goto err;
	}

	ret = ath11k_dp_srng_setup(ab, &dp->rx_rel_ring, HAL_WBM2SW_RELEASE,
				   DP_RX_RELEASE_RING_NUM, 0, DP_RX_RELEASE_RING_SIZE);
	if (ret) {
		ath11k_warn(ab, "failed to set up rx_rel ring :%d\n", ret);
		goto err;
	}

	ret = ath11k_dp_srng_setup(ab, &dp->reo_except_ring, HAL_REO_EXCEPTION,
				   0, 0, DP_REO_EXCEPTION_RING_SIZE);
	if (ret) {
		ath11k_warn(ab, "failed to set up reo_exception ring :%d\n",
			    ret);
		goto err;
	}

	ret = ath11k_dp_srng_setup(ab, &dp->reo_cmd_ring, HAL_REO_CMD,
				   0, 0, DP_REO_CMD_RING_SIZE);
	if (ret) {
		ath11k_warn(ab, "failed to set up reo_cmd ring :%d\n", ret);
		goto err;
	}

	srng = &ab->hal.srng_list[dp->reo_cmd_ring.ring_id];
	ath11k_hal_reo_init_cmd_ring(ab, srng);

	ath11k_dp_shadow_init_timer(ab, &dp->reo_cmd_timer,
				    ATH11K_SHADOW_CTRL_TIMER_INTERVAL,
				    dp->reo_cmd_ring.ring_id);

	ret = ath11k_dp_srng_setup(ab, &dp->reo_status_ring, HAL_REO_STATUS,
				   0, 0, DP_REO_STATUS_RING_SIZE);
	if (ret) {
		ath11k_warn(ab, "failed to set up reo_status ring :%d\n", ret);
		goto err;
	}

	/* When hash based routing of rx packet is enabled, 32 entries to map
	 * the hash values to the ring will be configured.
	 */
	ab->hw_params.hw_ops->reo_setup(ab);

	return 0;

err:
	ath11k_dp_srng_common_cleanup(ab);

	return ret;
}
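
/* The WBM idle link descriptor list may be too large for a single
 * allocation; in that case it is spread across up to
 * DP_IDLE_SCATTER_BUFS_MAX scatter buffers and handed to the HAL via
 * ath11k_hal_setup_link_idle_list() instead of being written through the
 * WBM idle link ring directly.
 */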
static void ath11k_dp_scatter_idle_link_desc_cleanup(struct ath11k_base *ab)
{
	struct ath11k_dp *dp = &ab->dp;
	struct hal_wbm_idle_scatter_list *slist = dp->scatter_list;
	int i;

	for (i = 0; i < DP_IDLE_SCATTER_BUFS_MAX; i++) {
		if (!slist[i].vaddr)
			continue;

		dma_free_coherent(ab->dev, HAL_WBM_IDLE_SCATTER_BUF_SIZE_MAX,
				  slist[i].vaddr, slist[i].paddr);
		slist[i].vaddr = NULL;
	}
}
static int ath11k_dp_scatter_idle_link_desc_setup(struct ath11k_base *ab,
						  int size,
						  u32 n_link_desc_bank,
						  u32 n_link_desc,
						  u32 last_bank_sz)
{
	struct ath11k_dp *dp = &ab->dp;
	struct dp_link_desc_bank *link_desc_banks = dp->link_desc_banks;
	struct hal_wbm_idle_scatter_list *slist = dp->scatter_list;
	u32 n_entries_per_buf;
	int num_scatter_buf, scatter_idx;
	struct hal_wbm_link_desc *scatter_buf;
	int align_bytes, n_entries;
	dma_addr_t paddr;
	int rem_entries;
	int i;
	int ret = 0;
	u32 end_offset;

	n_entries_per_buf = HAL_WBM_IDLE_SCATTER_BUF_SIZE /
			    ath11k_hal_srng_get_entrysize(ab, HAL_WBM_IDLE_LINK);
	num_scatter_buf = DIV_ROUND_UP(size, HAL_WBM_IDLE_SCATTER_BUF_SIZE);

	if (num_scatter_buf > DP_IDLE_SCATTER_BUFS_MAX)
		return -EINVAL;

	for (i = 0; i < num_scatter_buf; i++) {
		slist[i].vaddr = dma_alloc_coherent(ab->dev,
						    HAL_WBM_IDLE_SCATTER_BUF_SIZE_MAX,
						    &slist[i].paddr, GFP_KERNEL);
		if (!slist[i].vaddr) {
			ret = -ENOMEM;
			goto err;
		}
	}

	scatter_idx = 0;
	scatter_buf = slist[scatter_idx].vaddr;
	rem_entries = n_entries_per_buf;

	for (i = 0; i < n_link_desc_bank; i++) {
		align_bytes = link_desc_banks[i].vaddr -
			      link_desc_banks[i].vaddr_unaligned;
		n_entries = (DP_LINK_DESC_ALLOC_SIZE_THRESH - align_bytes) /
			    HAL_LINK_DESC_SIZE;
		paddr = link_desc_banks[i].paddr;
		while (n_entries) {
			ath11k_hal_set_link_desc_addr(scatter_buf, i, paddr);
			n_entries--;
			paddr += HAL_LINK_DESC_SIZE;
			if (rem_entries) {
				rem_entries--;
				scatter_buf++;
				continue;
			}

			rem_entries = n_entries_per_buf;
			scatter_idx++;
			scatter_buf = slist[scatter_idx].vaddr;
		}
	}

	end_offset = (scatter_buf - slist[scatter_idx].vaddr) *
		     sizeof(struct hal_wbm_link_desc);
	ath11k_hal_setup_link_idle_list(ab, slist, num_scatter_buf,
					n_link_desc, end_offset);

	return 0;

err:
	ath11k_dp_scatter_idle_link_desc_cleanup(ab);

	return ret;
}
static void
ath11k_dp_link_desc_bank_free(struct ath11k_base *ab,
			      struct dp_link_desc_bank *link_desc_banks)
{
	int i;

	for (i = 0; i < DP_LINK_DESC_BANKS_MAX; i++) {
		if (link_desc_banks[i].vaddr_unaligned) {
			dma_free_coherent(ab->dev,
					  link_desc_banks[i].size,
					  link_desc_banks[i].vaddr_unaligned,
					  link_desc_banks[i].paddr_unaligned);
			link_desc_banks[i].vaddr_unaligned = NULL;
		}
	}
}
static int ath11k_dp_link_desc_bank_alloc(struct ath11k_base *ab,
					  struct dp_link_desc_bank *desc_bank,
					  int n_link_desc_bank,
					  int last_bank_sz)
{
	struct ath11k_dp *dp = &ab->dp;
	int i;
	int ret = 0;
	int desc_sz = DP_LINK_DESC_ALLOC_SIZE_THRESH;

	for (i = 0; i < n_link_desc_bank; i++) {
		if (i == (n_link_desc_bank - 1) && last_bank_sz)
			desc_sz = last_bank_sz;

		desc_bank[i].vaddr_unaligned =
					dma_alloc_coherent(ab->dev, desc_sz,
							   &desc_bank[i].paddr_unaligned,
							   GFP_ATOMIC);
		if (!desc_bank[i].vaddr_unaligned) {
			ret = -ENOMEM;
			goto err;
		}

		desc_bank[i].vaddr = PTR_ALIGN(desc_bank[i].vaddr_unaligned,
					       HAL_LINK_DESC_ALIGN);
		desc_bank[i].paddr = desc_bank[i].paddr_unaligned +
				     ((unsigned long)desc_bank[i].vaddr -
				      (unsigned long)desc_bank[i].vaddr_unaligned);
		desc_bank[i].size = desc_sz;
	}

	return 0;

err:
	ath11k_dp_link_desc_bank_free(ab, dp->link_desc_banks);

	return ret;
}
void ath11k_dp_link_desc_cleanup(struct ath11k_base *ab,
				 struct dp_link_desc_bank *desc_bank,
				 u32 ring_type, struct dp_srng *ring)
{
	ath11k_dp_link_desc_bank_free(ab, desc_bank);

	if (ring_type != HAL_RXDMA_MONITOR_DESC) {
		ath11k_dp_srng_cleanup(ab, ring);
		ath11k_dp_scatter_idle_link_desc_cleanup(ab);
	}
}
static int ath11k_wbm_idle_ring_setup(struct ath11k_base *ab, u32 *n_link_desc)
{
	struct ath11k_dp *dp = &ab->dp;
	u32 n_mpdu_link_desc, n_mpdu_queue_desc;
	u32 n_tx_msdu_link_desc, n_rx_msdu_link_desc;
	int ret = 0;

	n_mpdu_link_desc = (DP_NUM_TIDS_MAX * DP_AVG_MPDUS_PER_TID_MAX) /
			   HAL_NUM_MPDUS_PER_LINK_DESC;

	n_mpdu_queue_desc = n_mpdu_link_desc /
			    HAL_NUM_MPDU_LINKS_PER_QUEUE_DESC;

	n_tx_msdu_link_desc = (DP_NUM_TIDS_MAX * DP_AVG_FLOWS_PER_TID *
			       DP_AVG_MSDUS_PER_FLOW) /
			      HAL_NUM_TX_MSDUS_PER_LINK_DESC;

	n_rx_msdu_link_desc = (DP_NUM_TIDS_MAX * DP_AVG_MPDUS_PER_TID_MAX *
			       DP_AVG_MSDUS_PER_MPDU) /
			      HAL_NUM_RX_MSDUS_PER_LINK_DESC;

	*n_link_desc = n_mpdu_link_desc + n_mpdu_queue_desc +
		       n_tx_msdu_link_desc + n_rx_msdu_link_desc;

	if (*n_link_desc & (*n_link_desc - 1))
		*n_link_desc = 1 << fls(*n_link_desc);

	ret = ath11k_dp_srng_setup(ab, &dp->wbm_idle_ring,
				   HAL_WBM_IDLE_LINK, 0, 0, *n_link_desc);
	if (ret) {
		ath11k_warn(ab, "failed to setup wbm_idle_ring: %d\n", ret);
		return ret;
	}

	return ret;
}
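
/* Populate the idle list with the DMA addresses of all link descriptors:
 * either directly through the WBM idle link ring, or via the scatter list
 * when the total size exceeds DP_LINK_DESC_ALLOC_SIZE_THRESH (except for
 * the monitor descriptor ring, which never uses the scatter list).
 */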
int ath11k_dp_link_desc_setup(struct ath11k_base *ab,
			      struct dp_link_desc_bank *link_desc_banks,
			      u32 ring_type, struct hal_srng *srng,
			      u32 n_link_desc)
{
	u32 tot_mem_sz;
	u32 n_link_desc_bank, last_bank_sz;
	u32 entry_sz, align_bytes, n_entries;
	u32 paddr;
	u32 *desc;
	int i, ret;

	tot_mem_sz = n_link_desc * HAL_LINK_DESC_SIZE;
	tot_mem_sz += HAL_LINK_DESC_ALIGN;

	if (tot_mem_sz <= DP_LINK_DESC_ALLOC_SIZE_THRESH) {
		n_link_desc_bank = 1;
		last_bank_sz = tot_mem_sz;
	} else {
		n_link_desc_bank = tot_mem_sz /
				   (DP_LINK_DESC_ALLOC_SIZE_THRESH -
				    HAL_LINK_DESC_ALIGN);
		last_bank_sz = tot_mem_sz %
			       (DP_LINK_DESC_ALLOC_SIZE_THRESH -
				HAL_LINK_DESC_ALIGN);

		if (last_bank_sz)
			n_link_desc_bank += 1;
	}

	if (n_link_desc_bank > DP_LINK_DESC_BANKS_MAX)
		return -EINVAL;

	ret = ath11k_dp_link_desc_bank_alloc(ab, link_desc_banks,
					     n_link_desc_bank, last_bank_sz);
	if (ret)
		return ret;

	/* Setup link desc idle list for HW internal usage */
	entry_sz = ath11k_hal_srng_get_entrysize(ab, ring_type);
	tot_mem_sz = entry_sz * n_link_desc;

	/* Setup scatter desc list when the total memory requirement is more */
	if (tot_mem_sz > DP_LINK_DESC_ALLOC_SIZE_THRESH &&
	    ring_type != HAL_RXDMA_MONITOR_DESC) {
		ret = ath11k_dp_scatter_idle_link_desc_setup(ab, tot_mem_sz,
							     n_link_desc_bank,
							     n_link_desc,
							     last_bank_sz);
		if (ret) {
			ath11k_warn(ab, "failed to setup scattering idle list descriptor :%d\n",
				    ret);
			goto fail_desc_bank_free;
		}

		return 0;
	}

	spin_lock_bh(&srng->lock);

	ath11k_hal_srng_access_begin(ab, srng);

	for (i = 0; i < n_link_desc_bank; i++) {
		align_bytes = link_desc_banks[i].vaddr -
			      link_desc_banks[i].vaddr_unaligned;
		n_entries = (link_desc_banks[i].size - align_bytes) /
			    HAL_LINK_DESC_SIZE;
		paddr = link_desc_banks[i].paddr;
		while (n_entries &&
		       (desc = ath11k_hal_srng_src_get_next_entry(ab, srng))) {
			ath11k_hal_set_link_desc_addr((struct hal_wbm_link_desc *)desc,
						      i, paddr);
			n_entries--;
			paddr += HAL_LINK_DESC_SIZE;
		}
	}

	ath11k_hal_srng_access_end(ab, srng);

	spin_unlock_bh(&srng->lock);

	return 0;

fail_desc_bank_free:
	ath11k_dp_link_desc_bank_free(ab, link_desc_banks);

	return ret;
}
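
/* NAPI poll handler for one ext interrupt group: service every ring class
 * whose group mask includes this group (TX completions, RX error, WBM
 * error, REO destination, monitor status, REO status, rxdma2host error and
 * host2rxdma refill), charging RX work against the NAPI budget and bailing
 * out once it is exhausted.
 */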
int ath11k_dp_service_srng(struct ath11k_base *ab,
			   struct ath11k_ext_irq_grp *irq_grp,
			   int budget)
{
	struct napi_struct *napi = &irq_grp->napi;
	const struct ath11k_hw_hal_params *hal_params;
	int grp_id = irq_grp->grp_id;
	int work_done = 0;
	int i, j;
	int tot_work_done = 0;

	for (i = 0; i < ab->hw_params.max_tx_ring; i++) {
		if (BIT(ab->hw_params.hal_params->tcl2wbm_rbm_map[i].wbm_ring_num) &
		    ab->hw_params.ring_mask->tx[grp_id])
			ath11k_dp_tx_completion_handler(ab, i);
	}

	if (ab->hw_params.ring_mask->rx_err[grp_id]) {
		work_done = ath11k_dp_process_rx_err(ab, napi, budget);
		budget -= work_done;
		tot_work_done += work_done;
		if (budget <= 0)
			goto done;
	}

	if (ab->hw_params.ring_mask->rx_wbm_rel[grp_id]) {
		work_done = ath11k_dp_rx_process_wbm_err(ab,
							 napi,
							 budget);
		budget -= work_done;
		tot_work_done += work_done;

		if (budget <= 0)
			goto done;
	}

	if (ab->hw_params.ring_mask->rx[grp_id]) {
		i = fls(ab->hw_params.ring_mask->rx[grp_id]) - 1;
		work_done = ath11k_dp_process_rx(ab, i, napi,
						 budget);
		budget -= work_done;
		tot_work_done += work_done;
		if (budget <= 0)
			goto done;
	}

	if (ab->hw_params.ring_mask->rx_mon_status[grp_id]) {
		for (i = 0; i < ab->num_radios; i++) {
			for (j = 0; j < ab->hw_params.num_rxdma_per_pdev; j++) {
				int id = i * ab->hw_params.num_rxdma_per_pdev + j;

				if (ab->hw_params.ring_mask->rx_mon_status[grp_id] &
					BIT(id)) {
					work_done =
					ath11k_dp_rx_process_mon_rings(ab,
								       id,
								       napi, budget);
					budget -= work_done;
					tot_work_done += work_done;

					if (budget <= 0)
						goto done;
				}
			}
		}
	}

	if (ab->hw_params.ring_mask->reo_status[grp_id])
		ath11k_dp_process_reo_status(ab);

	for (i = 0; i < ab->num_radios; i++) {
		for (j = 0; j < ab->hw_params.num_rxdma_per_pdev; j++) {
			int id = i * ab->hw_params.num_rxdma_per_pdev + j;

			if (ab->hw_params.ring_mask->rxdma2host[grp_id] & BIT(id)) {
				work_done = ath11k_dp_process_rxdma_err(ab, id, budget);
				budget -= work_done;
				tot_work_done += work_done;
			}

			if (budget <= 0)
				goto done;

			if (ab->hw_params.ring_mask->host2rxdma[grp_id] & BIT(id)) {
				struct ath11k *ar = ath11k_ab_to_ar(ab, id);
				struct ath11k_pdev_dp *dp = &ar->dp;
				struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring;

				hal_params = ab->hw_params.hal_params;
				ath11k_dp_rxbufs_replenish(ab, id, rx_ring, 0,
							   hal_params->rx_buf_rbm);
			}
		}
	}
	/* TODO: Implement handler for other interrupts */

done:
	return tot_work_done;
}
EXPORT_SYMBOL(ath11k_dp_service_srng);
void ath11k_dp_pdev_free(struct ath11k_base *ab)
{
	struct ath11k *ar;
	int i;

	del_timer_sync(&ab->mon_reap_timer);

	for (i = 0; i < ab->num_radios; i++) {
		ar = ab->pdevs[i].ar;
		ath11k_dp_rx_pdev_free(ab, i);
		ath11k_debugfs_unregister(ar);
		ath11k_dp_rx_pdev_mon_detach(ar);
	}
}
void ath11k_dp_pdev_pre_alloc(struct ath11k_base *ab)
{
	struct ath11k *ar;
	struct ath11k_pdev_dp *dp;
	int i, j;

	for (i = 0; i < ab->num_radios; i++) {
		ar = ab->pdevs[i].ar;
		dp = &ar->dp;
		dp->mac_id = i;
		idr_init(&dp->rx_refill_buf_ring.bufs_idr);
		spin_lock_init(&dp->rx_refill_buf_ring.idr_lock);
		atomic_set(&dp->num_tx_pending, 0);
		init_waitqueue_head(&dp->tx_empty_waitq);
		for (j = 0; j < ab->hw_params.num_rxdma_per_pdev; j++) {
			idr_init(&dp->rx_mon_status_refill_ring[j].bufs_idr);
			spin_lock_init(&dp->rx_mon_status_refill_ring[j].idr_lock);
		}
		idr_init(&dp->rxdma_mon_buf_ring.bufs_idr);
		spin_lock_init(&dp->rxdma_mon_buf_ring.idr_lock);
	}
}
int ath11k_dp_pdev_alloc(struct ath11k_base *ab)
{
	struct ath11k *ar;
	int ret;
	int i;

	/* TODO: Per-pdev rx ring unlike tx ring which is mapped to different AC's */
	for (i = 0; i < ab->num_radios; i++) {
		ar = ab->pdevs[i].ar;
		ret = ath11k_dp_rx_pdev_alloc(ab, i);
		if (ret) {
			ath11k_warn(ab, "failed to allocate pdev rx for pdev_id :%d\n",
				    i);
			goto err;
		}
		ret = ath11k_dp_rx_pdev_mon_attach(ar);
		if (ret) {
			ath11k_warn(ab, "failed to initialize mon pdev %d\n",
				    i);
			goto err;
		}
	}

	return 0;

err:
	ath11k_dp_pdev_free(ab);

	return ret;
}
*dp
)
958 struct ath11k_htc_svc_conn_req conn_req
;
959 struct ath11k_htc_svc_conn_resp conn_resp
;
962 memset(&conn_req
, 0, sizeof(conn_req
));
963 memset(&conn_resp
, 0, sizeof(conn_resp
));
965 conn_req
.ep_ops
.ep_tx_complete
= ath11k_dp_htt_htc_tx_complete
;
966 conn_req
.ep_ops
.ep_rx_complete
= ath11k_dp_htt_htc_t2h_msg_handler
;
968 /* connect to control service */
969 conn_req
.service_id
= ATH11K_HTC_SVC_ID_HTT_DATA_MSG
;
971 status
= ath11k_htc_connect_service(&dp
->ab
->htc
, &conn_req
,
977 dp
->eid
= conn_resp
.eid
;
static void ath11k_dp_update_vdev_search(struct ath11k_vif *arvif)
{
	 /* When v2_map_support is true: for STA mode, enable address
	  * search index, tcl uses ast_hash value in the descriptor.
	  * When v2_map_support is false: for STA mode, don't enable
	  * address search index.
	  */
	switch (arvif->vdev_type) {
	case WMI_VDEV_TYPE_STA:
		if (arvif->ar->ab->hw_params.htt_peer_map_v2) {
			arvif->hal_addr_search_flags = HAL_TX_ADDRX_EN;
			arvif->search_type = HAL_TX_ADDR_SEARCH_INDEX;
		} else {
			arvif->hal_addr_search_flags = HAL_TX_ADDRY_EN;
			arvif->search_type = HAL_TX_ADDR_SEARCH_DEFAULT;
		}
		break;
	case WMI_VDEV_TYPE_AP:
	case WMI_VDEV_TYPE_IBSS:
		arvif->hal_addr_search_flags = HAL_TX_ADDRX_EN;
		arvif->search_type = HAL_TX_ADDR_SEARCH_DEFAULT;
		break;
	case WMI_VDEV_TYPE_MONITOR:
	default:
		return;
	}
}
void ath11k_dp_vdev_tx_attach(struct ath11k *ar, struct ath11k_vif *arvif)
{
	arvif->tcl_metadata |= FIELD_PREP(HTT_TCL_META_DATA_TYPE, 1) |
			       FIELD_PREP(HTT_TCL_META_DATA_VDEV_ID,
					  arvif->vdev_id) |
			       FIELD_PREP(HTT_TCL_META_DATA_PDEV_ID,
					  ar->pdev->pdev_id);

	/* set HTT extension valid bit to 0 by default */
	arvif->tcl_metadata &= ~HTT_TCL_META_DATA_VALID_HTT;

	ath11k_dp_update_vdev_search(arvif);
}
static int ath11k_dp_tx_pending_cleanup(int buf_id, void *skb, void *ctx)
{
	struct ath11k_base *ab = ctx;
	struct sk_buff *msdu = skb;

	dma_unmap_single(ab->dev, ATH11K_SKB_CB(msdu)->paddr, msdu->len,
			 DMA_TO_DEVICE);

	dev_kfree_skb_any(msdu);

	return 0;
}
void ath11k_dp_free(struct ath11k_base *ab)
{
	struct ath11k_dp *dp = &ab->dp;
	int i;

	ath11k_dp_link_desc_cleanup(ab, dp->link_desc_banks,
				    HAL_WBM_IDLE_LINK, &dp->wbm_idle_ring);

	ath11k_dp_srng_common_cleanup(ab);

	ath11k_dp_reo_cmd_list_cleanup(ab);

	for (i = 0; i < ab->hw_params.max_tx_ring; i++) {
		spin_lock_bh(&dp->tx_ring[i].tx_idr_lock);
		idr_for_each(&dp->tx_ring[i].txbuf_idr,
			     ath11k_dp_tx_pending_cleanup, ab);
		idr_destroy(&dp->tx_ring[i].txbuf_idr);
		spin_unlock_bh(&dp->tx_ring[i].tx_idr_lock);
		kfree(dp->tx_ring[i].tx_status);
	}

	/* Deinit any SOC level resource */
}
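
/* Allocate the SoC-level DP state: REO command bookkeeping lists and lock,
 * the WBM idle link ring and link descriptor banks, the common SRNGs, a
 * per-TX-ring tx_status FIFO sized for DP_TX_COMP_RING_SIZE entries, and
 * the DSCP-to-TID map tables.
 */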
int ath11k_dp_alloc(struct ath11k_base *ab)
{
	struct ath11k_dp *dp = &ab->dp;
	struct hal_srng *srng = NULL;
	size_t size = 0;
	u32 n_link_desc = 0;
	int ret;
	int i;

	dp->ab = ab;

	INIT_LIST_HEAD(&dp->reo_cmd_list);
	INIT_LIST_HEAD(&dp->reo_cmd_cache_flush_list);
	INIT_LIST_HEAD(&dp->dp_full_mon_mpdu_list);
	spin_lock_init(&dp->reo_cmd_lock);

	dp->reo_cmd_cache_flush_count = 0;

	ret = ath11k_wbm_idle_ring_setup(ab, &n_link_desc);
	if (ret) {
		ath11k_warn(ab, "failed to setup wbm_idle_ring: %d\n", ret);
		return ret;
	}

	srng = &ab->hal.srng_list[dp->wbm_idle_ring.ring_id];

	ret = ath11k_dp_link_desc_setup(ab, dp->link_desc_banks,
					HAL_WBM_IDLE_LINK, srng, n_link_desc);
	if (ret) {
		ath11k_warn(ab, "failed to setup link desc: %d\n", ret);
		return ret;
	}

	ret = ath11k_dp_srng_common_setup(ab);
	if (ret)
		goto fail_link_desc_cleanup;

	size = sizeof(struct hal_wbm_release_ring) * DP_TX_COMP_RING_SIZE;

	for (i = 0; i < ab->hw_params.max_tx_ring; i++) {
		idr_init(&dp->tx_ring[i].txbuf_idr);
		spin_lock_init(&dp->tx_ring[i].tx_idr_lock);
		dp->tx_ring[i].tcl_data_ring_id = i;

		dp->tx_ring[i].tx_status_head = 0;
		dp->tx_ring[i].tx_status_tail = DP_TX_COMP_RING_SIZE - 1;
		dp->tx_ring[i].tx_status = kmalloc(size, GFP_KERNEL);
		if (!dp->tx_ring[i].tx_status) {
			ret = -ENOMEM;
			goto fail_cmn_srng_cleanup;
		}
	}

	for (i = 0; i < HAL_DSCP_TID_MAP_TBL_NUM_ENTRIES_MAX; i++)
		ath11k_hal_tx_set_dscp_tid_map(ab, i);

	/* Init any SOC level resource for DP */

	return 0;

fail_cmn_srng_cleanup:
	ath11k_dp_srng_common_cleanup(ab);

fail_link_desc_cleanup:
	ath11k_dp_link_desc_cleanup(ab, dp->link_desc_banks,
				    HAL_WBM_IDLE_LINK, &dp->wbm_idle_ring);

	return ret;
}
static void ath11k_dp_shadow_timer_handler(struct timer_list *t)
{
	struct ath11k_hp_update_timer *update_timer = from_timer(update_timer,
								 t, timer);
	struct ath11k_base *ab = update_timer->ab;
	struct hal_srng *srng = &ab->hal.srng_list[update_timer->ring_id];

	spin_lock_bh(&srng->lock);

	/* When the timer fires, the handler checks whether any new TX has
	 * happened since it was armed. The HP is updated and the timer
	 * stopped only when there were no TX operations during the timeout
	 * interval; the timer is started again when TX resumes.
	 */
	if (update_timer->timer_tx_num != update_timer->tx_num) {
		update_timer->timer_tx_num = update_timer->tx_num;
		mod_timer(&update_timer->timer, jiffies +
			  msecs_to_jiffies(update_timer->interval));
	} else {
		update_timer->started = false;
		ath11k_hal_srng_shadow_update_hp_tp(ab, srng);
	}

	spin_unlock_bh(&srng->lock);
}
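
/* Shadow register HP update timers: when shadow registers are in use, the
 * TX path calls ath11k_dp_shadow_start_timer() with the ring lock held;
 * the timer only writes the head pointer (and stops itself) once a full
 * interval passes without further TX, so bursts of TX are batched into one
 * shadow update.
 */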
void ath11k_dp_shadow_start_timer(struct ath11k_base *ab,
				  struct hal_srng *srng,
				  struct ath11k_hp_update_timer *update_timer)
{
	lockdep_assert_held(&srng->lock);

	if (!ab->hw_params.supports_shadow_regs)
		return;

	update_timer->tx_num++;

	if (update_timer->started)
		return;

	update_timer->started = true;
	update_timer->timer_tx_num = update_timer->tx_num;
	mod_timer(&update_timer->timer, jiffies +
		  msecs_to_jiffies(update_timer->interval));
}
void ath11k_dp_shadow_stop_timer(struct ath11k_base *ab,
				 struct ath11k_hp_update_timer *update_timer)
{
	if (!ab->hw_params.supports_shadow_regs)
		return;

	if (!update_timer->init)
		return;

	del_timer_sync(&update_timer->timer);
}
void ath11k_dp_shadow_init_timer(struct ath11k_base *ab,
				 struct ath11k_hp_update_timer *update_timer,
				 u32 interval, u32 ring_id)
{
	if (!ab->hw_params.supports_shadow_regs)
		return;

	update_timer->tx_num = 0;
	update_timer->timer_tx_num = 0;
	update_timer->ab = ab;
	update_timer->ring_id = ring_id;
	update_timer->interval = interval;
	update_timer->init = true;
	timer_setup(&update_timer->timer,
		    ath11k_dp_shadow_timer_handler, 0);
}