// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
 * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
 */

#include <crypto/hash.h>
enum ath12k_dp_desc_type {
	ATH12K_DP_TX_DESC,
	ATH12K_DP_RX_DESC,
};
static void ath12k_dp_htt_htc_tx_complete(struct ath12k_base *ab,
					  struct sk_buff *skb)
{
	dev_kfree_skb_any(skb);
}
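
/* Tear down per-peer data path state: release the RX TID queues and the
 * MIC verification shash allocated at peer setup. If the peer cannot be
 * found in the base peer table only a warning is logged.
 */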
void ath12k_dp_peer_cleanup(struct ath12k *ar, int vdev_id, const u8 *addr)
{
	struct ath12k_base *ab = ar->ab;
	struct ath12k_peer *peer;

	/* TODO: Any other peer specific DP cleanup */

	spin_lock_bh(&ab->base_lock);
	peer = ath12k_peer_find(ab, vdev_id, addr);
	if (!peer) {
		ath12k_warn(ab, "failed to lookup peer %pM on vdev %d\n",
			    addr, vdev_id);
		spin_unlock_bh(&ab->base_lock);
		return;
	}

	ath12k_dp_rx_peer_tid_cleanup(ar, peer);
	crypto_free_shash(peer->tfm_mmic);
	peer->dp_setup_done = false;
	spin_unlock_bh(&ab->base_lock);
}
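
/* Set up data path state for a newly added peer: program the default REO
 * destination ring via WMI, create an RX TID queue for every TID and the
 * RX fragment/defrag context. On failure the TID queues created so far are
 * deleted again under the peer_clean label.
 */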
int ath12k_dp_peer_setup(struct ath12k *ar, int vdev_id, const u8 *addr)
{
	struct ath12k_base *ab = ar->ab;
	struct ath12k_peer *peer;
	u32 reo_dest;
	int ret = 0, tid;

	/* NOTE: reo_dest ring id starts from 1 unlike mac_id which starts from 0 */
	reo_dest = ar->dp.mac_id + 1;
	ret = ath12k_wmi_set_peer_param(ar, addr, vdev_id,
					WMI_PEER_SET_DEFAULT_ROUTING,
					DP_RX_HASH_ENABLE | (reo_dest << 1));
	if (ret) {
		ath12k_warn(ab, "failed to set default routing %d peer :%pM vdev_id :%d\n",
			    ret, addr, vdev_id);
		return ret;
	}

	for (tid = 0; tid <= IEEE80211_NUM_TIDS; tid++) {
		ret = ath12k_dp_rx_peer_tid_setup(ar, addr, vdev_id, tid, 1, 0,
						  HAL_PN_TYPE_NONE);
		if (ret) {
			ath12k_warn(ab, "failed to setup rxd tid queue for tid %d: %d\n",
				    tid, ret);
			goto peer_clean;
		}
	}

	ret = ath12k_dp_rx_peer_frag_setup(ar, addr, vdev_id);
	if (ret) {
		ath12k_warn(ab, "failed to setup rx defrag context\n");
		tid--;
		goto peer_clean;
	}

	/* TODO: Setup other peer specific resource used in data path */

	return 0;

peer_clean:
	spin_lock_bh(&ab->base_lock);

	peer = ath12k_peer_find(ab, vdev_id, addr);
	if (!peer) {
		ath12k_warn(ab, "failed to find the peer to del rx tid\n");
		spin_unlock_bh(&ab->base_lock);
		return -ENOENT;
	}

	for (; tid >= 0; tid--)
		ath12k_dp_rx_peer_tid_delete(ar, peer, tid);

	spin_unlock_bh(&ab->base_lock);

	return ret;
}
void ath12k_dp_srng_cleanup(struct ath12k_base *ab, struct dp_srng *ring)
{
	if (!ring->vaddr_unaligned)
		return;

	dma_free_coherent(ab->dev, ring->size, ring->vaddr_unaligned,
			  ring->paddr_unaligned);

	ring->vaddr_unaligned = NULL;
}
static int ath12k_dp_srng_find_ring_in_mask(int ring_num, const u8 *grp_mask)
{
	int ext_group_num;
	u8 mask = 1 << ring_num;

	for (ext_group_num = 0; ext_group_num < ATH12K_EXT_IRQ_GRP_NUM_MAX;
	     ext_group_num++) {
		if (mask & grp_mask[ext_group_num])
			return ext_group_num;
	}

	return -ENOENT;
}
static int ath12k_dp_srng_calculate_msi_group(struct ath12k_base *ab,
					      enum hal_ring_type type, int ring_num)
{
	const struct ath12k_hal_tcl_to_wbm_rbm_map *map;
	const u8 *grp_mask;
	int i;

	switch (type) {
	case HAL_WBM2SW_RELEASE:
		if (ring_num == HAL_WBM2SW_REL_ERR_RING_NUM) {
			grp_mask = &ab->hw_params->ring_mask->rx_wbm_rel[0];
			ring_num = 0;
		} else {
			map = ab->hw_params->hal_ops->tcl_to_wbm_rbm_map;
			for (i = 0; i < ab->hw_params->max_tx_ring; i++) {
				if (ring_num == map[i].wbm_ring_num) {
					ring_num = i;
					break;
				}
			}

			grp_mask = &ab->hw_params->ring_mask->tx[0];
		}
		break;
	case HAL_REO_EXCEPTION:
		grp_mask = &ab->hw_params->ring_mask->rx_err[0];
		break;
	case HAL_REO_DST:
		grp_mask = &ab->hw_params->ring_mask->rx[0];
		break;
	case HAL_REO_STATUS:
		grp_mask = &ab->hw_params->ring_mask->reo_status[0];
		break;
	case HAL_RXDMA_MONITOR_STATUS:
	case HAL_RXDMA_MONITOR_DST:
		grp_mask = &ab->hw_params->ring_mask->rx_mon_dest[0];
		break;
	case HAL_TX_MONITOR_DST:
		grp_mask = &ab->hw_params->ring_mask->tx_mon_dest[0];
		break;
	case HAL_RXDMA_BUF:
		grp_mask = &ab->hw_params->ring_mask->host2rxdma[0];
		break;
	case HAL_RXDMA_MONITOR_BUF:
	case HAL_SW2WBM_RELEASE:
	case HAL_WBM_IDLE_LINK:
	case HAL_REO_REINJECT:
	case HAL_CE_DST_STATUS:
	default:
		return -ENOENT;
	}

	return ath12k_dp_srng_find_ring_in_mask(ring_num, grp_mask);
}
static void ath12k_dp_srng_msi_setup(struct ath12k_base *ab,
				     struct hal_srng_params *ring_params,
				     enum hal_ring_type type, int ring_num)
{
	int msi_group_number, msi_data_count;
	u32 msi_data_start, msi_irq_start, addr_lo, addr_hi;
	int ret;

	ret = ath12k_hif_get_user_msi_vector(ab, "DP",
					     &msi_data_count, &msi_data_start,
					     &msi_irq_start);
	if (ret)
		return;

	msi_group_number = ath12k_dp_srng_calculate_msi_group(ab, type,
							      ring_num);
	if (msi_group_number < 0) {
		ath12k_dbg(ab, ATH12K_DBG_PCI,
			   "ring not part of an ext_group; ring_type: %d,ring_num %d",
			   type, ring_num);
		ring_params->msi_addr = 0;
		ring_params->msi_data = 0;
		return;
	}

	if (msi_group_number > msi_data_count) {
		ath12k_dbg(ab, ATH12K_DBG_PCI,
			   "multiple msi_groups share one msi, msi_group_num %d",
			   msi_group_number);
	}

	ath12k_hif_get_msi_address(ab, &addr_lo, &addr_hi);

	ring_params->msi_addr = addr_lo;
	ring_params->msi_addr |= (dma_addr_t)(((uint64_t)addr_hi) << 32);
	ring_params->msi_data = (msi_group_number % msi_data_count)
				+ msi_data_start;
	ring_params->flags |= HAL_SRNG_FLAGS_MSI_INTR;
}
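
/* Generic SRNG allocator: allocate coherent DMA memory for the ring
 * (over-sized so the base can be aligned to HAL_RING_BASE_ALIGN), fill
 * hal_srng_params with per-ring-type interrupt thresholds and MSI
 * information, and register the ring with the HAL layer.
 */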
int ath12k_dp_srng_setup(struct ath12k_base *ab, struct dp_srng *ring,
			 enum hal_ring_type type, int ring_num,
			 int mac_id, int num_entries)
{
	struct hal_srng_params params = { 0 };
	int entry_sz = ath12k_hal_srng_get_entrysize(ab, type);
	int max_entries = ath12k_hal_srng_get_max_entries(ab, type);
	int ret;

	if (max_entries < 0 || entry_sz < 0)
		return -EINVAL;

	if (num_entries > max_entries)
		num_entries = max_entries;

	ring->size = (num_entries * entry_sz) + HAL_RING_BASE_ALIGN - 1;
	ring->vaddr_unaligned = dma_alloc_coherent(ab->dev, ring->size,
						   &ring->paddr_unaligned,
						   GFP_KERNEL);
	if (!ring->vaddr_unaligned)
		return -ENOMEM;

	ring->vaddr = PTR_ALIGN(ring->vaddr_unaligned, HAL_RING_BASE_ALIGN);
	ring->paddr = ring->paddr_unaligned + ((unsigned long)ring->vaddr -
		      (unsigned long)ring->vaddr_unaligned);

	params.ring_base_vaddr = ring->vaddr;
	params.ring_base_paddr = ring->paddr;
	params.num_entries = num_entries;
	ath12k_dp_srng_msi_setup(ab, &params, type, ring_num + mac_id);

	switch (type) {
	case HAL_REO_DST:
	case HAL_RXDMA_BUF:
		params.intr_batch_cntr_thres_entries =
					HAL_SRNG_INT_BATCH_THRESHOLD_RX;
		params.intr_timer_thres_us = HAL_SRNG_INT_TIMER_THRESHOLD_RX;
		break;
	case HAL_RXDMA_MONITOR_BUF:
	case HAL_RXDMA_MONITOR_STATUS:
		params.low_threshold = num_entries >> 3;
		params.flags |= HAL_SRNG_FLAGS_LOW_THRESH_INTR_EN;
		params.intr_batch_cntr_thres_entries = 0;
		params.intr_timer_thres_us = HAL_SRNG_INT_TIMER_THRESHOLD_RX;
		break;
	case HAL_TX_MONITOR_DST:
		params.low_threshold = DP_TX_MONITOR_BUF_SIZE_MAX >> 3;
		params.flags |= HAL_SRNG_FLAGS_LOW_THRESH_INTR_EN;
		params.intr_batch_cntr_thres_entries = 0;
		params.intr_timer_thres_us = HAL_SRNG_INT_TIMER_THRESHOLD_RX;
		break;
	case HAL_WBM2SW_RELEASE:
		if (ab->hw_params->hw_ops->dp_srng_is_tx_comp_ring(ring_num)) {
			params.intr_batch_cntr_thres_entries =
					HAL_SRNG_INT_BATCH_THRESHOLD_TX;
			params.intr_timer_thres_us =
					HAL_SRNG_INT_TIMER_THRESHOLD_TX;
			break;
		}
		/* follow through when ring_num != HAL_WBM2SW_REL_ERR_RING_NUM */
		fallthrough;
	case HAL_REO_EXCEPTION:
	case HAL_REO_REINJECT:
	case HAL_WBM_IDLE_LINK:
	case HAL_SW2WBM_RELEASE:
	case HAL_RXDMA_MONITOR_DST:
	case HAL_RXDMA_MONITOR_DESC:
		params.intr_batch_cntr_thres_entries =
					HAL_SRNG_INT_BATCH_THRESHOLD_OTHER;
		params.intr_timer_thres_us = HAL_SRNG_INT_TIMER_THRESHOLD_OTHER;
		break;
	case HAL_RXDMA_DIR_BUF:
		break;
	default:
		ath12k_warn(ab, "Not a valid ring type in dp :%d\n", type);
		return -EINVAL;
	}

	ret = ath12k_hal_srng_setup(ab, type, ring_num, mac_id, &params);
	if (ret < 0) {
		ath12k_warn(ab, "failed to setup srng: %d ring_id %d\n",
			    ret, ring_num);
		return ret;
	}

	ring->ring_id = ret;

	return 0;
}
static u32 ath12k_dp_tx_get_vdev_bank_config(struct ath12k_base *ab,
					     struct ath12k_link_vif *arvif)
{
	u32 bank_config = 0;
	struct ath12k_vif *ahvif = arvif->ahvif;

	/* Only valid for raw frames with HW crypto enabled.
	 * With SW crypto, mac80211 sets key per packet
	 */
	if (ahvif->tx_encap_type == HAL_TCL_ENCAP_TYPE_RAW &&
	    test_bit(ATH12K_FLAG_HW_CRYPTO_DISABLED, &ab->dev_flags))
		bank_config |=
			u32_encode_bits(ath12k_dp_tx_get_encrypt_type(ahvif->key_cipher),
					HAL_TX_BANK_CONFIG_ENCRYPT_TYPE);

	bank_config |= u32_encode_bits(ahvif->tx_encap_type,
				       HAL_TX_BANK_CONFIG_ENCAP_TYPE);
	bank_config |= u32_encode_bits(0, HAL_TX_BANK_CONFIG_SRC_BUFFER_SWAP) |
		       u32_encode_bits(0, HAL_TX_BANK_CONFIG_LINK_META_SWAP) |
		       u32_encode_bits(0, HAL_TX_BANK_CONFIG_EPD);

	/* only valid if idx_lookup_override is not set in tcl_data_cmd */
	bank_config |= u32_encode_bits(0, HAL_TX_BANK_CONFIG_INDEX_LOOKUP_EN);

	bank_config |= u32_encode_bits(arvif->hal_addr_search_flags & HAL_TX_ADDRX_EN,
				       HAL_TX_BANK_CONFIG_ADDRX_EN) |
		       u32_encode_bits(!!(arvif->hal_addr_search_flags &
					  HAL_TX_ADDRY_EN),
				       HAL_TX_BANK_CONFIG_ADDRY_EN);

	bank_config |= u32_encode_bits(ieee80211_vif_is_mesh(ahvif->vif) ? 3 : 0,
				       HAL_TX_BANK_CONFIG_MESH_EN) |
		       u32_encode_bits(arvif->vdev_id_check_en,
				       HAL_TX_BANK_CONFIG_VDEV_ID_CHECK_EN);

	bank_config |= u32_encode_bits(0, HAL_TX_BANK_CONFIG_DSCP_TIP_MAP_ID);

	return bank_config;
}
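
/* Find a TX bank profile whose bank_config matches this vdev, or claim an
 * unused slot and program it into the hardware bank register. Bank profiles
 * are reference counted; ath12k_dp_tx_put_bank_profile() drops the
 * reference taken here.
 */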
static int ath12k_dp_tx_get_bank_profile(struct ath12k_base *ab,
					 struct ath12k_link_vif *arvif,
					 struct ath12k_dp *dp)
{
	int bank_id = DP_INVALID_BANK_ID;
	int i;
	u32 bank_config;
	bool configure_register = false;

	/* convert vdev params into hal_tx_bank_config */
	bank_config = ath12k_dp_tx_get_vdev_bank_config(ab, arvif);

	spin_lock_bh(&dp->tx_bank_lock);
	/* TODO: implement using idr kernel framework*/
	for (i = 0; i < dp->num_bank_profiles; i++) {
		if (dp->bank_profiles[i].is_configured &&
		    (dp->bank_profiles[i].bank_config ^ bank_config) == 0) {
			bank_id = i;
			goto inc_ref_and_return;
		}
		if (!dp->bank_profiles[i].is_configured ||
		    !dp->bank_profiles[i].num_users) {
			bank_id = i;
			goto configure_and_return;
		}
	}

	if (bank_id == DP_INVALID_BANK_ID) {
		spin_unlock_bh(&dp->tx_bank_lock);
		ath12k_err(ab, "unable to find TX bank!");
		return bank_id;
	}

configure_and_return:
	dp->bank_profiles[bank_id].is_configured = true;
	dp->bank_profiles[bank_id].bank_config = bank_config;
	configure_register = true;
inc_ref_and_return:
	dp->bank_profiles[bank_id].num_users++;
	spin_unlock_bh(&dp->tx_bank_lock);

	if (configure_register)
		ath12k_hal_tx_configure_bank_register(ab, bank_config, bank_id);

	ath12k_dbg(ab, ATH12K_DBG_DP_HTT, "dp_htt tcl bank_id %d input 0x%x match 0x%x num_users %u",
		   bank_id, bank_config, dp->bank_profiles[bank_id].bank_config,
		   dp->bank_profiles[bank_id].num_users);

	return bank_id;
}
void ath12k_dp_tx_put_bank_profile(struct ath12k_dp *dp, u8 bank_id)
{
	spin_lock_bh(&dp->tx_bank_lock);
	dp->bank_profiles[bank_id].num_users--;
	spin_unlock_bh(&dp->tx_bank_lock);
}
static void ath12k_dp_deinit_bank_profiles(struct ath12k_base *ab)
{
	struct ath12k_dp *dp = &ab->dp;

	kfree(dp->bank_profiles);
	dp->bank_profiles = NULL;
}
static int ath12k_dp_init_bank_profiles(struct ath12k_base *ab)
{
	struct ath12k_dp *dp = &ab->dp;
	u32 num_tcl_banks = ab->hw_params->num_tcl_banks;
	int i;

	dp->num_bank_profiles = num_tcl_banks;
	dp->bank_profiles = kmalloc_array(num_tcl_banks,
					  sizeof(struct ath12k_dp_tx_bank_profile),
					  GFP_KERNEL);
	if (!dp->bank_profiles)
		return -ENOMEM;

	spin_lock_init(&dp->tx_bank_lock);

	for (i = 0; i < num_tcl_banks; i++) {
		dp->bank_profiles[i].is_configured = false;
		dp->bank_profiles[i].num_users = 0;
	}

	return 0;
}
static void ath12k_dp_srng_common_cleanup(struct ath12k_base *ab)
{
	struct ath12k_dp *dp = &ab->dp;
	int i;

	ath12k_dp_srng_cleanup(ab, &dp->reo_status_ring);
	ath12k_dp_srng_cleanup(ab, &dp->reo_cmd_ring);
	ath12k_dp_srng_cleanup(ab, &dp->reo_except_ring);
	ath12k_dp_srng_cleanup(ab, &dp->rx_rel_ring);
	ath12k_dp_srng_cleanup(ab, &dp->reo_reinject_ring);
	for (i = 0; i < ab->hw_params->max_tx_ring; i++) {
		ath12k_dp_srng_cleanup(ab, &dp->tx_ring[i].tcl_comp_ring);
		ath12k_dp_srng_cleanup(ab, &dp->tx_ring[i].tcl_data_ring);
	}
	ath12k_dp_srng_cleanup(ab, &dp->wbm_desc_rel_ring);
}
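
/* Allocate the SoC-common rings (WBM release, per-TCL data/completion
 * pairs, REO reinject/exception/command/status and the RX release ring)
 * and program the REO hash routing map so hashed RX traffic is spread
 * across the SW1-SW4 destination rings.
 */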
static int ath12k_dp_srng_common_setup(struct ath12k_base *ab)
{
	struct ath12k_dp *dp = &ab->dp;
	const struct ath12k_hal_tcl_to_wbm_rbm_map *map;
	struct hal_srng *srng;
	int i, ret, tx_comp_ring_num;
	u32 ring_hash_map;

	ret = ath12k_dp_srng_setup(ab, &dp->wbm_desc_rel_ring,
				   HAL_SW2WBM_RELEASE, 0, 0,
				   DP_WBM_RELEASE_RING_SIZE);
	if (ret) {
		ath12k_warn(ab, "failed to set up wbm2sw_release ring :%d\n",
			    ret);
		goto err;
	}

	for (i = 0; i < ab->hw_params->max_tx_ring; i++) {
		map = ab->hw_params->hal_ops->tcl_to_wbm_rbm_map;
		tx_comp_ring_num = map[i].wbm_ring_num;

		ret = ath12k_dp_srng_setup(ab, &dp->tx_ring[i].tcl_data_ring,
					   HAL_TCL_DATA, i, 0,
					   DP_TCL_DATA_RING_SIZE);
		if (ret) {
			ath12k_warn(ab, "failed to set up tcl_data ring (%d) :%d\n",
				    i, ret);
			goto err;
		}

		ret = ath12k_dp_srng_setup(ab, &dp->tx_ring[i].tcl_comp_ring,
					   HAL_WBM2SW_RELEASE, tx_comp_ring_num, 0,
					   DP_TX_COMP_RING_SIZE);
		if (ret) {
			ath12k_warn(ab, "failed to set up tcl_comp ring (%d) :%d\n",
				    tx_comp_ring_num, ret);
			goto err;
		}
	}

	ret = ath12k_dp_srng_setup(ab, &dp->reo_reinject_ring, HAL_REO_REINJECT,
				   0, 0, DP_REO_REINJECT_RING_SIZE);
	if (ret) {
		ath12k_warn(ab, "failed to set up reo_reinject ring :%d\n",
			    ret);
		goto err;
	}

	ret = ath12k_dp_srng_setup(ab, &dp->rx_rel_ring, HAL_WBM2SW_RELEASE,
				   HAL_WBM2SW_REL_ERR_RING_NUM, 0,
				   DP_RX_RELEASE_RING_SIZE);
	if (ret) {
		ath12k_warn(ab, "failed to set up rx_rel ring :%d\n", ret);
		goto err;
	}

	ret = ath12k_dp_srng_setup(ab, &dp->reo_except_ring, HAL_REO_EXCEPTION,
				   0, 0, DP_REO_EXCEPTION_RING_SIZE);
	if (ret) {
		ath12k_warn(ab, "failed to set up reo_exception ring :%d\n",
			    ret);
		goto err;
	}

	ret = ath12k_dp_srng_setup(ab, &dp->reo_cmd_ring, HAL_REO_CMD,
				   0, 0, DP_REO_CMD_RING_SIZE);
	if (ret) {
		ath12k_warn(ab, "failed to set up reo_cmd ring :%d\n", ret);
		goto err;
	}

	srng = &ab->hal.srng_list[dp->reo_cmd_ring.ring_id];
	ath12k_hal_reo_init_cmd_ring(ab, srng);

	ret = ath12k_dp_srng_setup(ab, &dp->reo_status_ring, HAL_REO_STATUS,
				   0, 0, DP_REO_STATUS_RING_SIZE);
	if (ret) {
		ath12k_warn(ab, "failed to set up reo_status ring :%d\n", ret);
		goto err;
	}

	/* When hash based routing of rx packet is enabled, 32 entries to map
	 * the hash values to the ring will be configured. Each hash entry uses
	 * four bits to map to a particular ring. The ring mapping will be
	 * 0:TCL, 1:SW1, 2:SW2, 3:SW3, 4:SW4, 5:Release, 6:FW and 7:SW5
	 * 8:SW6, 9:SW7, 10:SW8, 11:Not used.
	 */
	ring_hash_map = HAL_HASH_ROUTING_RING_SW1 |
			HAL_HASH_ROUTING_RING_SW2 << 4 |
			HAL_HASH_ROUTING_RING_SW3 << 8 |
			HAL_HASH_ROUTING_RING_SW4 << 12 |
			HAL_HASH_ROUTING_RING_SW1 << 16 |
			HAL_HASH_ROUTING_RING_SW2 << 20 |
			HAL_HASH_ROUTING_RING_SW3 << 24 |
			HAL_HASH_ROUTING_RING_SW4 << 28;

	ath12k_hal_reo_hw_setup(ab, ring_hash_map);

	return 0;

err:
	ath12k_dp_srng_common_cleanup(ab);

	return ret;
}
static void ath12k_dp_scatter_idle_link_desc_cleanup(struct ath12k_base *ab)
{
	struct ath12k_dp *dp = &ab->dp;
	struct hal_wbm_idle_scatter_list *slist = dp->scatter_list;
	int i;

	for (i = 0; i < DP_IDLE_SCATTER_BUFS_MAX; i++) {
		if (!slist[i].vaddr)
			continue;

		dma_free_coherent(ab->dev, HAL_WBM_IDLE_SCATTER_BUF_SIZE_MAX,
				  slist[i].vaddr, slist[i].paddr);
		slist[i].vaddr = NULL;
	}
}
static int ath12k_dp_scatter_idle_link_desc_setup(struct ath12k_base *ab,
						  int size,
						  u32 n_link_desc_bank,
						  u32 n_link_desc,
						  u32 last_bank_sz)
{
	struct ath12k_dp *dp = &ab->dp;
	struct dp_link_desc_bank *link_desc_banks = dp->link_desc_banks;
	struct hal_wbm_idle_scatter_list *slist = dp->scatter_list;
	u32 n_entries_per_buf;
	int num_scatter_buf, scatter_idx;
	struct hal_wbm_link_desc *scatter_buf;
	int align_bytes, n_entries;
	dma_addr_t paddr;
	int rem_entries;
	int i;
	int ret = 0;
	u32 end_offset, cookie;
	enum hal_rx_buf_return_buf_manager rbm = dp->idle_link_rbm;

	n_entries_per_buf = HAL_WBM_IDLE_SCATTER_BUF_SIZE /
		ath12k_hal_srng_get_entrysize(ab, HAL_WBM_IDLE_LINK);
	num_scatter_buf = DIV_ROUND_UP(size, HAL_WBM_IDLE_SCATTER_BUF_SIZE);

	if (num_scatter_buf > DP_IDLE_SCATTER_BUFS_MAX)
		return -EINVAL;

	for (i = 0; i < num_scatter_buf; i++) {
		slist[i].vaddr = dma_alloc_coherent(ab->dev,
						    HAL_WBM_IDLE_SCATTER_BUF_SIZE_MAX,
						    &slist[i].paddr, GFP_KERNEL);
		if (!slist[i].vaddr) {
			ret = -ENOMEM;
			goto err;
		}
	}

	scatter_idx = 0;
	scatter_buf = slist[scatter_idx].vaddr;
	rem_entries = n_entries_per_buf;

	for (i = 0; i < n_link_desc_bank; i++) {
		align_bytes = link_desc_banks[i].vaddr -
			      link_desc_banks[i].vaddr_unaligned;
		n_entries = (DP_LINK_DESC_ALLOC_SIZE_THRESH - align_bytes) /
			    HAL_LINK_DESC_SIZE;
		paddr = link_desc_banks[i].paddr;
		while (n_entries) {
			cookie = DP_LINK_DESC_COOKIE_SET(n_entries, i);
			ath12k_hal_set_link_desc_addr(scatter_buf, cookie,
						      paddr, rbm);
			n_entries--;
			paddr += HAL_LINK_DESC_SIZE;
			if (rem_entries) {
				rem_entries--;
				scatter_buf++;
				continue;
			}

			rem_entries = n_entries_per_buf;
			scatter_idx++;
			scatter_buf = slist[scatter_idx].vaddr;
		}
	}

	end_offset = (scatter_buf - slist[scatter_idx].vaddr) *
		     sizeof(struct hal_wbm_link_desc);
	ath12k_hal_setup_link_idle_list(ab, slist, num_scatter_buf,
					n_link_desc, end_offset);

	return 0;

err:
	ath12k_dp_scatter_idle_link_desc_cleanup(ab);

	return ret;
}
static void
ath12k_dp_link_desc_bank_free(struct ath12k_base *ab,
			      struct dp_link_desc_bank *link_desc_banks)
{
	int i;

	for (i = 0; i < DP_LINK_DESC_BANKS_MAX; i++) {
		if (link_desc_banks[i].vaddr_unaligned) {
			dma_free_coherent(ab->dev,
					  link_desc_banks[i].size,
					  link_desc_banks[i].vaddr_unaligned,
					  link_desc_banks[i].paddr_unaligned);
			link_desc_banks[i].vaddr_unaligned = NULL;
		}
	}
}
static int ath12k_dp_link_desc_bank_alloc(struct ath12k_base *ab,
					  struct dp_link_desc_bank *desc_bank,
					  int n_link_desc_bank,
					  int last_bank_sz)
{
	struct ath12k_dp *dp = &ab->dp;
	int i;
	int ret = 0;
	int desc_sz = DP_LINK_DESC_ALLOC_SIZE_THRESH;

	for (i = 0; i < n_link_desc_bank; i++) {
		if (i == (n_link_desc_bank - 1) && last_bank_sz)
			desc_sz = last_bank_sz;

		desc_bank[i].vaddr_unaligned =
			dma_alloc_coherent(ab->dev, desc_sz,
					   &desc_bank[i].paddr_unaligned,
					   GFP_KERNEL);
		if (!desc_bank[i].vaddr_unaligned) {
			ret = -ENOMEM;
			goto err;
		}

		desc_bank[i].vaddr = PTR_ALIGN(desc_bank[i].vaddr_unaligned,
					       HAL_LINK_DESC_ALIGN);
		desc_bank[i].paddr = desc_bank[i].paddr_unaligned +
				     ((unsigned long)desc_bank[i].vaddr -
				      (unsigned long)desc_bank[i].vaddr_unaligned);
		desc_bank[i].size = desc_sz;
	}

	return 0;

err:
	ath12k_dp_link_desc_bank_free(ab, dp->link_desc_banks);

	return ret;
}
void ath12k_dp_link_desc_cleanup(struct ath12k_base *ab,
				 struct dp_link_desc_bank *desc_bank,
				 u32 ring_type, struct dp_srng *ring)
{
	ath12k_dp_link_desc_bank_free(ab, desc_bank);

	if (ring_type != HAL_RXDMA_MONITOR_DESC) {
		ath12k_dp_srng_cleanup(ab, ring);
		ath12k_dp_scatter_idle_link_desc_cleanup(ab);
	}
}
static int ath12k_wbm_idle_ring_setup(struct ath12k_base *ab, u32 *n_link_desc)
{
	struct ath12k_dp *dp = &ab->dp;
	u32 n_mpdu_link_desc, n_mpdu_queue_desc;
	u32 n_tx_msdu_link_desc, n_rx_msdu_link_desc;
	int ret = 0;

	n_mpdu_link_desc = (DP_NUM_TIDS_MAX * DP_AVG_MPDUS_PER_TID_MAX) /
			   HAL_NUM_MPDUS_PER_LINK_DESC;

	n_mpdu_queue_desc = n_mpdu_link_desc /
			    HAL_NUM_MPDU_LINKS_PER_QUEUE_DESC;

	n_tx_msdu_link_desc = (DP_NUM_TIDS_MAX * DP_AVG_FLOWS_PER_TID *
			       DP_AVG_MSDUS_PER_FLOW) /
			      HAL_NUM_TX_MSDUS_PER_LINK_DESC;

	n_rx_msdu_link_desc = (DP_NUM_TIDS_MAX * DP_AVG_MPDUS_PER_TID_MAX *
			       DP_AVG_MSDUS_PER_MPDU) /
			      HAL_NUM_RX_MSDUS_PER_LINK_DESC;

	*n_link_desc = n_mpdu_link_desc + n_mpdu_queue_desc +
		       n_tx_msdu_link_desc + n_rx_msdu_link_desc;

	if (*n_link_desc & (*n_link_desc - 1))
		*n_link_desc = 1 << fls(*n_link_desc);

	ret = ath12k_dp_srng_setup(ab, &dp->wbm_idle_ring,
				   HAL_WBM_IDLE_LINK, 0, 0, *n_link_desc);
	if (ret) {
		ath12k_warn(ab, "failed to setup wbm_idle_ring: %d\n", ret);
		return ret;
	}

	return ret;
}
*ab
,
781 struct dp_link_desc_bank
*link_desc_banks
,
782 u32 ring_type
, struct hal_srng
*srng
,
786 u32 n_link_desc_bank
, last_bank_sz
;
787 u32 entry_sz
, align_bytes
, n_entries
;
788 struct hal_wbm_link_desc
*desc
;
792 enum hal_rx_buf_return_buf_manager rbm
= ab
->dp
.idle_link_rbm
;
794 tot_mem_sz
= n_link_desc
* HAL_LINK_DESC_SIZE
;
795 tot_mem_sz
+= HAL_LINK_DESC_ALIGN
;
797 if (tot_mem_sz
<= DP_LINK_DESC_ALLOC_SIZE_THRESH
) {
798 n_link_desc_bank
= 1;
799 last_bank_sz
= tot_mem_sz
;
801 n_link_desc_bank
= tot_mem_sz
/
802 (DP_LINK_DESC_ALLOC_SIZE_THRESH
-
803 HAL_LINK_DESC_ALIGN
);
804 last_bank_sz
= tot_mem_sz
%
805 (DP_LINK_DESC_ALLOC_SIZE_THRESH
-
806 HAL_LINK_DESC_ALIGN
);
809 n_link_desc_bank
+= 1;
812 if (n_link_desc_bank
> DP_LINK_DESC_BANKS_MAX
)
815 ret
= ath12k_dp_link_desc_bank_alloc(ab
, link_desc_banks
,
816 n_link_desc_bank
, last_bank_sz
);
820 /* Setup link desc idle list for HW internal usage */
821 entry_sz
= ath12k_hal_srng_get_entrysize(ab
, ring_type
);
822 tot_mem_sz
= entry_sz
* n_link_desc
;
824 /* Setup scatter desc list when the total memory requirement is more */
825 if (tot_mem_sz
> DP_LINK_DESC_ALLOC_SIZE_THRESH
&&
826 ring_type
!= HAL_RXDMA_MONITOR_DESC
) {
827 ret
= ath12k_dp_scatter_idle_link_desc_setup(ab
, tot_mem_sz
,
832 ath12k_warn(ab
, "failed to setup scatting idle list descriptor :%d\n",
834 goto fail_desc_bank_free
;
840 spin_lock_bh(&srng
->lock
);
842 ath12k_hal_srng_access_begin(ab
, srng
);
844 for (i
= 0; i
< n_link_desc_bank
; i
++) {
845 align_bytes
= link_desc_banks
[i
].vaddr
-
846 link_desc_banks
[i
].vaddr_unaligned
;
847 n_entries
= (link_desc_banks
[i
].size
- align_bytes
) /
849 paddr
= link_desc_banks
[i
].paddr
;
851 (desc
= ath12k_hal_srng_src_get_next_entry(ab
, srng
))) {
852 cookie
= DP_LINK_DESC_COOKIE_SET(n_entries
, i
);
853 ath12k_hal_set_link_desc_addr(desc
, cookie
, paddr
, rbm
);
855 paddr
+= HAL_LINK_DESC_SIZE
;
859 ath12k_hal_srng_access_end(ab
, srng
);
861 spin_unlock_bh(&srng
->lock
);
866 ath12k_dp_link_desc_bank_free(ab
, link_desc_banks
);
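
/* NAPI poll entry point for an ext interrupt group: drain TX completions,
 * the RX error and WBM error rings, the regular RX rings and any monitor
 * destination rings mapped to this group, then replenish RX buffers.
 * Returns the total work done within the given budget.
 */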
int ath12k_dp_service_srng(struct ath12k_base *ab,
			   struct ath12k_ext_irq_grp *irq_grp,
			   int budget)
{
	struct napi_struct *napi = &irq_grp->napi;
	int grp_id = irq_grp->grp_id;
	int work_done = 0;
	int i = 0, j;
	int tot_work_done = 0;
	enum dp_monitor_mode monitor_mode;
	u8 ring_mask;

	if (ab->hw_params->ring_mask->tx[grp_id]) {
		i = fls(ab->hw_params->ring_mask->tx[grp_id]) - 1;
		ath12k_dp_tx_completion_handler(ab, i);
	}

	if (ab->hw_params->ring_mask->rx_err[grp_id]) {
		work_done = ath12k_dp_rx_process_err(ab, napi, budget);
		budget -= work_done;
		tot_work_done += work_done;
		if (budget <= 0)
			goto done;
	}

	if (ab->hw_params->ring_mask->rx_wbm_rel[grp_id]) {
		work_done = ath12k_dp_rx_process_wbm_err(ab,
							 napi,
							 budget);
		budget -= work_done;
		tot_work_done += work_done;

		if (budget <= 0)
			goto done;
	}

	if (ab->hw_params->ring_mask->rx[grp_id]) {
		i = fls(ab->hw_params->ring_mask->rx[grp_id]) - 1;
		work_done = ath12k_dp_rx_process(ab, i, napi,
						 budget);
		budget -= work_done;
		tot_work_done += work_done;
		if (budget <= 0)
			goto done;
	}

	if (ab->hw_params->ring_mask->rx_mon_dest[grp_id]) {
		monitor_mode = ATH12K_DP_RX_MONITOR_MODE;
		ring_mask = ab->hw_params->ring_mask->rx_mon_dest[grp_id];
		for (i = 0; i < ab->num_radios; i++) {
			for (j = 0; j < ab->hw_params->num_rxdma_per_pdev; j++) {
				int id = i * ab->hw_params->num_rxdma_per_pdev + j;

				if (ring_mask & BIT(id)) {
					work_done =
					ath12k_dp_mon_process_ring(ab, id, napi, budget,
								   monitor_mode);
					budget -= work_done;
					tot_work_done += work_done;

					if (budget <= 0)
						goto done;
				}
			}
		}
	}

	if (ab->hw_params->ring_mask->tx_mon_dest[grp_id]) {
		monitor_mode = ATH12K_DP_TX_MONITOR_MODE;
		ring_mask = ab->hw_params->ring_mask->tx_mon_dest[grp_id];
		for (i = 0; i < ab->num_radios; i++) {
			for (j = 0; j < ab->hw_params->num_rxdma_per_pdev; j++) {
				int id = i * ab->hw_params->num_rxdma_per_pdev + j;

				if (ring_mask & BIT(id)) {
					work_done =
					ath12k_dp_mon_process_ring(ab, id, napi, budget,
								   monitor_mode);
					budget -= work_done;
					tot_work_done += work_done;

					if (budget <= 0)
						goto done;
				}
			}
		}
	}

	if (ab->hw_params->ring_mask->reo_status[grp_id])
		ath12k_dp_rx_process_reo_status(ab);

	if (ab->hw_params->ring_mask->host2rxdma[grp_id]) {
		struct ath12k_dp *dp = &ab->dp;
		struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring;
		LIST_HEAD(list);

		ath12k_dp_rx_bufs_replenish(ab, rx_ring, &list, 0);
	}

	/* TODO: Implement handler for other interrupts */

done:
	return tot_work_done;
}
void ath12k_dp_pdev_free(struct ath12k_base *ab)
{
	int i;

	del_timer_sync(&ab->mon_reap_timer);

	for (i = 0; i < ab->num_radios; i++)
		ath12k_dp_rx_pdev_free(ab, i);
}
void ath12k_dp_pdev_pre_alloc(struct ath12k_base *ab)
{
	struct ath12k *ar;
	struct ath12k_pdev_dp *dp;
	int i;

	for (i = 0; i < ab->num_radios; i++) {
		ar = ab->pdevs[i].ar;
		dp = &ar->dp;
		dp->mac_id = i;
		atomic_set(&dp->num_tx_pending, 0);
		init_waitqueue_head(&dp->tx_empty_waitq);

		/* TODO: Add any RXDMA setup required per pdev */
	}
}
bool ath12k_dp_wmask_compaction_rx_tlv_supported(struct ath12k_base *ab)
{
	if (test_bit(WMI_TLV_SERVICE_WMSK_COMPACTION_RX_TLVS, ab->wmi_ab.svc_map) &&
	    ab->hw_params->hal_ops->rxdma_ring_wmask_rx_mpdu_start &&
	    ab->hw_params->hal_ops->rxdma_ring_wmask_rx_msdu_end &&
	    ab->hw_params->hal_ops->get_hal_rx_compact_ops) {
		return true;
	}
	return false;
}
void ath12k_dp_hal_rx_desc_init(struct ath12k_base *ab)
{
	if (ath12k_dp_wmask_compaction_rx_tlv_supported(ab)) {
		/* RX TLVS compaction is supported, hence change the hal_rx_ops
		 * to compact hal_rx_ops.
		 */
		ab->hal_rx_ops = ab->hw_params->hal_ops->get_hal_rx_compact_ops();
	}
	ab->hal.hal_desc_sz =
		ab->hal_rx_ops->rx_desc_get_desc_size();
}
static void ath12k_dp_service_mon_ring(struct timer_list *t)
{
	struct ath12k_base *ab = from_timer(ab, t, mon_reap_timer);
	int i;

	for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++)
		ath12k_dp_mon_process_ring(ab, i, NULL, DP_MON_SERVICE_BUDGET,
					   ATH12K_DP_RX_MONITOR_MODE);

	mod_timer(&ab->mon_reap_timer, jiffies +
		  msecs_to_jiffies(ATH12K_MON_TIMER_INTERVAL));
}
static void ath12k_dp_mon_reap_timer_init(struct ath12k_base *ab)
{
	if (ab->hw_params->rxdma1_enable)
		return;

	timer_setup(&ab->mon_reap_timer, ath12k_dp_service_mon_ring, 0);
}
int ath12k_dp_pdev_alloc(struct ath12k_base *ab)
{
	struct ath12k *ar;
	int ret;
	int i;

	ret = ath12k_dp_rx_htt_setup(ab);
	if (ret)
		goto out;

	ath12k_dp_mon_reap_timer_init(ab);

	/* TODO: Per-pdev rx ring unlike tx ring which is mapped to different AC's */
	for (i = 0; i < ab->num_radios; i++) {
		ar = ab->pdevs[i].ar;
		ret = ath12k_dp_rx_pdev_alloc(ab, i);
		if (ret) {
			ath12k_warn(ab, "failed to allocate pdev rx for pdev_id :%d\n",
				    i);
			goto err;
		}
		ret = ath12k_dp_rx_pdev_mon_attach(ar);
		if (ret) {
			ath12k_warn(ab, "failed to initialize mon pdev %d\n", i);
			goto err;
		}
	}

	return 0;

err:
	ath12k_dp_pdev_free(ab);
out:
	return ret;
}
int ath12k_dp_htt_connect(struct ath12k_dp *dp)
{
	struct ath12k_htc_svc_conn_req conn_req = {0};
	struct ath12k_htc_svc_conn_resp conn_resp = {0};
	int status;

	conn_req.ep_ops.ep_tx_complete = ath12k_dp_htt_htc_tx_complete;
	conn_req.ep_ops.ep_rx_complete = ath12k_dp_htt_htc_t2h_msg_handler;

	/* connect to control service */
	conn_req.service_id = ATH12K_HTC_SVC_ID_HTT_DATA_MSG;

	status = ath12k_htc_connect_service(&dp->ab->htc, &conn_req,
					    &conn_resp);
	if (status)
		return status;

	dp->eid = conn_resp.eid;

	return 0;
}
static void ath12k_dp_update_vdev_search(struct ath12k_link_vif *arvif)
{
	switch (arvif->ahvif->vdev_type) {
	case WMI_VDEV_TYPE_STA:
		/* TODO: Verify the search type and flags since ast hash
		 * is not part of peer mapv3
		 */
		arvif->hal_addr_search_flags = HAL_TX_ADDRY_EN;
		arvif->search_type = HAL_TX_ADDR_SEARCH_DEFAULT;
		break;
	case WMI_VDEV_TYPE_AP:
	case WMI_VDEV_TYPE_IBSS:
		arvif->hal_addr_search_flags = HAL_TX_ADDRX_EN;
		arvif->search_type = HAL_TX_ADDR_SEARCH_DEFAULT;
		break;
	case WMI_VDEV_TYPE_MONITOR:
	default:
		return;
	}
}
void ath12k_dp_vdev_tx_attach(struct ath12k *ar, struct ath12k_link_vif *arvif)
{
	struct ath12k_base *ab = ar->ab;

	arvif->tcl_metadata |= u32_encode_bits(1, HTT_TCL_META_DATA_TYPE) |
			       u32_encode_bits(arvif->vdev_id,
					       HTT_TCL_META_DATA_VDEV_ID) |
			       u32_encode_bits(ar->pdev->pdev_id,
					       HTT_TCL_META_DATA_PDEV_ID);

	/* set HTT extension valid bit to 0 by default */
	arvif->tcl_metadata &= ~HTT_TCL_META_DATA_VALID_HTT;

	ath12k_dp_update_vdev_search(arvif);
	arvif->vdev_id_check_en = true;
	arvif->bank_id = ath12k_dp_tx_get_bank_profile(ab, arvif, &ab->dp);

	/* TODO: error path for bank id failure */
	if (arvif->bank_id == DP_INVALID_BANK_ID) {
		ath12k_err(ar->ab, "Failed to initialize DP TX Banks");
		return;
	}
}
static void ath12k_dp_cc_cleanup(struct ath12k_base *ab)
{
	struct ath12k_rx_desc_info *desc_info;
	struct ath12k_tx_desc_info *tx_desc_info, *tmp1;
	struct ath12k_dp *dp = &ab->dp;
	struct ath12k_skb_cb *skb_cb;
	struct sk_buff *skb;
	struct ath12k *ar;
	int i, j;
	u32 pool_id, tx_spt_page;

	if (!dp->spt_info)
		return;

	/* RX Descriptor cleanup */
	spin_lock_bh(&dp->rx_desc_lock);

	for (i = 0; i < ATH12K_NUM_RX_SPT_PAGES; i++) {
		desc_info = dp->rxbaddr[i];

		for (j = 0; j < ATH12K_MAX_SPT_ENTRIES; j++) {
			if (!desc_info[j].in_use) {
				list_del(&desc_info[j].list);
				continue;
			}

			skb = desc_info[j].skb;
			if (!skb)
				continue;

			dma_unmap_single(ab->dev, ATH12K_SKB_RXCB(skb)->paddr,
					 skb->len + skb_tailroom(skb), DMA_FROM_DEVICE);
			dev_kfree_skb_any(skb);
		}
	}

	for (i = 0; i < ATH12K_NUM_RX_SPT_PAGES; i++) {
		if (!dp->rxbaddr[i])
			continue;

		kfree(dp->rxbaddr[i]);
		dp->rxbaddr[i] = NULL;
	}

	spin_unlock_bh(&dp->rx_desc_lock);

	/* TX Descriptor cleanup */
	for (i = 0; i < ATH12K_HW_MAX_QUEUES; i++) {
		spin_lock_bh(&dp->tx_desc_lock[i]);

		list_for_each_entry_safe(tx_desc_info, tmp1, &dp->tx_desc_used_list[i],
					 list) {
			list_del(&tx_desc_info->list);
			skb = tx_desc_info->skb;

			if (!skb)
				continue;

			/* if we are unregistering, hw would've been destroyed and
			 * ar is no longer valid.
			 */
			if (!(test_bit(ATH12K_FLAG_UNREGISTERING, &ab->dev_flags))) {
				skb_cb = ATH12K_SKB_CB(skb);
				ar = skb_cb->ar;

				if (atomic_dec_and_test(&ar->dp.num_tx_pending))
					wake_up(&ar->dp.tx_empty_waitq);
			}

			dma_unmap_single(ab->dev, ATH12K_SKB_CB(skb)->paddr,
					 skb->len, DMA_TO_DEVICE);
			dev_kfree_skb_any(skb);
		}

		spin_unlock_bh(&dp->tx_desc_lock[i]);
	}

	for (pool_id = 0; pool_id < ATH12K_HW_MAX_QUEUES; pool_id++) {
		spin_lock_bh(&dp->tx_desc_lock[pool_id]);

		for (i = 0; i < ATH12K_TX_SPT_PAGES_PER_POOL; i++) {
			tx_spt_page = i + pool_id * ATH12K_TX_SPT_PAGES_PER_POOL;
			if (!dp->txbaddr[tx_spt_page])
				continue;

			kfree(dp->txbaddr[tx_spt_page]);
			dp->txbaddr[tx_spt_page] = NULL;
		}

		spin_unlock_bh(&dp->tx_desc_lock[pool_id]);
	}

	/* unmap SPT pages */
	for (i = 0; i < dp->num_spt_pages; i++) {
		if (!dp->spt_info[i].vaddr)
			continue;

		dma_free_coherent(ab->dev, ATH12K_PAGE_SIZE,
				  dp->spt_info[i].vaddr, dp->spt_info[i].paddr);
		dp->spt_info[i].vaddr = NULL;
	}

	kfree(dp->spt_info);
	dp->spt_info = NULL;
}
static void ath12k_dp_reoq_lut_cleanup(struct ath12k_base *ab)
{
	struct ath12k_dp *dp = &ab->dp;

	if (!ab->hw_params->reoq_lut_support)
		return;

	if (!dp->reoq_lut.vaddr)
		return;

	dma_free_coherent(ab->dev, DP_REOQ_LUT_SIZE,
			  dp->reoq_lut.vaddr, dp->reoq_lut.paddr);
	dp->reoq_lut.vaddr = NULL;

	ath12k_hif_write32(ab,
			   HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO1_QDESC_LUT_BASE0(ab), 0);
}
void ath12k_dp_free(struct ath12k_base *ab)
{
	struct ath12k_dp *dp = &ab->dp;
	int i;

	ath12k_dp_link_desc_cleanup(ab, dp->link_desc_banks,
				    HAL_WBM_IDLE_LINK, &dp->wbm_idle_ring);

	ath12k_dp_cc_cleanup(ab);
	ath12k_dp_reoq_lut_cleanup(ab);
	ath12k_dp_deinit_bank_profiles(ab);
	ath12k_dp_srng_common_cleanup(ab);

	ath12k_dp_rx_reo_cmd_list_cleanup(ab);

	for (i = 0; i < ab->hw_params->max_tx_ring; i++) {
		kfree(dp->tx_ring[i].tx_status);
		dp->tx_ring[i].tx_status = NULL;
	}

	ath12k_dp_rx_free(ab);
	/* Deinit any SOC level resource */
}
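
/* Program hardware cookie conversion (CC): write the CMEM base and the
 * PPT/SPT MSB split into both the REO and WBM blocks so the hardware can
 * translate a SW cookie directly into the descriptor virtual address
 * stored in the SPT pages, and enable cookie conversion on the WBM2SW
 * release rings.
 */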
void ath12k_dp_cc_config(struct ath12k_base *ab)
{
	u32 cmem_base = ab->qmi.dev_mem[ATH12K_QMI_DEVMEM_CMEM_INDEX].start;
	u32 reo_base = HAL_SEQ_WCSS_UMAC_REO_REG;
	u32 wbm_base = HAL_SEQ_WCSS_UMAC_WBM_REG;
	u32 val = 0;

	ath12k_hif_write32(ab, reo_base + HAL_REO1_SW_COOKIE_CFG0(ab), cmem_base);

	val |= u32_encode_bits(ATH12K_CMEM_ADDR_MSB,
			       HAL_REO1_SW_COOKIE_CFG_CMEM_BASE_ADDR_MSB) |
	       u32_encode_bits(ATH12K_CC_PPT_MSB,
			       HAL_REO1_SW_COOKIE_CFG_COOKIE_PPT_MSB) |
	       u32_encode_bits(ATH12K_CC_SPT_MSB,
			       HAL_REO1_SW_COOKIE_CFG_COOKIE_SPT_MSB) |
	       u32_encode_bits(1, HAL_REO1_SW_COOKIE_CFG_ALIGN) |
	       u32_encode_bits(1, HAL_REO1_SW_COOKIE_CFG_ENABLE) |
	       u32_encode_bits(1, HAL_REO1_SW_COOKIE_CFG_GLOBAL_ENABLE);

	ath12k_hif_write32(ab, reo_base + HAL_REO1_SW_COOKIE_CFG1(ab), val);

	/* Enable HW CC for WBM */
	ath12k_hif_write32(ab, wbm_base + HAL_WBM_SW_COOKIE_CFG0, cmem_base);

	val = u32_encode_bits(ATH12K_CMEM_ADDR_MSB,
			      HAL_WBM_SW_COOKIE_CFG_CMEM_BASE_ADDR_MSB) |
	      u32_encode_bits(ATH12K_CC_PPT_MSB,
			      HAL_WBM_SW_COOKIE_CFG_COOKIE_PPT_MSB) |
	      u32_encode_bits(ATH12K_CC_SPT_MSB,
			      HAL_WBM_SW_COOKIE_CFG_COOKIE_SPT_MSB) |
	      u32_encode_bits(1, HAL_WBM_SW_COOKIE_CFG_ALIGN);

	ath12k_hif_write32(ab, wbm_base + HAL_WBM_SW_COOKIE_CFG1, val);

	/* Enable conversion complete indication */
	val = ath12k_hif_read32(ab, wbm_base + HAL_WBM_SW_COOKIE_CFG2);
	val |= u32_encode_bits(1, HAL_WBM_SW_COOKIE_CFG_RELEASE_PATH_EN) |
	       u32_encode_bits(1, HAL_WBM_SW_COOKIE_CFG_ERR_PATH_EN) |
	       u32_encode_bits(1, HAL_WBM_SW_COOKIE_CFG_CONV_IND_EN);

	ath12k_hif_write32(ab, wbm_base + HAL_WBM_SW_COOKIE_CFG2, val);

	/* Enable Cookie conversion for WBM2SW Rings */
	val = ath12k_hif_read32(ab, wbm_base + HAL_WBM_SW_COOKIE_CONVERT_CFG);
	val |= u32_encode_bits(1, HAL_WBM_SW_COOKIE_CONV_CFG_GLOBAL_EN) |
	       ab->hw_params->hal_params->wbm2sw_cc_enable;

	ath12k_hif_write32(ab, wbm_base + HAL_WBM_SW_COOKIE_CONVERT_CFG, val);
}
static u32 ath12k_dp_cc_cookie_gen(u16 ppt_idx, u16 spt_idx)
{
	return (u32)ppt_idx << ATH12K_CC_PPT_SHIFT | spt_idx;
}
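
/* A cookie carries the PPT (page) index in its upper bits and the SPT
 * (entry) index in its lower bits; the helpers below turn a cookie back
 * into the descriptor pointer stored in the corresponding SPT slot.
 */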
static inline void *ath12k_dp_cc_get_desc_addr_ptr(struct ath12k_base *ab,
						   u16 ppt_idx, u16 spt_idx)
{
	struct ath12k_dp *dp = &ab->dp;

	return dp->spt_info[ppt_idx].vaddr + spt_idx;
}
struct ath12k_rx_desc_info *ath12k_dp_get_rx_desc(struct ath12k_base *ab,
						  u32 cookie)
{
	struct ath12k_dp *dp = &ab->dp;
	struct ath12k_rx_desc_info **desc_addr_ptr;
	u16 start_ppt_idx, end_ppt_idx, ppt_idx, spt_idx;

	ppt_idx = u32_get_bits(cookie, ATH12K_DP_CC_COOKIE_PPT);
	spt_idx = u32_get_bits(cookie, ATH12K_DP_CC_COOKIE_SPT);

	start_ppt_idx = dp->rx_ppt_base + ATH12K_RX_SPT_PAGE_OFFSET;
	end_ppt_idx = start_ppt_idx + ATH12K_NUM_RX_SPT_PAGES;

	if (ppt_idx < start_ppt_idx ||
	    ppt_idx >= end_ppt_idx ||
	    spt_idx > ATH12K_MAX_SPT_ENTRIES)
		return NULL;

	ppt_idx = ppt_idx - dp->rx_ppt_base;
	desc_addr_ptr = ath12k_dp_cc_get_desc_addr_ptr(ab, ppt_idx, spt_idx);

	return *desc_addr_ptr;
}
struct ath12k_tx_desc_info *ath12k_dp_get_tx_desc(struct ath12k_base *ab,
						  u32 cookie)
{
	struct ath12k_tx_desc_info **desc_addr_ptr;
	u16 start_ppt_idx, end_ppt_idx, ppt_idx, spt_idx;

	ppt_idx = u32_get_bits(cookie, ATH12K_DP_CC_COOKIE_PPT);
	spt_idx = u32_get_bits(cookie, ATH12K_DP_CC_COOKIE_SPT);

	start_ppt_idx = ATH12K_TX_SPT_PAGE_OFFSET;
	end_ppt_idx = start_ppt_idx +
		      (ATH12K_TX_SPT_PAGES_PER_POOL * ATH12K_HW_MAX_QUEUES);

	if (ppt_idx < start_ppt_idx ||
	    ppt_idx >= end_ppt_idx ||
	    spt_idx > ATH12K_MAX_SPT_ENTRIES)
		return NULL;

	desc_addr_ptr = ath12k_dp_cc_get_desc_addr_ptr(ab, ppt_idx, spt_idx);

	return *desc_addr_ptr;
}
static int ath12k_dp_cc_desc_init(struct ath12k_base *ab)
{
	struct ath12k_dp *dp = &ab->dp;
	struct ath12k_rx_desc_info *rx_descs, **rx_desc_addr;
	struct ath12k_tx_desc_info *tx_descs, **tx_desc_addr;
	u32 i, j, pool_id, tx_spt_page;
	u32 ppt_idx, cookie_ppt_idx;

	spin_lock_bh(&dp->rx_desc_lock);

	/* First ATH12K_NUM_RX_SPT_PAGES of allocated SPT pages are used for RX */
	for (i = 0; i < ATH12K_NUM_RX_SPT_PAGES; i++) {
		rx_descs = kcalloc(ATH12K_MAX_SPT_ENTRIES, sizeof(*rx_descs),
				   GFP_ATOMIC);

		if (!rx_descs) {
			spin_unlock_bh(&dp->rx_desc_lock);
			return -ENOMEM;
		}

		ppt_idx = ATH12K_RX_SPT_PAGE_OFFSET + i;
		cookie_ppt_idx = dp->rx_ppt_base + ppt_idx;
		dp->rxbaddr[i] = &rx_descs[0];

		for (j = 0; j < ATH12K_MAX_SPT_ENTRIES; j++) {
			rx_descs[j].cookie = ath12k_dp_cc_cookie_gen(cookie_ppt_idx, j);
			rx_descs[j].magic = ATH12K_DP_RX_DESC_MAGIC;
			list_add_tail(&rx_descs[j].list, &dp->rx_desc_free_list);

			/* Update descriptor VA in SPT */
			rx_desc_addr = ath12k_dp_cc_get_desc_addr_ptr(ab, ppt_idx, j);
			*rx_desc_addr = &rx_descs[j];
		}
	}

	spin_unlock_bh(&dp->rx_desc_lock);

	for (pool_id = 0; pool_id < ATH12K_HW_MAX_QUEUES; pool_id++) {
		spin_lock_bh(&dp->tx_desc_lock[pool_id]);
		for (i = 0; i < ATH12K_TX_SPT_PAGES_PER_POOL; i++) {
			tx_descs = kcalloc(ATH12K_MAX_SPT_ENTRIES, sizeof(*tx_descs),
					   GFP_ATOMIC);

			if (!tx_descs) {
				spin_unlock_bh(&dp->tx_desc_lock[pool_id]);
				/* Caller takes care of TX pending and RX desc cleanup */
				return -ENOMEM;
			}

			tx_spt_page = i + pool_id * ATH12K_TX_SPT_PAGES_PER_POOL;
			ppt_idx = ATH12K_TX_SPT_PAGE_OFFSET + tx_spt_page;

			dp->txbaddr[tx_spt_page] = &tx_descs[0];

			for (j = 0; j < ATH12K_MAX_SPT_ENTRIES; j++) {
				tx_descs[j].desc_id = ath12k_dp_cc_cookie_gen(ppt_idx, j);
				tx_descs[j].pool_id = pool_id;
				list_add_tail(&tx_descs[j].list,
					      &dp->tx_desc_free_list[pool_id]);

				/* Update descriptor VA in SPT */
				tx_desc_addr =
					ath12k_dp_cc_get_desc_addr_ptr(ab, ppt_idx, j);
				*tx_desc_addr = &tx_descs[j];
			}
		}
		spin_unlock_bh(&dp->tx_desc_lock[pool_id]);
	}

	return 0;
}
*ab
,
1480 struct ath12k_dp
*dp
,
1481 enum ath12k_dp_desc_type type
)
1486 cmem_base
= ab
->qmi
.dev_mem
[ATH12K_QMI_DEVMEM_CMEM_INDEX
].start
;
1489 case ATH12K_DP_TX_DESC
:
1490 start
= ATH12K_TX_SPT_PAGE_OFFSET
;
1491 end
= start
+ ATH12K_NUM_TX_SPT_PAGES
;
1493 case ATH12K_DP_RX_DESC
:
1494 cmem_base
+= ATH12K_PPT_ADDR_OFFSET(dp
->rx_ppt_base
);
1495 start
= ATH12K_RX_SPT_PAGE_OFFSET
;
1496 end
= start
+ ATH12K_NUM_RX_SPT_PAGES
;
1499 ath12k_err(ab
, "invalid descriptor type %d in cmem init\n", type
);
1503 /* Write to PPT in CMEM */
1504 for (i
= start
; i
< end
; i
++)
1505 ath12k_hif_write32(ab
, cmem_base
+ ATH12K_PPT_ADDR_OFFSET(i
),
1506 dp
->spt_info
[i
].paddr
>> ATH12K_SPT_4K_ALIGN_OFFSET
);
static int ath12k_dp_cc_init(struct ath12k_base *ab)
{
	struct ath12k_dp *dp = &ab->dp;
	int i, ret = 0;

	INIT_LIST_HEAD(&dp->rx_desc_free_list);
	spin_lock_init(&dp->rx_desc_lock);

	for (i = 0; i < ATH12K_HW_MAX_QUEUES; i++) {
		INIT_LIST_HEAD(&dp->tx_desc_free_list[i]);
		INIT_LIST_HEAD(&dp->tx_desc_used_list[i]);
		spin_lock_init(&dp->tx_desc_lock[i]);
	}

	dp->num_spt_pages = ATH12K_NUM_SPT_PAGES;
	if (dp->num_spt_pages > ATH12K_MAX_PPT_ENTRIES)
		dp->num_spt_pages = ATH12K_MAX_PPT_ENTRIES;

	dp->spt_info = kcalloc(dp->num_spt_pages, sizeof(struct ath12k_spt_info),
			       GFP_KERNEL);

	if (!dp->spt_info) {
		ath12k_warn(ab, "SPT page allocation failure");
		return -ENOMEM;
	}

	dp->rx_ppt_base = ab->device_id * ATH12K_NUM_RX_SPT_PAGES;

	for (i = 0; i < dp->num_spt_pages; i++) {
		dp->spt_info[i].vaddr = dma_alloc_coherent(ab->dev,
							   ATH12K_PAGE_SIZE,
							   &dp->spt_info[i].paddr,
							   GFP_KERNEL);

		if (!dp->spt_info[i].vaddr) {
			ret = -ENOMEM;
			goto free;
		}

		if (dp->spt_info[i].paddr & ATH12K_SPT_4K_ALIGN_CHECK) {
			ath12k_warn(ab, "SPT allocated memory is not 4K aligned");
			ret = -EINVAL;
			goto free;
		}
	}

	ret = ath12k_dp_cmem_init(ab, dp, ATH12K_DP_TX_DESC);
	if (ret) {
		ath12k_warn(ab, "HW CC Tx cmem init failed %d", ret);
		goto free;
	}

	ret = ath12k_dp_cmem_init(ab, dp, ATH12K_DP_RX_DESC);
	if (ret) {
		ath12k_warn(ab, "HW CC Rx cmem init failed %d", ret);
		goto free;
	}

	ret = ath12k_dp_cc_desc_init(ab);
	if (ret) {
		ath12k_warn(ab, "HW CC desc init failed %d", ret);
		goto free;
	}

	return 0;

free:
	ath12k_dp_cc_cleanup(ab);

	return ret;
}
static int ath12k_dp_reoq_lut_setup(struct ath12k_base *ab)
{
	struct ath12k_dp *dp = &ab->dp;

	if (!ab->hw_params->reoq_lut_support)
		return 0;

	dp->reoq_lut.vaddr = dma_alloc_coherent(ab->dev,
						DP_REOQ_LUT_SIZE,
						&dp->reoq_lut.paddr,
						GFP_KERNEL | __GFP_ZERO);
	if (!dp->reoq_lut.vaddr) {
		ath12k_warn(ab, "failed to allocate memory for reoq table");
		return -ENOMEM;
	}

	ath12k_hif_write32(ab, HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO1_QDESC_LUT_BASE0(ab),
			   dp->reoq_lut.paddr);

	return 0;
}
static enum hal_rx_buf_return_buf_manager
ath12k_dp_get_idle_link_rbm(struct ath12k_base *ab)
{
	switch (ab->device_id) {
	case 0:
		return HAL_RX_BUF_RBM_WBM_DEV0_IDLE_DESC_LIST;
	case 1:
		return HAL_RX_BUF_RBM_WBM_DEV1_IDLE_DESC_LIST;
	case 2:
		return HAL_RX_BUF_RBM_WBM_DEV2_IDLE_DESC_LIST;
	default:
		ath12k_warn(ab, "invalid %d device id, so choose default rbm\n",
			    ab->device_id);
		return HAL_RX_BUF_RBM_WBM_DEV0_IDLE_DESC_LIST;
	}
}
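
/* Top-level DP allocation: WBM idle link ring and link descriptors, cookie
 * conversion, TX bank profiles, common SRNGs, the REO queue LUT (when
 * supported), per-ring TX status bookkeeping and the RX data path, with
 * staged unwind labels for each failure point.
 */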
int ath12k_dp_alloc(struct ath12k_base *ab)
{
	struct ath12k_dp *dp = &ab->dp;
	struct hal_srng *srng = NULL;
	size_t size = 0;
	u32 n_link_desc = 0;
	int ret;
	int i;

	dp->ab = ab;

	INIT_LIST_HEAD(&dp->reo_cmd_list);
	INIT_LIST_HEAD(&dp->reo_cmd_cache_flush_list);
	spin_lock_init(&dp->reo_cmd_lock);

	dp->reo_cmd_cache_flush_count = 0;
	dp->idle_link_rbm = ath12k_dp_get_idle_link_rbm(ab);

	ret = ath12k_wbm_idle_ring_setup(ab, &n_link_desc);
	if (ret) {
		ath12k_warn(ab, "failed to setup wbm_idle_ring: %d\n", ret);
		return ret;
	}

	srng = &ab->hal.srng_list[dp->wbm_idle_ring.ring_id];

	ret = ath12k_dp_link_desc_setup(ab, dp->link_desc_banks,
					HAL_WBM_IDLE_LINK, srng, n_link_desc);
	if (ret) {
		ath12k_warn(ab, "failed to setup link desc: %d\n", ret);
		return ret;
	}

	ret = ath12k_dp_cc_init(ab);
	if (ret) {
		ath12k_warn(ab, "failed to setup cookie converter %d\n", ret);
		goto fail_link_desc_cleanup;
	}

	ret = ath12k_dp_init_bank_profiles(ab);
	if (ret) {
		ath12k_warn(ab, "failed to setup bank profiles %d\n", ret);
		goto fail_hw_cc_cleanup;
	}

	ret = ath12k_dp_srng_common_setup(ab);
	if (ret)
		goto fail_dp_bank_profiles_cleanup;

	size = sizeof(struct hal_wbm_release_ring_tx) * DP_TX_COMP_RING_SIZE;

	ret = ath12k_dp_reoq_lut_setup(ab);
	if (ret) {
		ath12k_warn(ab, "failed to setup reoq table %d\n", ret);
		goto fail_cmn_srng_cleanup;
	}

	for (i = 0; i < ab->hw_params->max_tx_ring; i++) {
		dp->tx_ring[i].tcl_data_ring_id = i;

		dp->tx_ring[i].tx_status_head = 0;
		dp->tx_ring[i].tx_status_tail = DP_TX_COMP_RING_SIZE - 1;
		dp->tx_ring[i].tx_status = kmalloc(size, GFP_KERNEL);
		if (!dp->tx_ring[i].tx_status) {
			ret = -ENOMEM;
			/* FIXME: The allocated tx status is not freed
			 * properly here
			 */
			goto fail_cmn_reoq_cleanup;
		}
	}

	for (i = 0; i < HAL_DSCP_TID_MAP_TBL_NUM_ENTRIES_MAX; i++)
		ath12k_hal_tx_set_dscp_tid_map(ab, i);

	ret = ath12k_dp_rx_alloc(ab);
	if (ret)
		goto fail_dp_rx_free;

	/* Init any SOC level resource for DP */

	return 0;

fail_dp_rx_free:
	ath12k_dp_rx_free(ab);

fail_cmn_reoq_cleanup:
	ath12k_dp_reoq_lut_cleanup(ab);

fail_cmn_srng_cleanup:
	ath12k_dp_srng_common_cleanup(ab);

fail_dp_bank_profiles_cleanup:
	ath12k_dp_deinit_bank_profiles(ab);

fail_hw_cc_cleanup:
	ath12k_dp_cc_cleanup(ab);

fail_link_desc_cleanup:
	ath12k_dp_link_desc_cleanup(ab, dp->link_desc_banks,
				    HAL_WBM_IDLE_LINK, &dp->wbm_idle_ring);

	return ret;
}