// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
 * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
 */
const struct ce_attr ath12k_host_ce_config_qcn9274[] = {
	/* CE0: host->target HTC control and raw streams */
	{
		.flags = CE_ATTR_FLAGS,
	},

	/* CE1: target->host HTT + HTC control */
	{
		.flags = CE_ATTR_FLAGS,
		.recv_cb = ath12k_htc_rx_completion_handler,
	},

	/* CE2: target->host WMI */
	{
		.flags = CE_ATTR_FLAGS,
		.recv_cb = ath12k_htc_rx_completion_handler,
	},

	/* CE3: host->target WMI (mac0) */
	{
		.flags = CE_ATTR_FLAGS,
	},

	/* CE4: host->target HTT */
	{
		.flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
	},

	/* CE5: target->host pktlog */
	{
		.flags = CE_ATTR_FLAGS,
		.recv_cb = ath12k_dp_htt_htc_t2h_msg_handler,
	},

	/* CE6: target autonomous hif_memcpy */
	{
		.flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
	},

	/* CE7: host->target WMI (mac1) */
	{
		.flags = CE_ATTR_FLAGS,
	},

	/* CE8: target autonomous hif_memcpy */
	{
		.flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
	},

	/* CE9 */
	{
		.flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
	},

	/* CE10 */
	{
		.flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
	},

	/* CE11 */
	{
		.flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
	},

	/* CE12: CV Prefetch */
	{
		.flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
	},

	/* CE13: CV Prefetch */
	{
		.flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
	},

	/* CE14: target->host dbg log */
	{
		.flags = CE_ATTR_FLAGS,
		.dest_nentries = 512,
		.recv_cb = ath12k_htc_rx_completion_handler,
	},

	/* CE15: reserved for future use */
	{
		.flags = (CE_ATTR_FLAGS | CE_ATTR_DIS_INTR),
	},
};
const struct ce_attr ath12k_host_ce_config_wcn7850[] = {
	/* CE0: host->target HTC control and raw streams */
	{
		.flags = CE_ATTR_FLAGS,
	},

	/* CE1: target->host HTT + HTC control */
	{
		.flags = CE_ATTR_FLAGS,
		.dest_nentries = 512,
		.recv_cb = ath12k_htc_rx_completion_handler,
	},

	/* CE2: target->host WMI */
	{
		.flags = CE_ATTR_FLAGS,
		.recv_cb = ath12k_htc_rx_completion_handler,
	},

	/* CE3: host->target WMI (mac0) */
	{
		.flags = CE_ATTR_FLAGS,
	},

	/* CE4: host->target HTT */
	{
		.flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
		.src_nentries = 2048,
	},

	/* CE5: target->host pktlog */
	{
		.flags = CE_ATTR_FLAGS,
	},

	/* CE6: target autonomous hif_memcpy */
	{
		.flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
	},

	/* CE7: host->target WMI (mac1) */
	{
		.flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
	},

	/* CE8: target autonomous hif_memcpy */
	{
		.flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
	},
};
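/* Enqueue one receive buffer (skb and its DMA address) on the pipe's
 * destination ring. The caller must hold ab->ce.ce_lock; the destination
 * SRNG lock is taken internally.
 */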
static int ath12k_ce_rx_buf_enqueue_pipe(struct ath12k_ce_pipe *pipe,
					 struct sk_buff *skb, dma_addr_t paddr)
{
	struct ath12k_base *ab = pipe->ab;
	struct ath12k_ce_ring *ring = pipe->dest_ring;
	struct hal_srng *srng;
	unsigned int write_index;
	unsigned int nentries_mask = ring->nentries_mask;
	struct hal_ce_srng_dest_desc *desc;
	int ret;

	lockdep_assert_held(&ab->ce.ce_lock);

	write_index = ring->write_index;

	srng = &ab->hal.srng_list[ring->hal_ring_id];

	spin_lock_bh(&srng->lock);

	ath12k_hal_srng_access_begin(ab, srng);

	if (unlikely(ath12k_hal_srng_src_num_free(ab, srng, false) < 1)) {
		ret = -ENOSPC;
		goto exit;
	}

	desc = ath12k_hal_srng_src_get_next_entry(ab, srng);
	if (!desc) {
		ret = -ENOSPC;
		goto exit;
	}

	ath12k_hal_ce_dst_set_desc(desc, paddr);

	ring->skb[write_index] = skb;
	write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
	ring->write_index = write_index;

	pipe->rx_buf_needed--;

	ret = 0;
exit:
	ath12k_hal_srng_access_end(ab, srng);

	spin_unlock_bh(&srng->lock);

	return ret;
}
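/* Replenish the destination ring of a pipe: allocate, DMA-map and enqueue
 * receive skbs until pipe->rx_buf_needed drops to zero.
 */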
static int ath12k_ce_rx_post_pipe(struct ath12k_ce_pipe *pipe)
{
	struct ath12k_base *ab = pipe->ab;
	struct sk_buff *skb;
	dma_addr_t paddr;
	int ret = 0;

	if (!(pipe->dest_ring || pipe->status_ring))
		return 0;

	spin_lock_bh(&ab->ce.ce_lock);
	while (pipe->rx_buf_needed) {
		skb = dev_alloc_skb(pipe->buf_sz);
		if (!skb) {
			ret = -ENOMEM;
			goto exit;
		}

		WARN_ON_ONCE(!IS_ALIGNED((unsigned long)skb->data, 4));

		paddr = dma_map_single(ab->dev, skb->data,
				       skb->len + skb_tailroom(skb),
				       DMA_FROM_DEVICE);
		if (unlikely(dma_mapping_error(ab->dev, paddr))) {
			ath12k_warn(ab, "failed to dma map ce rx buf\n");
			dev_kfree_skb_any(skb);
			ret = -EIO;
			goto exit;
		}

		ATH12K_SKB_RXCB(skb)->paddr = paddr;

		ret = ath12k_ce_rx_buf_enqueue_pipe(pipe, skb, paddr);
		if (ret) {
			ath12k_warn(ab, "failed to enqueue rx buf: %d\n", ret);
			dma_unmap_single(ab->dev, paddr,
					 skb->len + skb_tailroom(skb),
					 DMA_FROM_DEVICE);
			dev_kfree_skb_any(skb);
			goto exit;
		}
	}

exit:
	spin_unlock_bh(&ab->ce.ce_lock);
	return ret;
}
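/* Reap the next completed receive buffer from the destination/status rings
 * and report its length through @nbytes.
 */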
static int ath12k_ce_completed_recv_next(struct ath12k_ce_pipe *pipe,
					 struct sk_buff **skb, int *nbytes)
{
	struct ath12k_base *ab = pipe->ab;
	struct hal_ce_srng_dst_status_desc *desc;
	struct hal_srng *srng;
	unsigned int sw_index;
	unsigned int nentries_mask;
	int ret = 0;

	spin_lock_bh(&ab->ce.ce_lock);

	sw_index = pipe->dest_ring->sw_index;
	nentries_mask = pipe->dest_ring->nentries_mask;

	srng = &ab->hal.srng_list[pipe->status_ring->hal_ring_id];

	spin_lock_bh(&srng->lock);

	ath12k_hal_srng_access_begin(ab, srng);

	desc = ath12k_hal_srng_dst_get_next_entry(ab, srng);
	if (!desc) {
		ret = -EIO;
		goto err;
	}

	*nbytes = ath12k_hal_ce_dst_status_get_length(desc);
	if (*nbytes == 0) {
		ret = -EIO;
		goto err;
	}

	*skb = pipe->dest_ring->skb[sw_index];
	pipe->dest_ring->skb[sw_index] = NULL;

	sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
	pipe->dest_ring->sw_index = sw_index;

	pipe->rx_buf_needed++;
err:
	ath12k_hal_srng_access_end(ab, srng);

	spin_unlock_bh(&srng->lock);

	spin_unlock_bh(&ab->ce.ce_lock);

	return ret;
}
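/* Drain all completed receive buffers from a pipe, hand them to the
 * registered recv_cb and re-post fresh buffers afterwards.
 */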
static void ath12k_ce_recv_process_cb(struct ath12k_ce_pipe *pipe)
{
	struct ath12k_base *ab = pipe->ab;
	struct sk_buff *skb;
	struct sk_buff_head list;
	unsigned int nbytes, max_nbytes;
	int ret;

	__skb_queue_head_init(&list);
	while (ath12k_ce_completed_recv_next(pipe, &skb, &nbytes) == 0) {
		max_nbytes = skb->len + skb_tailroom(skb);
		dma_unmap_single(ab->dev, ATH12K_SKB_RXCB(skb)->paddr,
				 max_nbytes, DMA_FROM_DEVICE);

		if (unlikely(max_nbytes < nbytes)) {
			ath12k_warn(ab, "rxed more than expected (nbytes %d, max %d)",
				    nbytes, max_nbytes);
			dev_kfree_skb_any(skb);
			continue;
		}

		skb_put(skb, nbytes);
		__skb_queue_tail(&list, skb);
	}

	while ((skb = __skb_dequeue(&list))) {
		ath12k_dbg(ab, ATH12K_DBG_AHB, "rx ce pipe %d len %d\n",
			   pipe->pipe_num, skb->len);
		pipe->recv_cb(ab, skb);
	}

	ret = ath12k_ce_rx_post_pipe(pipe);
	if (ret && ret != -ENOSPC) {
		ath12k_warn(ab, "failed to post rx buf to pipe: %d err: %d\n",
			    pipe->pipe_num, ret);
		mod_timer(&ab->rx_replenish_retry,
			  jiffies + ATH12K_CE_RX_POST_RETRY_JIFFIES);
	}
}
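/* Reap the next completed transmit skb from the source ring, or return an
 * ERR_PTR() if nothing has completed.
 */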
static struct sk_buff *ath12k_ce_completed_send_next(struct ath12k_ce_pipe *pipe)
{
	struct ath12k_base *ab = pipe->ab;
	struct hal_ce_srng_src_desc *desc;
	struct hal_srng *srng;
	unsigned int sw_index;
	unsigned int nentries_mask;
	struct sk_buff *skb;

	spin_lock_bh(&ab->ce.ce_lock);

	sw_index = pipe->src_ring->sw_index;
	nentries_mask = pipe->src_ring->nentries_mask;

	srng = &ab->hal.srng_list[pipe->src_ring->hal_ring_id];

	spin_lock_bh(&srng->lock);

	ath12k_hal_srng_access_begin(ab, srng);

	desc = ath12k_hal_srng_src_reap_next(ab, srng);
	if (!desc) {
		skb = ERR_PTR(-EIO);
		goto err_unlock;
	}

	skb = pipe->src_ring->skb[sw_index];

	pipe->src_ring->skb[sw_index] = NULL;

	sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
	pipe->src_ring->sw_index = sw_index;

err_unlock:
	spin_unlock_bh(&srng->lock);

	spin_unlock_bh(&ab->ce.ce_lock);

	return skb;
}
static void ath12k_ce_send_done_cb(struct ath12k_ce_pipe *pipe)
{
	struct ath12k_base *ab = pipe->ab;
	struct sk_buff *skb;

	while (!IS_ERR(skb = ath12k_ce_completed_send_next(pipe))) {
		if (!skb)
			continue;

		dma_unmap_single(ab->dev, ATH12K_SKB_CB(skb)->paddr, skb->len,
				 DMA_TO_DEVICE);
		dev_kfree_skb_any(skb);
	}
}
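/* Fill in the MSI address and data of a CE SRNG based on the MSI vectors
 * provided by the HIF layer.
 */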
static void ath12k_ce_srng_msi_ring_params_setup(struct ath12k_base *ab, u32 ce_id,
						 struct hal_srng_params *ring_params)
{
	u32 msi_data_start;
	u32 msi_data_count, msi_data_idx;
	u32 msi_irq_start;
	u32 addr_lo;
	u32 addr_hi;
	int ret;

	ret = ath12k_hif_get_user_msi_vector(ab, "CE",
					     &msi_data_count, &msi_data_start,
					     &msi_irq_start);
	if (ret)
		return;

	ath12k_hif_get_msi_address(ab, &addr_lo, &addr_hi);
	ath12k_hif_get_ce_msi_idx(ab, ce_id, &msi_data_idx);

	ring_params->msi_addr = addr_lo;
	ring_params->msi_addr |= (dma_addr_t)(((uint64_t)addr_hi) << 32);
	ring_params->msi_data = (msi_data_idx % msi_data_count) + msi_data_start;
	ring_params->flags |= HAL_SRNG_FLAGS_MSI_INTR;
}
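/* Set up the HAL SRNG backing a CE ring, configuring interrupt thresholds
 * according to the per-CE attribute flags.
 */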
static int ath12k_ce_init_ring(struct ath12k_base *ab,
			       struct ath12k_ce_ring *ce_ring,
			       int ce_id, enum hal_ring_type type)
{
	struct hal_srng_params params = { 0 };
	int ret;

	params.ring_base_paddr = ce_ring->base_addr_ce_space;
	params.ring_base_vaddr = ce_ring->base_addr_owner_space;
	params.num_entries = ce_ring->nentries;

	if (!(CE_ATTR_DIS_INTR & ab->hw_params->host_ce_config[ce_id].flags))
		ath12k_ce_srng_msi_ring_params_setup(ab, ce_id, &params);

	switch (type) {
	case HAL_CE_SRC:
		if (!(CE_ATTR_DIS_INTR & ab->hw_params->host_ce_config[ce_id].flags))
			params.intr_batch_cntr_thres_entries = 1;
		break;
	case HAL_CE_DST:
		params.max_buffer_len = ab->hw_params->host_ce_config[ce_id].src_sz_max;
		if (!(ab->hw_params->host_ce_config[ce_id].flags & CE_ATTR_DIS_INTR)) {
			params.intr_timer_thres_us = 1024;
			params.flags |= HAL_SRNG_FLAGS_LOW_THRESH_INTR_EN;
			params.low_threshold = ce_ring->nentries - 3;
		}
		break;
	case HAL_CE_DST_STATUS:
		if (!(ab->hw_params->host_ce_config[ce_id].flags & CE_ATTR_DIS_INTR)) {
			params.intr_batch_cntr_thres_entries = 1;
			params.intr_timer_thres_us = 0x1000;
		}
		break;
	default:
		ath12k_warn(ab, "Invalid CE ring type %d\n", type);
		return -EINVAL;
	}

	/* TODO: Init other params needed by HAL to init the ring */

	ret = ath12k_hal_srng_setup(ab, type, ce_id, 0, &params);
	if (ret < 0) {
		ath12k_warn(ab, "failed to setup srng: %d ring_id %d\n",
			    ret, ce_id);
		return ret;
	}

	ce_ring->hal_ring_id = ret;

	return 0;
}
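/* Allocate a CE ring: coherent DMA memory for the descriptors plus the
 * per-entry skb pointer array.
 */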
static struct ath12k_ce_ring *
ath12k_ce_alloc_ring(struct ath12k_base *ab, int nentries, int desc_sz)
{
	struct ath12k_ce_ring *ce_ring;
	dma_addr_t base_addr;

	ce_ring = kzalloc(struct_size(ce_ring, skb, nentries), GFP_KERNEL);
	if (!ce_ring)
		return ERR_PTR(-ENOMEM);

	ce_ring->nentries = nentries;
	ce_ring->nentries_mask = nentries - 1;

	/* Legacy platforms that do not support cache
	 * coherent DMA are unsupported
	 */
	ce_ring->base_addr_owner_space_unaligned =
		dma_alloc_coherent(ab->dev,
				   nentries * desc_sz + CE_DESC_RING_ALIGN,
				   &base_addr, GFP_KERNEL);
	if (!ce_ring->base_addr_owner_space_unaligned) {
		kfree(ce_ring);
		return ERR_PTR(-ENOMEM);
	}

	ce_ring->base_addr_ce_space_unaligned = base_addr;

	ce_ring->base_addr_owner_space =
		PTR_ALIGN(ce_ring->base_addr_owner_space_unaligned,
			  CE_DESC_RING_ALIGN);

	ce_ring->base_addr_ce_space = ALIGN(ce_ring->base_addr_ce_space_unaligned,
					    CE_DESC_RING_ALIGN);

	return ce_ring;
}
static int ath12k_ce_alloc_pipe(struct ath12k_base *ab, int ce_id)
{
	struct ath12k_ce_pipe *pipe = &ab->ce.ce_pipe[ce_id];
	const struct ce_attr *attr = &ab->hw_params->host_ce_config[ce_id];
	struct ath12k_ce_ring *ring;
	int nentries;
	int desc_sz;

	pipe->attr_flags = attr->flags;

	if (attr->src_nentries) {
		pipe->send_cb = ath12k_ce_send_done_cb;
		nentries = roundup_pow_of_two(attr->src_nentries);
		desc_sz = ath12k_hal_ce_get_desc_size(HAL_CE_DESC_SRC);
		ring = ath12k_ce_alloc_ring(ab, nentries, desc_sz);
		if (IS_ERR(ring))
			return PTR_ERR(ring);
		pipe->src_ring = ring;
	}

	if (attr->dest_nentries) {
		pipe->recv_cb = attr->recv_cb;
		nentries = roundup_pow_of_two(attr->dest_nentries);
		desc_sz = ath12k_hal_ce_get_desc_size(HAL_CE_DESC_DST);
		ring = ath12k_ce_alloc_ring(ab, nentries, desc_sz);
		if (IS_ERR(ring))
			return PTR_ERR(ring);
		pipe->dest_ring = ring;

		desc_sz = ath12k_hal_ce_get_desc_size(HAL_CE_DESC_DST_STATUS);
		ring = ath12k_ce_alloc_ring(ab, nentries, desc_sz);
		if (IS_ERR(ring))
			return PTR_ERR(ring);
		pipe->status_ring = ring;
	}

	return 0;
}
void ath12k_ce_per_engine_service(struct ath12k_base *ab, u16 ce_id)
{
	struct ath12k_ce_pipe *pipe = &ab->ce.ce_pipe[ce_id];

	if (pipe->send_cb)
		pipe->send_cb(pipe);

	if (pipe->recv_cb)
		ath12k_ce_recv_process_cb(pipe);
}
void ath12k_ce_poll_send_completed(struct ath12k_base *ab, u8 pipe_id)
{
	struct ath12k_ce_pipe *pipe = &ab->ce.ce_pipe[pipe_id];

	if ((pipe->attr_flags & CE_ATTR_DIS_INTR) && pipe->send_cb)
		pipe->send_cb(pipe);
}
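/* Queue an skb for transmission on the given CE pipe. Transmit completions
 * are either interrupt driven or, for CEs with interrupts disabled, polled
 * here once the usage threshold is exceeded.
 */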
int ath12k_ce_send(struct ath12k_base *ab, struct sk_buff *skb, u8 pipe_id,
		   u16 transfer_id)
{
	struct ath12k_ce_pipe *pipe = &ab->ce.ce_pipe[pipe_id];
	struct hal_ce_srng_src_desc *desc;
	struct hal_srng *srng;
	unsigned int write_index, sw_index;
	unsigned int nentries_mask;
	int ret = 0;
	u8 byte_swap_data = 0;
	int num_used;

	/* Check if some entries could be regained by handling tx completion if
	 * the CE has interrupts disabled and the used entries is more than the
	 * defined usage threshold.
	 */
	if (pipe->attr_flags & CE_ATTR_DIS_INTR) {
		spin_lock_bh(&ab->ce.ce_lock);
		write_index = pipe->src_ring->write_index;

		sw_index = pipe->src_ring->sw_index;

		if (write_index >= sw_index)
			num_used = write_index - sw_index;
		else
			num_used = pipe->src_ring->nentries - sw_index +
				   write_index;

		spin_unlock_bh(&ab->ce.ce_lock);

		if (num_used > ATH12K_CE_USAGE_THRESHOLD)
			ath12k_ce_poll_send_completed(ab, pipe->pipe_num);
	}

	if (test_bit(ATH12K_FLAG_CRASH_FLUSH, &ab->dev_flags))
		return -ESHUTDOWN;

	spin_lock_bh(&ab->ce.ce_lock);

	write_index = pipe->src_ring->write_index;
	nentries_mask = pipe->src_ring->nentries_mask;

	srng = &ab->hal.srng_list[pipe->src_ring->hal_ring_id];

	spin_lock_bh(&srng->lock);

	ath12k_hal_srng_access_begin(ab, srng);

	if (unlikely(ath12k_hal_srng_src_num_free(ab, srng, false) < 1)) {
		ath12k_hal_srng_access_end(ab, srng);
		ret = -ENOBUFS;
		goto unlock;
	}

	desc = ath12k_hal_srng_src_get_next_reaped(ab, srng);
	if (!desc) {
		ath12k_hal_srng_access_end(ab, srng);
		ret = -ENOBUFS;
		goto unlock;
	}

	if (pipe->attr_flags & CE_ATTR_BYTE_SWAP_DATA)
		byte_swap_data = 1;

	ath12k_hal_ce_src_set_desc(desc, ATH12K_SKB_CB(skb)->paddr,
				   skb->len, transfer_id, byte_swap_data);

	pipe->src_ring->skb[write_index] = skb;
	pipe->src_ring->write_index = CE_RING_IDX_INCR(nentries_mask,
						       write_index);

	ath12k_hal_srng_access_end(ab, srng);

unlock:
	spin_unlock_bh(&srng->lock);

	spin_unlock_bh(&ab->ce.ce_lock);

	return ret;
}
static void ath12k_ce_rx_pipe_cleanup(struct ath12k_ce_pipe *pipe)
{
	struct ath12k_base *ab = pipe->ab;
	struct ath12k_ce_ring *ring = pipe->dest_ring;
	struct sk_buff *skb;
	int i;

	if (!(ring && pipe->buf_sz))
		return;

	for (i = 0; i < ring->nentries; i++) {
		skb = ring->skb[i];
		if (!skb)
			continue;

		ring->skb[i] = NULL;
		dma_unmap_single(ab->dev, ATH12K_SKB_RXCB(skb)->paddr,
				 skb->len + skb_tailroom(skb), DMA_FROM_DEVICE);
		dev_kfree_skb_any(skb);
	}
}
void ath12k_ce_cleanup_pipes(struct ath12k_base *ab)
{
	struct ath12k_ce_pipe *pipe;
	int pipe_num;

	for (pipe_num = 0; pipe_num < ab->hw_params->ce_count; pipe_num++) {
		pipe = &ab->ce.ce_pipe[pipe_num];
		ath12k_ce_rx_pipe_cleanup(pipe);

		/* Cleanup any src CE's which have interrupts disabled */
		ath12k_ce_poll_send_completed(ab, pipe_num);

		/* NOTE: Should we also clean up tx buffer in all pipes? */
	}
}
void ath12k_ce_rx_post_buf(struct ath12k_base *ab)
{
	struct ath12k_ce_pipe *pipe;
	int i;
	int ret;

	for (i = 0; i < ab->hw_params->ce_count; i++) {
		pipe = &ab->ce.ce_pipe[i];
		ret = ath12k_ce_rx_post_pipe(pipe);
		if (ret) {
			if (ret == -ENOSPC)
				continue;

			ath12k_warn(ab, "failed to post rx buf to pipe: %d err: %d\n",
				    i, ret);
			mod_timer(&ab->rx_replenish_retry,
				  jiffies + ATH12K_CE_RX_POST_RETRY_JIFFIES);

			return;
		}
	}
}
void ath12k_ce_rx_replenish_retry(struct timer_list *t)
{
	struct ath12k_base *ab = from_timer(ab, t, rx_replenish_retry);

	ath12k_ce_rx_post_buf(ab);
}
static void ath12k_ce_shadow_config(struct ath12k_base *ab)
{
	int i;

	for (i = 0; i < ab->hw_params->ce_count; i++) {
		if (ab->hw_params->host_ce_config[i].src_nentries)
			ath12k_hal_srng_update_shadow_config(ab, HAL_CE_SRC, i);

		if (ab->hw_params->host_ce_config[i].dest_nentries) {
			ath12k_hal_srng_update_shadow_config(ab, HAL_CE_DST, i);
			ath12k_hal_srng_update_shadow_config(ab, HAL_CE_DST_STATUS, i);
		}
	}
}
void ath12k_ce_get_shadow_config(struct ath12k_base *ab,
				 u32 **shadow_cfg, u32 *shadow_cfg_len)
{
	if (!ab->hw_params->supports_shadow_regs)
		return;

	ath12k_hal_srng_get_shadow_config(ab, shadow_cfg, shadow_cfg_len);

	/* shadow is already configured */
	if (*shadow_cfg_len)
		return;

	/* shadow isn't configured yet, configure now.
	 * non-CE srngs are configured firstly, then
	 * all CE srngs.
	 */
	ath12k_hal_srng_shadow_config(ab);
	ath12k_ce_shadow_config(ab);

	/* get the shadow configuration */
	ath12k_hal_srng_get_shadow_config(ab, shadow_cfg, shadow_cfg_len);
}
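/* Initialize the HAL SRNGs of all allocated CE pipes and reset their ring
 * indices.
 */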
int ath12k_ce_init_pipes(struct ath12k_base *ab)
{
	struct ath12k_ce_pipe *pipe;
	int i;
	int ret;

	ath12k_ce_get_shadow_config(ab, &ab->qmi.ce_cfg.shadow_reg_v3,
				    &ab->qmi.ce_cfg.shadow_reg_v3_len);

	for (i = 0; i < ab->hw_params->ce_count; i++) {
		pipe = &ab->ce.ce_pipe[i];

		if (pipe->src_ring) {
			ret = ath12k_ce_init_ring(ab, pipe->src_ring, i,
						  HAL_CE_SRC);
			if (ret) {
				ath12k_warn(ab, "failed to init src ring: %d\n",
					    ret);
				/* Should we clear any partial init */
				return ret;
			}

			pipe->src_ring->write_index = 0;
			pipe->src_ring->sw_index = 0;
		}

		if (pipe->dest_ring) {
			ret = ath12k_ce_init_ring(ab, pipe->dest_ring, i,
						  HAL_CE_DST);
			if (ret) {
				ath12k_warn(ab, "failed to init dest ring: %d\n",
					    ret);
				/* Should we clear any partial init */
				return ret;
			}

			pipe->rx_buf_needed = pipe->dest_ring->nentries ?
					      pipe->dest_ring->nentries - 2 : 0;

			pipe->dest_ring->write_index = 0;
			pipe->dest_ring->sw_index = 0;
		}

		if (pipe->status_ring) {
			ret = ath12k_ce_init_ring(ab, pipe->status_ring, i,
						  HAL_CE_DST_STATUS);
			if (ret) {
				ath12k_warn(ab, "failed to init dest status ring: %d\n",
					    ret);
				/* Should we clear any partial init */
				return ret;
			}

			pipe->status_ring->write_index = 0;
			pipe->status_ring->sw_index = 0;
		}
	}

	return 0;
}
void ath12k_ce_free_pipes(struct ath12k_base *ab)
{
	struct ath12k_ce_pipe *pipe;
	int desc_sz;
	int i;

	for (i = 0; i < ab->hw_params->ce_count; i++) {
		pipe = &ab->ce.ce_pipe[i];

		if (pipe->src_ring) {
			desc_sz = ath12k_hal_ce_get_desc_size(HAL_CE_DESC_SRC);
			dma_free_coherent(ab->dev,
					  pipe->src_ring->nentries * desc_sz +
					  CE_DESC_RING_ALIGN,
					  pipe->src_ring->base_addr_owner_space,
					  pipe->src_ring->base_addr_ce_space);
			kfree(pipe->src_ring);
			pipe->src_ring = NULL;
		}

		if (pipe->dest_ring) {
			desc_sz = ath12k_hal_ce_get_desc_size(HAL_CE_DESC_DST);
			dma_free_coherent(ab->dev,
					  pipe->dest_ring->nentries * desc_sz +
					  CE_DESC_RING_ALIGN,
					  pipe->dest_ring->base_addr_owner_space,
					  pipe->dest_ring->base_addr_ce_space);
			kfree(pipe->dest_ring);
			pipe->dest_ring = NULL;
		}

		if (pipe->status_ring) {
			desc_sz =
			  ath12k_hal_ce_get_desc_size(HAL_CE_DESC_DST_STATUS);
			dma_free_coherent(ab->dev,
					  pipe->status_ring->nentries * desc_sz +
					  CE_DESC_RING_ALIGN,
					  pipe->status_ring->base_addr_owner_space,
					  pipe->status_ring->base_addr_ce_space);
			kfree(pipe->status_ring);
			pipe->status_ring = NULL;
		}
	}
}
int ath12k_ce_alloc_pipes(struct ath12k_base *ab)
{
	struct ath12k_ce_pipe *pipe;
	int i;
	int ret;
	const struct ce_attr *attr;

	spin_lock_init(&ab->ce.ce_lock);

	for (i = 0; i < ab->hw_params->ce_count; i++) {
		attr = &ab->hw_params->host_ce_config[i];
		pipe = &ab->ce.ce_pipe[i];
		pipe->pipe_num = i;
		pipe->ab = ab;
		pipe->buf_sz = attr->src_sz_max;

		ret = ath12k_ce_alloc_pipe(ab, i);
		if (ret) {
			/* Free any partial successful allocation */
			ath12k_ce_free_pipes(ab);
			return ret;
		}
	}

	return 0;
}
int ath12k_ce_get_attr_flags(struct ath12k_base *ab, int ce_id)
{
	if (ce_id >= ab->hw_params->ce_count)
		return -EINVAL;

	return ab->hw_params->host_ce_config[ce_id].flags;
}